repo | commit | message | diff
---|---|---|---
inorton/xrhash | fa9bbe0b222cc6badd264d9df08acf28003e196b | add fast (cached) iterator | diff --git a/src/lib/xrhash_fast.c b/src/lib/xrhash_fast.c
new file mode 100644
index 0000000..2e30de5
--- /dev/null
+++ b/src/lib/xrhash_fast.c
@@ -0,0 +1,70 @@
+#include <xrhash.h>
+#include <xrhash_fast.h>
+
+xrhash_fast_iterator * xr_init_fasthashiterator( XRHash * xr )
+{
+ xrhash_fast_iterator * fast = NULL;
+ fast = (xrhash_fast_iterator *)malloc(1*sizeof(xrhash_fast_iterator));
+ if ( fast == NULL ) return fast;
+ fast->iter = xr_init_hashiterator( xr );
+ if ( fast->iter != NULL ){
+ fast->cache_progress = 0;
+ fast->cache_index = 0;
+ fast->cache = (void**)calloc( fast->iter->xr->count, sizeof(void*) );
+ fast->cache_len = fast->iter->xr->count;
+ }
+ return fast;
+}
+
+
+void xr_hash_resetfastiterator( xrhash_fast_iterator * fast )
+{
+ fast->cache_index = 0;
+ if ( fast->iter->hash_generation != fast->iter->xr->hash_generation ){
+ /* hash has changed - refill the cache on the next pass */
+ xr_hash_resetiterator( fast->iter );
+ fast->cache_progress = 0;
+ if ( fast->cache_len != fast->iter->xr->count ){
+ /* hash size has changed too */
+ fast->cache_len = fast->iter->xr->count;
+ fast->cache = (void**) realloc( fast->cache, fast->iter->xr->count * sizeof( void* ) );
+ }
+ } else {
+ /* hash is unchanged */
+ }
+}
+
+void xr_hash_fastiterator_free( xrhash_fast_iterator * fast )
+{
+ if ( fast != NULL ){
+ if ( fast->cache != NULL ){
+ free(fast->cache);
+ }
+ free(fast->iter);
+ memset( fast, 0, sizeof(xrhash_fast_iterator) );
+ free(fast);
+ }
+}
+
+void * xr_hash_fastiteratekey( xrhash_fast_iterator * fast )
+{
+ if ( fast->iter->hash_generation == fast->iter->xr->hash_generation ){
+ if ( fast->cache_index < fast->cache_len ){
+ if ( fast->cache_index < fast->cache_progress ){
+ return fast->cache[fast->cache_index++];
+ } else {
+ /* iterate and copy key */
+ void * key = xr_hash_iteratekey( fast->iter );
+ fast->cache[fast->cache_index++] = key;
+ fast->cache_progress++;
+ return key;
+ }
+ }
+ return NULL; /* end of hash */
+ } else {
+ fprintf(stderr,"hash changed during iteration\n");
+ abort();
+ }
+}
+
+
diff --git a/src/lib/xrhash_fast.h b/src/lib/xrhash_fast.h
new file mode 100644
index 0000000..4de8fa1
--- /dev/null
+++ b/src/lib/xrhash_fast.h
@@ -0,0 +1,24 @@
+#ifndef XRHASH_FAST_H
+#define XRHASH_FAST_H
+
+#include <xrhash.h>
+
+typedef struct xrhash_fast_iterator
+{
+ XRHashIter * iter;
+ void ** cache;
+ size_t cache_len;
+ size_t cache_index;
+ size_t cache_progress;
+} xrhash_fast_iterator;
+
+
+xrhash_fast_iterator * xr_init_fasthashiterator( XRHash * xr );
+
+void xr_hash_resetfastiterator( xrhash_fast_iterator * fast );
+
+void xr_hash_fastiterator_free( xrhash_fast_iterator * fast );
+
+void * xr_hash_fastiteratekey( xrhash_fast_iterator * fast );
+
+#endif
|
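The commit above introduces a cached ("fast") iterator: the first pass walks the underlying XRHashIter and memoizes each key into a flat array, and later passes replay the array. A minimal usage sketch, assuming an XRHash already populated with string keys (the dump function and includes are illustrative, not from the commit):

```c
#include <stdio.h>
#include <xrhash.h>
#include <xrhash_fast.h>

static void dump_keys_twice( XRHash * xr )
{
  xrhash_fast_iterator * fast = xr_init_fasthashiterator( xr );
  void * key;
  /* first pass walks the underlying XRHashIter and fills the cache */
  while ( ( key = xr_hash_fastiteratekey( fast ) ) != NULL )
    printf( "%s\n", (char*)key );
  /* after a reset, keys are replayed straight from the cache array */
  xr_hash_resetfastiterator( fast );
  while ( ( key = xr_hash_fastiteratekey( fast ) ) != NULL )
    printf( "%s\n", (char*)key );
  xr_hash_fastiterator_free( fast );
}
```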
inorton/xrhash | 4ed3cba1908bf53d2138a8a26facb2e67f1de2ff | fix fast hash iteration | diff --git a/src/lib/SConscript b/src/lib/SConscript
index af20021..c0ffd37 100644
--- a/src/lib/SConscript
+++ b/src/lib/SConscript
@@ -1,10 +1,12 @@
import os
Import("env")
-libxrhash = env.Library( "xrhash",["xrhash.c","xrhash_fast.c"])
+
+libxrhash = env.Library( "xrhash",source=["xrhash.c","xrhash_fast.c"])
+
env.Append(LIBPATH=os.getcwd() )
env.Append(CPPPATH=os.getcwd() )
Export("env");
SConscript("tests/SConscript",exports="env")
diff --git a/src/lib/tests/xrhash-test.c b/src/lib/tests/xrhash-test.c
index 6bdcb73..c4fd954 100644
--- a/src/lib/tests/xrhash-test.c
+++ b/src/lib/tests/xrhash-test.c
@@ -1,177 +1,178 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "xrhash.h"
#include "xrhash_fast.h"
#include <assert.h>
#include "testutils.h"
#include <sys/time.h>
#define TEST_STR "foo%d"
int assert_contains( XRHash * xr, void * keyptr, int expected )
{
int rv = xr_hash_contains( xr, keyptr );
int hc;
if ( rv == expected ) return rv;
hc = xr_get_hashcode( xr, keyptr );
fprintf(stderr,"test failure: xr_hash_contains(xr=0x%08x,key=0x%08x) returned 0x%02x - expected 0x%02x\n",
(int)xr, (int)keyptr, rv, expected );
fprintf(stderr," : key hashcode = 0x%08x, key index = 0x%08x\n",
hc, xr_get_index( xr, hc ) );
abort();
}
int assert_get( XRHash * xr, void * keyptr, void ** outptr, int expected )
{
int rv = xr_hash_get( xr, keyptr, outptr );
int hc;
if ( rv == expected ) return rv;
hc = xr_get_hashcode( xr, keyptr );
fprintf(stderr,"test failure: xr_hash_get(xr=0x%08x,key=0x%08x,outptr=0x%08x) returned 0x%02x - expected 0x%02x\n",
(int)xr, (int)keyptr, (int)outptr, rv, expected );
fprintf(stderr," : key hashcode = 0x%08x, key index = 0x%08x\n",
hc, xr_get_index( xr, hc ) );
abort();
}
void iterate( XRHash * xr )
{
XRHashIter * iter = xr_init_hashiterator( xr );
void * key = NULL;
int x = 0;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
void * value = NULL;
assert ( xr_hash_get( xr, key , &value ) == XRHASH_EXISTS_TRUE );
x++;
}
assert( x == xr->maxslots );
free(iter);
}
int runtest( int hashsize )
{
struct timeval tstart;
struct timeval tend;
XRHash * xr = xr_init_hash_len( &xr_hash__strhash , &xr_hash__strcmp, hashsize );
int x = 0;
int contains = -1;
int datacount = hashsize;
int datalen = 10 + strlen(TEST_STR); /* ten digits */
char* data_vec = (char*) malloc ( (datacount * datalen * sizeof(char)) + datacount );
char * newstr = data_vec;
fprintf(stderr,"test add\n");
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
snprintf(newstr,datalen,TEST_STR,x);
xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
newstr[datalen] = 0x0;
newstr += datalen;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per add", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test get\n");
newstr = data_vec;
x = 0;
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
int got = -1;
assert_get( xr, newstr, (void**) &got, XRHASH_EXISTS_TRUE );
assert( got == x );
newstr += datalen;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per get", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test iteration\n");
gettimeofday( &tstart, 0x0 );
iterate( xr );
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per iteration with get", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test remove\n");
newstr = data_vec;
x = 0;
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
contains = assert_contains( xr, newstr, XRHASH_EXISTS_TRUE );
xr_hash_remove( xr, newstr );
contains = assert_contains( xr, newstr, XRHASH_EXISTS_FALSE );
newstr += datalen;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per remove", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
assert( xr->count == 0 );
fprintf(stderr,"\n---\ncompleted test:");
fprintf(stderr,"unique items added : %d\n",datacount);
fprintf(stderr,"used unique indexes : %ld\n",(long)xr->touched_indexes);
fprintf(stderr,"index collisions : %lu\n",(unsigned long)( datacount - xr->touched_indexes));
fprintf(stderr,"collision factor : %f\n", ( xr->touched_indexes + 0.0 ) / datacount );
fprintf(stderr,"average lookups / key : %f\n", ( datacount + 0.0 ) / xr->touched_indexes );
fprintf(stderr,"\n------------\ntest fast iter\n");
newstr = data_vec;
x = 0;
while ( x < datacount ){
xr_hash_add( xr, newstr, (void*) x++ );
newstr += datalen;
}
xrhash_fast_iterator * fast = xr_init_fasthashiterator( xr );
void * _key;
while ( ( _key = xr_hash_fastiteratekey( fast ) ) != NULL ){
+
}
x = 0;
gettimeofday ( &tstart, 0 );
while ( x++ < 10 ){
xr_hash_resetfastiterator( fast );
while ( ( _key = xr_hash_fastiteratekey( fast ) ) != NULL ){
}
}
gettimeofday ( &tend, 0 );
fprintf(stderr,"* avg %lld us per iter step\n",
timeval_diff(NULL,&tend,&tstart) / ( 10 * datacount ) );
xr_hash_fastiterator_free( fast );
xr_hash_free( xr );
free(data_vec);
return 0;
}
int main( int argc, char** argv )
{
runtest( 512 );
runtest( 4096 );
runtest( 8192 );
runtest( 16384 );
runtest( 65000 );
runtest( 200000 );
return 0;
}
|
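The benchmark in this test times each phase with timeval_diff from testutils.h, which is not included in this dump. A plausible implementation consistent with how it is called above (returns elapsed microseconds as a long long; the first argument, when non-NULL, receives the difference) might be:

```c
#include <sys/time.h>

long long timeval_diff( struct timeval * difference,
                        struct timeval * end_time,
                        struct timeval * start_time )
{
  /* elapsed microseconds between start_time and end_time */
  long long us = ( (long long)end_time->tv_sec - start_time->tv_sec ) * 1000000LL
               + ( end_time->tv_usec - start_time->tv_usec );
  if ( difference != NULL ){
    difference->tv_sec  = us / 1000000LL;
    difference->tv_usec = us % 1000000LL;
  }
  return us;
}
```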
inorton/xrhash | 0d9b92c3a32c35a50597b401c15e2f2272f57646 | fix xrhash_free add 'fast' iterator | diff --git a/src/lib/SConscript b/src/lib/SConscript
index 407f8e2..af20021 100644
--- a/src/lib/SConscript
+++ b/src/lib/SConscript
@@ -1,10 +1,10 @@
import os
Import("env")
-libxrhash = env.Library( "xrhash","xrhash.c")
+libxrhash = env.Library( "xrhash",["xrhash.c","xrhash_fast.c"])
env.Append(LIBPATH=os.getcwd() )
env.Append(CPPPATH=os.getcwd() )
Export("env");
SConscript("tests/SConscript",exports="env")
diff --git a/src/lib/tests/xrhash-test.c b/src/lib/tests/xrhash-test.c
index b2fdf9e..6bdcb73 100644
--- a/src/lib/tests/xrhash-test.c
+++ b/src/lib/tests/xrhash-test.c
@@ -1,155 +1,177 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "xrhash.h"
+#include "xrhash_fast.h"
#include <assert.h>
#include "testutils.h"
#include <sys/time.h>
#define TEST_STR "foo%d"
int assert_contains( XRHash * xr, void * keyptr, int expected )
{
int rv = xr_hash_contains( xr, keyptr );
int hc;
if ( rv == expected ) return rv;
hc = xr_get_hashcode( xr, keyptr );
fprintf(stderr,"test failure: xr_hash_contains(xr=0x%08x,key=0x%08x) returned 0x%02x - expected 0x%02x\n",
(int)xr, (int)keyptr, rv, expected );
fprintf(stderr," : key hashcode = 0x%08x, key index = 0x%08x\n",
hc, xr_get_index( xr, hc ) );
abort();
}
int assert_get( XRHash * xr, void * keyptr, void ** outptr, int expected )
{
int rv = xr_hash_get( xr, keyptr, outptr );
int hc;
if ( rv == expected ) return rv;
hc = xr_get_hashcode( xr, keyptr );
fprintf(stderr,"test failure: xr_hash_get(xr=0x%08x,key=0x%08x,outptr=0x%08x) returned 0x%02x - expected 0x%02x\n",
(int)xr, (int)keyptr, (int)outptr, rv, expected );
fprintf(stderr," : key hashcode = 0x%08x, key index = 0x%08x\n",
hc, xr_get_index( xr, hc ) );
abort();
}
void iterate( XRHash * xr )
{
XRHashIter * iter = xr_init_hashiterator( xr );
void * key = NULL;
int x = 0;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
void * value = NULL;
assert ( xr_hash_get( xr, key , &value ) == XRHASH_EXISTS_TRUE );
x++;
}
assert( x == xr->maxslots );
free(iter);
}
int runtest( int hashsize )
{
struct timeval tstart;
struct timeval tend;
XRHash * xr = xr_init_hash_len( &xr_hash__strhash , &xr_hash__strcmp, hashsize );
int x = 0;
int contains = -1;
int datacount = hashsize;
int datalen = 10 + strlen(TEST_STR); /* ten digits */
char* data_vec = (char*) malloc ( (datacount * datalen * sizeof(char)) + datacount );
char * newstr = data_vec;
fprintf(stderr,"test add\n");
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
snprintf(newstr,datalen,TEST_STR,x);
xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
newstr[datalen] = 0x0;
newstr += datalen;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per add", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test get\n");
newstr = data_vec;
x = 0;
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
int got = -1;
assert_get( xr, newstr, (void**) &got, XRHASH_EXISTS_TRUE );
assert( got == x );
newstr += datalen;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per get", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test iteration\n");
gettimeofday( &tstart, 0x0 );
iterate( xr );
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per iteration with get", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test remove\n");
newstr = data_vec;
x = 0;
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
contains = assert_contains( xr, newstr, XRHASH_EXISTS_TRUE );
xr_hash_remove( xr, newstr );
contains = assert_contains( xr, newstr, XRHASH_EXISTS_FALSE );
newstr += datalen;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per remove", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
assert( xr->count == 0 );
-
-
-
fprintf(stderr,"\n---\ncompleted test:");
fprintf(stderr,"unique items added : %d\n",datacount);
fprintf(stderr,"used unique indexes : %ld\n",(long)xr->touched_indexes);
fprintf(stderr,"index collisions : %lu\n",(unsigned long)( datacount - xr->touched_indexes));
fprintf(stderr,"collision factor : %f\n", ( xr->touched_indexes + 0.0 ) / datacount );
fprintf(stderr,"average lookups / key : %f\n", ( datacount + 0.0 ) / xr->touched_indexes );
+ fprintf(stderr,"\n------------\ntest fast iter\n");
+ newstr = data_vec;
+ x = 0;
+ while ( x < datacount ){
+ xr_hash_add( xr, newstr, (void*) x++ );
+ newstr += datalen;
+ }
+ xrhash_fast_iterator * fast = xr_init_fasthashiterator( xr );
+ void * _key;
+ while ( ( _key = xr_hash_fastiteratekey( fast ) ) != NULL ){
+ }
+ x = 0;
+
+ gettimeofday ( &tstart, 0 );
+ while ( x++ < 10 ){
+ xr_hash_resetfastiterator( fast );
+ while ( ( _key = xr_hash_fastiteratekey( fast ) ) != NULL ){
+ }
+ }
+ gettimeofday ( &tend, 0 );
+ fprintf(stderr,"* avg %lld us per iter step\n",
+ timeval_diff(NULL,&tend,&tstart) / ( 10 * datacount ) );
+
+ xr_hash_fastiterator_free( fast );
xr_hash_free( xr );
free(data_vec);
return 0;
}
int main( int argc, char** argv )
{
runtest( 512 );
runtest( 4096 );
runtest( 8192 );
runtest( 16384 );
runtest( 65000 );
runtest( 200000 );
return 0;
}
diff --git a/src/lib/xrhash.c b/src/lib/xrhash.c
index 12a60c6..8050ab7 100644
--- a/src/lib/xrhash.c
+++ b/src/lib/xrhash.c
@@ -1,360 +1,362 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
static inline int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
if ( key == NULL )
return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
int xr_get_hashcode( XRHash * xr, void * key )
{
return xr__get_hashcode( xr, key );
}
static inline int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
int pindex = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
while ( index >= xr->maxslots ){
pindex = index;
index = index % ( xr->maxslots - xr->index_mod_magic );
if ( index < 1 ){
index = (index * -1);
}
if ( index == 0 ){
index = 1 + ( pindex - ( pindex / 3 ));
}
}
return index;
}
int xr_get_index( XRHash * xr, int hashcode )
{
return xr__get_index( xr, hashcode );
}
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
return xr_init_hash_len( hash, cmp, XRHASH_SLOTS );
}
XRHash * xr_init_hash_len( hashfn hash , cmpfn cmp, size_t len )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
/* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
table->maxslots = len;
table->hash_generation = 0;
table->index_mod_magic = ( len > 512 ) ? 91 : 3 ;
table->buckets = (XRHashLink**)calloc(len,sizeof(XRHashLink*));
}
return table;
}
void xr_hash_free( XRHash * xr )
{
if ( xr == NULL ) return;
- XRHashLink * slot = xr->buckets[0];
+ XRHashLink * slot;
int bucket = 0;
- while ( slot != NULL ){
- XRHashLink * nextslot = slot->next;
- if ( nextslot == NULL ){
- if ( (++bucket) < ( xr->maxslots ) ){
- nextslot = xr->buckets[bucket];
- }
- } else {
- if ( slot != xr->buckets[bucket] ){
- slot->next = NULL;
- free( slot );
+ while ( bucket < xr->maxslots ){
+ if ( xr->buckets[bucket] != NULL ){
+ slot = xr->buckets[bucket];
+ /* iterate through and free links */
+ if ( slot != NULL ){
+ XRHashLink * prevslot = NULL;
+ XRHashLink * nextslot = slot;
+ while ( nextslot != NULL ){
+ prevslot = nextslot;
+ nextslot = prevslot->next;
+ free(prevslot);
+ }
}
}
- slot = nextslot;
+ bucket++;
}
free(xr->buckets);
free(xr);
}
int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
xr->touched_indexes++;
slot = xr->buckets[index];
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
}
int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
xr->hash_generation++;
slot->key = NULL;
slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
int xr_hash_get( XRHash * xr, void * key, void **dataout )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
*dataout = slot->value;
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
XRHashIter * xr_init_hashiterator( XRHash * xr )
{
XRHashIter * iter = (XRHashIter*)malloc(1*sizeof(XRHashIter));
if ( iter == NULL ){
errno = ENOMEM;
} else {
iter->xr = xr;
xr_hash_resetiterator( iter );
}
return iter;
}
void xr_hash_resetiterator( XRHashIter * iter )
{
iter->hash_generation = iter->xr->hash_generation;
iter->current_bucket = 0;
iter->next_slot = iter->xr->buckets[0];
}
void * xr_hash_iteratekey( XRHashIter * iter )
{
void * key = NULL;
if ( iter->xr->hash_generation != iter->hash_generation ){
fprintf(stderr,"hash changed during iteration\n");
abort();
/* return NULL; */
}
if ( iter->next_slot != NULL ){ /* iterate through links in the current bucket */
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
} else { /* no more links here, move to next bucket */
do {
if ( ++iter->current_bucket >= iter->xr->maxslots )
return NULL; /* no more filled buckets, end of iterations */
} while ( iter->xr->buckets[iter->current_bucket] == NULL );
/* reached the end of the hash */
if ( iter->current_bucket >= iter->xr->maxslots )
return NULL; /* end of iterations */
/* now pointing at the next slot */
iter->next_slot = iter->xr->buckets[iter->current_bucket];
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
}
return key;
}
int xr_hash__strhash( void * instr )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) instr;
size_t len = strlen(str);
int hash = 0;
int c = 0;
if ( ( len % 4 ) == 0 ){
while ( c < (len/4) ){
hash = (hash << 5) - hash + ((int*)instr)[c++];
}
} else {
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
}
while ( hash < 1 )
hash = 1 + ( hash * -1 );
return hash;
}
int xr_hash__strcmp( void * stra, void * strb )
{
return strcmp((char*)stra,(char*)strb);
}
|
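This commit rewrites xr_hash_free to walk every bucket index and free each link in the chain. The previous version started from buckets[0] (often NULL, which ended the loop immediately) and skipped freeing the head link of each bucket, leaking memory. A minimal lifecycle exercising the fixed path (the string keys are illustrative):

```c
#include <xrhash.h>

int main( void )
{
  XRHash * xr = xr_init_hash( &xr_hash__strhash, &xr_hash__strcmp );
  xr_hash_add( xr, "alpha", (void*)1 );
  xr_hash_add( xr, "beta",  (void*)2 );
  xr_hash_free( xr );  /* now frees every bucket chain, head links included */
  return 0;
}
```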
inorton/xrhash | cc4fc41a064edcb88ef5b22a49cc28566c753e14 | attempt to speed up iteration a little | diff --git a/src/lib/xrhash.c b/src/lib/xrhash.c
index 8392e14..12a60c6 100644
--- a/src/lib/xrhash.c
+++ b/src/lib/xrhash.c
@@ -1,354 +1,360 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
-int xr__get_hashcode( XRHash * xr, void * key )
+static inline int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
if ( key == NULL )
return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
int xr_get_hashcode( XRHash * xr, void * key )
{
return xr__get_hashcode( xr, key );
}
-int xr__get_index( XRHash * xr, int hashcode )
+static inline int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
int pindex = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
while ( index >= xr->maxslots ){
pindex = index;
index = index % ( xr->maxslots - xr->index_mod_magic );
if ( index < 1 ){
index = (index * -1);
}
if ( index == 0 ){
index = 1 + ( pindex - ( pindex / 3 ));
}
}
return index;
}
int xr_get_index( XRHash * xr, int hashcode )
{
return xr__get_index( xr, hashcode );
}
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
return xr_init_hash_len( hash, cmp, XRHASH_SLOTS );
}
XRHash * xr_init_hash_len( hashfn hash , cmpfn cmp, size_t len )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
/* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
table->maxslots = len;
table->hash_generation = 0;
table->index_mod_magic = ( len > 512 ) ? 91 : 3 ;
table->buckets = (XRHashLink**)calloc(len,sizeof(XRHashLink*));
}
return table;
}
void xr_hash_free( XRHash * xr )
{
if ( xr == NULL ) return;
XRHashLink * slot = xr->buckets[0];
int bucket = 0;
while ( slot != NULL ){
XRHashLink * nextslot = slot->next;
if ( nextslot == NULL ){
if ( (++bucket) < ( xr->maxslots ) ){
nextslot = xr->buckets[bucket];
}
} else {
if ( slot != xr->buckets[bucket] ){
slot->next = NULL;
free( slot );
}
}
slot = nextslot;
}
free(xr->buckets);
free(xr);
}
int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
xr->touched_indexes++;
slot = xr->buckets[index];
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
}
int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
xr->hash_generation++;
slot->key = NULL;
slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
int xr_hash_get( XRHash * xr, void * key, void **dataout )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
*dataout = slot->value;
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
XRHashIter * xr_init_hashiterator( XRHash * xr )
{
XRHashIter * iter = (XRHashIter*)malloc(1*sizeof(XRHashIter));
if ( iter == NULL ){
errno = ENOMEM;
} else {
iter->xr = xr;
xr_hash_resetiterator( iter );
}
return iter;
}
void xr_hash_resetiterator( XRHashIter * iter )
{
iter->hash_generation = iter->xr->hash_generation;
iter->current_bucket = 0;
iter->next_slot = iter->xr->buckets[0];
}
void * xr_hash_iteratekey( XRHashIter * iter )
{
void * key = NULL;
if ( iter->xr->hash_generation != iter->hash_generation ){
fprintf(stderr,"hash changed during iteration\n");
abort();
/* return NULL; */
}
if ( iter->next_slot != NULL ){ /* iterate through links in the current bucket */
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
} else { /* no more links here, move to next bucket */
do {
if ( ++iter->current_bucket >= iter->xr->maxslots )
return NULL; /* no more filled buckets, end of iterations */
} while ( iter->xr->buckets[iter->current_bucket] == NULL );
/* reached the end of the hash */
if ( iter->current_bucket >= iter->xr->maxslots )
return NULL; /* end of iterations */
/* now pointing at the next slot */
iter->next_slot = iter->xr->buckets[iter->current_bucket];
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
}
return key;
}
int xr_hash__strhash( void * instr )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) instr;
size_t len = strlen(str);
int hash = 0;
int c = 0;
- while ( c < len ){
- hash = (hash << 5) - hash + str[c];
- c++;
+ if ( ( len % 4 ) == 0 ){
+ while ( c < (len/4) ){
+ hash = (hash << 5) - hash + ((int*)instr)[c++];
+ }
+ } else {
+ while ( c < len ){
+ hash = (hash << 5) - hash + str[c];
+ c++;
+ }
}
while ( hash < 1 )
hash = 1 + ( hash * -1 );
return hash;
}
int xr_hash__strcmp( void * stra, void * strb )
{
return strcmp((char*)stra,(char*)strb);
}
diff --git a/src/lib/xrhash.h b/src/lib/xrhash.h
index 09da47c..c5ed4fd 100644
--- a/src/lib/xrhash.h
+++ b/src/lib/xrhash.h
@@ -1,207 +1,207 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#ifndef XRHASH_SLOTS
#define XRHASH_SLOTS 8192
#endif
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
/**
* @brief function to generate a hash code for the object at the given pointer. should return a positive int greater than zero.
*/
typedef int (*hashfn)(void*);
/**
* @brief function to compare two objects for equality
*/
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
void * key;
void * value;
XRHashLink * next;
};
/**
* @brief xrhash hashtable object
*/
typedef struct xrhash
{
- int hash_generation; /* used to monitor changes in the hash for iterators */
- hashfn hash;
- cmpfn cmp;
- size_t count;
- size_t touched_indexes;
- size_t maxslots;
- int index_mod_magic;
- XRHashLink ** buckets;
+ int hash_generation; /** used to monitor changes in the hash for iterators */
+ hashfn hash; /** function to generate hashcodes */
+ cmpfn cmp; /** function to compare keys ( eg strcmp() ) */
+ size_t count; /** number of unique items assigned */
+ size_t touched_indexes; /** number of unique hashcodes used */
+ size_t maxslots; /** total number of available hashcodes */
+ int index_mod_magic;
+ XRHashLink ** buckets;
} XRHash;
/**
* @brief xrhash iteration object
*/
typedef struct xrhash_iter
{
- XRHash * xr;
- int hash_generation;
- int current_bucket;
- XRHashLink * next_slot;
+ XRHash * xr; /** hash this iterator operates on */
+ int hash_generation; /** used to check for changes in the hash */
+ int current_bucket;
+ XRHashLink * next_slot;
} XRHashIter;
/**
* @brief initialize a xrhash hashtable object
*
* @param hash callback function for hashcode generation
* @param cmp callback function for equality comparison (eg strcmp)
*
* @return
*/
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
/**
* @brief initialize a xrhash hashtable object with a specific number of hash buckets
*
* @param hash callback function for hashcode generation
* @param cmp callback function for equality comparison (eg strcmp)
* @param len number of buckets to use
*
* @return
*/
XRHash * xr_init_hash_len( int (*hash)(void*), int(*cmp)(void*,void*), size_t len );
/**
* @brief free an xrhash
*
* @param xr hashtable to free
*/
void xr_hash_free( XRHash * xr );
/**
* @brief add an object to the given xr hashtable
*
* @param xr hashtable to add to
* @param key store value with this key
* @param value object to store
*
* @return XRHASH_ADDED on success, XRHASH_ADDED_ALREADY if the key is already taken or XRHASH_NULL_KEY if key was NULL
*/
int xr_hash_add( XRHash * xr, void * key, void * value );
/**
* @brief test if a given key exists in the hashtable
*
* @param xr hashtable to check
* @param key pointer to key
*
* @return XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE
*/
int xr_hash_contains( XRHash * xr, void * key );
/**
* @brief get the value corresponding to key
*
* @param xr hashtable to access
* @param key key to use
* @param dataout put value (pointer) here
*
* @returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_get( XRHash * xr, void * key, void **dataout );
/**
* @brief delete a given key+value from the hashtable
*
* @param xr hashtable to access
* @param key delete the value with this key
*
* @return
*/
int xr_hash_remove( XRHash * xr, void * key );
/**
* @brief initialize an object to iterate forwards through keys in the hashtable
*
* @param xr hashtable to use
*
* @return initialized iterator object
*/
XRHashIter * xr_init_hashiterator( XRHash * xr );
/**
* @brief reset a hash iterator
*
* @param iter
*/
void xr_hash_resetiterator( XRHashIter * iter );
/**
* @brief iterate to the next item using a hash iterator
*
* @param iter iterator to use
*
* @return pointer to a key used in the iter->xr hashtable
*/
void * xr_hash_iteratekey( XRHashIter * iter );
/**
* @brief generate a hashcode for a given null terminated string
*
* @param str string to hash
*
* @return hashcode > 1 on success, <= 0 on error
*/
int xr_hash__strhash( void * str );
/**
* @brief wrapper around strcmp
*
* @param stra
* @param strb
*
* @return 0,-1 or 1
*/
int xr_hash__strcmp( void * stra, void * strb );
int xr_get_index( XRHash * xr, int hashcode );
int xr_get_hashcode( XRHash * xr, void*keyptr );
#endif
|
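Besides marking the internal hashcode/index helpers static inline, this commit adds a word-at-a-time path to xr_hash__strhash for strings whose length is a multiple of four. A small check of both paths (the printed values are implementation-specific, not from the commit):

```c
#include <stdio.h>
#include <xrhash.h>

int main( void )
{
  /* len 4 -> hashed one int at a time; len 3 -> hashed byte-by-byte */
  printf( "food -> %d\n", xr_hash__strhash( "food" ) );
  printf( "foo  -> %d\n", xr_hash__strhash( "foo" ) );
  return 0;
}
```

Note that the int path reads the key through an `int*`, which assumes the key pointer is suitably aligned for int access.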
inorton/xrhash | 0d411a79059ddaa9fdcecc596920ad3f671b0c4b | minor changes to make macos happy | diff --git a/src/lib/tests/xrhash-test.c b/src/lib/tests/xrhash-test.c
index 38ff7f5..b2fdf9e 100644
--- a/src/lib/tests/xrhash-test.c
+++ b/src/lib/tests/xrhash-test.c
@@ -1,155 +1,155 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "xrhash.h"
#include <assert.h>
#include "testutils.h"
#include <sys/time.h>
#define TEST_STR "foo%d"
int assert_contains( XRHash * xr, void * keyptr, int expected )
{
int rv = xr_hash_contains( xr, keyptr );
int hc;
if ( rv == expected ) return rv;
hc = xr_get_hashcode( xr, keyptr );
fprintf(stderr,"test failure: xr_hash_contains(xr=0x%08x,key=0x%08x) returned 0x%02x - expected 0x%02x\n",
(int)xr, (int)keyptr, rv, expected );
fprintf(stderr," : key hashcode = 0x%08x, key index = 0x%08x\n",
hc, xr_get_index( xr, hc ) );
abort();
}
int assert_get( XRHash * xr, void * keyptr, void ** outptr, int expected )
{
int rv = xr_hash_get( xr, keyptr, outptr );
int hc;
if ( rv == expected ) return rv;
hc = xr_get_hashcode( xr, keyptr );
fprintf(stderr,"test failure: xr_hash_get(xr=0x%08x,key=0x%08x,outptr=0x%08x) returned 0x%02x - expected 0x%02x\n",
(int)xr, (int)keyptr, (int)outptr, rv, expected );
fprintf(stderr," : key hashcode = 0x%08x, key index = 0x%08x\n",
hc, xr_get_index( xr, hc ) );
abort();
}
void iterate( XRHash * xr )
{
XRHashIter * iter = xr_init_hashiterator( xr );
void * key = NULL;
int x = 0;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
void * value = NULL;
assert ( xr_hash_get( xr, key , &value ) == XRHASH_EXISTS_TRUE );
x++;
}
assert( x == xr->maxslots );
free(iter);
}
int runtest( int hashsize )
{
struct timeval tstart;
struct timeval tend;
XRHash * xr = xr_init_hash_len( &xr_hash__strhash , &xr_hash__strcmp, hashsize );
int x = 0;
int contains = -1;
int datacount = hashsize;
int datalen = 10 + strlen(TEST_STR); /* ten digits */
char* data_vec = (char*) malloc ( (datacount * datalen * sizeof(char)) + datacount );
char * newstr = data_vec;
fprintf(stderr,"test add\n");
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
snprintf(newstr,datalen,TEST_STR,x);
xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
newstr[datalen] = 0x0;
newstr += datalen;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per add", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test get\n");
newstr = data_vec;
x = 0;
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
int got = -1;
assert_get( xr, newstr, (void**) &got, XRHASH_EXISTS_TRUE );
assert( got == x );
newstr += datalen;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per get", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test iteration\n");
gettimeofday( &tstart, 0x0 );
iterate( xr );
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per iteration with get", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test remove\n");
newstr = data_vec;
x = 0;
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
contains = assert_contains( xr, newstr, XRHASH_EXISTS_TRUE );
xr_hash_remove( xr, newstr );
contains = assert_contains( xr, newstr, XRHASH_EXISTS_FALSE );
newstr += datalen;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per remove", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
assert( xr->count == 0 );
fprintf(stderr,"\n---\ncompleted test:");
fprintf(stderr,"unique items added : %d\n",datacount);
- fprintf(stderr,"used unique indexes : %d\n",xr->touched_indexes);
- fprintf(stderr,"index collisions : %d\n",datacount - xr->touched_indexes);
+ fprintf(stderr,"used unique indexes : %ld\n",(long)xr->touched_indexes);
+ fprintf(stderr,"index collisions : %lu\n",(unsigned long)( datacount - xr->touched_indexes));
fprintf(stderr,"collision factor : %f\n", ( xr->touched_indexes + 0.0 ) / datacount );
fprintf(stderr,"average lookups / key : %f\n", ( datacount + 0.0 ) / xr->touched_indexes );
xr_hash_free( xr );
free(data_vec);
return 0;
}
int main( int argc, char** argv )
{
runtest( 512 );
runtest( 4096 );
runtest( 8192 );
runtest( 16384 );
runtest( 65000 );
runtest( 200000 );
return 0;
}
diff --git a/src/lib/xrhash.c b/src/lib/xrhash.c
index 8defd1d..8392e14 100644
--- a/src/lib/xrhash.c
+++ b/src/lib/xrhash.c
@@ -1,354 +1,354 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
-inline int xr__get_hashcode( XRHash * xr, void * key )
+int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
if ( key == NULL )
return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
int xr_get_hashcode( XRHash * xr, void * key )
{
return xr__get_hashcode( xr, key );
}
-inline int xr__get_index( XRHash * xr, int hashcode )
+int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
int pindex = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
while ( index >= xr->maxslots ){
pindex = index;
index = index % ( xr->maxslots - xr->index_mod_magic );
if ( index < 1 ){
index = (index * -1);
}
if ( index == 0 ){
index = 1 + ( pindex - ( pindex / 3 ));
}
}
return index;
}
int xr_get_index( XRHash * xr, int hashcode )
{
return xr__get_index( xr, hashcode );
}
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
return xr_init_hash_len( hash, cmp, XRHASH_SLOTS );
}
XRHash * xr_init_hash_len( hashfn hash , cmpfn cmp, size_t len )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
/* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
table->maxslots = len;
table->hash_generation = 0;
table->index_mod_magic = ( len > 512 ) ? 91 : 3 ;
table->buckets = (XRHashLink**)calloc(len,sizeof(XRHashLink*));
}
return table;
}
void xr_hash_free( XRHash * xr )
{
if ( xr == NULL ) return;
XRHashLink * slot = xr->buckets[0];
int bucket = 0;
while ( slot != NULL ){
XRHashLink * nextslot = slot->next;
if ( nextslot == NULL ){
if ( (++bucket) < ( xr->maxslots ) ){
nextslot = xr->buckets[bucket];
}
} else {
if ( slot != xr->buckets[bucket] ){
slot->next = NULL;
free( slot );
}
}
slot = nextslot;
}
free(xr->buckets);
free(xr);
}
int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
xr->touched_indexes++;
slot = xr->buckets[index];
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
}
int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
xr->hash_generation++;
slot->key = NULL;
slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
int xr_hash_get( XRHash * xr, void * key, void **dataout )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
*dataout = slot->value;
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
XRHashIter * xr_init_hashiterator( XRHash * xr )
{
XRHashIter * iter = (XRHashIter*)malloc(1*sizeof(XRHashIter));
if ( iter == NULL ){
errno = ENOMEM;
} else {
iter->xr = xr;
xr_hash_resetiterator( iter );
}
return iter;
}
void xr_hash_resetiterator( XRHashIter * iter )
{
iter->hash_generation = iter->xr->hash_generation;
iter->current_bucket = 0;
iter->next_slot = iter->xr->buckets[0];
}
void * xr_hash_iteratekey( XRHashIter * iter )
{
void * key = NULL;
if ( iter->xr->hash_generation != iter->hash_generation ){
fprintf(stderr,"hash changed during iteration\n");
abort();
/* return NULL; */
}
if ( iter->next_slot != NULL ){ /* iterate through links in the current bucket */
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
} else { /* no more links here, move to next bucket */
do {
if ( ++iter->current_bucket >= iter->xr->maxslots )
return NULL; /* no more filled buckets, end of iterations */
} while ( iter->xr->buckets[iter->current_bucket] == NULL );
/* reached the end of the hash */
if ( iter->current_bucket >= iter->xr->maxslots )
return NULL; /* end of iterations */
/* now pointing at the next slot */
iter->next_slot = iter->xr->buckets[iter->current_bucket];
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
}
return key;
}
int xr_hash__strhash( void * instr )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) instr;
size_t len = strlen(str);
int hash = 0;
int c = 0;
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
while ( hash < 1 )
hash = 1 + ( hash * -1 );
return hash;
}
int xr_hash__strcmp( void * stra, void * strb )
{
return strcmp((char*)stra,(char*)strb);
}
|
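The fix above silences format-string warnings on macOS by casting the size_t fields to long/unsigned long before printing. Where C99 is available, the %zu specifier would avoid the casts entirely (a sketch, not what the commit does):

```c
fprintf(stderr, "used unique indexes : %zu\n", xr->touched_indexes);
fprintf(stderr, "index collisions    : %zu\n", datacount - xr->touched_indexes);
```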
inorton/xrhash | 3e73bad06d70ad65d6b159c1e88b445641c8eaea | removed saved hashcode - we dont rehash the table so dont need it | diff --git a/src/lib/xrhash.c b/src/lib/xrhash.c
index af48497..8defd1d 100644
--- a/src/lib/xrhash.c
+++ b/src/lib/xrhash.c
@@ -1,359 +1,354 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
inline int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
if ( key == NULL )
return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
int xr_get_hashcode( XRHash * xr, void * key )
{
return xr__get_hashcode( xr, key );
}
inline int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
int pindex = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
while ( index >= xr->maxslots ){
pindex = index;
index = index % ( xr->maxslots - xr->index_mod_magic );
if ( index < 1 ){
index = (index * -1);
}
if ( index == 0 ){
index = 1 + ( pindex - ( pindex / 3 ));
}
}
return index;
}
int xr_get_index( XRHash * xr, int hashcode )
{
return xr__get_index( xr, hashcode );
}
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
return xr_init_hash_len( hash, cmp, XRHASH_SLOTS );
}
XRHash * xr_init_hash_len( hashfn hash , cmpfn cmp, size_t len )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
/* no hasher or comparitor supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
table->maxslots = len;
table->hash_generation = 0;
table->index_mod_magic = ( len > 512 ) ? 91 : 3 ;
table->buckets = (XRHashLink**)calloc(len,sizeof(XRHashLink*));
}
return table;
}
void xr_hash_free( XRHash * xr )
{
if ( xr == NULL ) return;
XRHashLink * slot = xr->buckets[0];
int bucket = 0;
while ( slot != NULL ){
XRHashLink * nextslot = slot->next;
if ( nextslot == NULL ){
if ( (++bucket) < ( xr->maxslots ) ){
nextslot = xr->buckets[bucket];
}
} else {
if ( slot != xr->buckets[bucket] ){
slot->next = NULL;
free( slot );
}
}
slot = nextslot;
}
free(xr->buckets);
free(xr);
}
int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
xr->touched_indexes++;
slot = xr->buckets[index];
- slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
- if ( slot->hashcode == 0 ){
- break;
- }
if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
- slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
}
int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
xr->hash_generation++;
slot->key = NULL;
slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
int xr_hash_get( XRHash * xr, void * key, void **dataout )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
*dataout = slot->value;
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
XRHashIter * xr_init_hashiterator( XRHash * xr )
{
XRHashIter * iter = (XRHashIter*)malloc(1*sizeof(XRHashIter));
if ( iter == NULL ){
errno = ENOMEM;
} else {
iter->xr = xr;
xr_hash_resetiterator( iter );
}
return iter;
}
void xr_hash_resetiterator( XRHashIter * iter )
{
iter->hash_generation = iter->xr->hash_generation;
iter->current_bucket = 0;
iter->next_slot = iter->xr->buckets[0];
}
void * xr_hash_iteratekey( XRHashIter * iter )
{
void * key = NULL;
if ( iter->xr->hash_generation != iter->hash_generation ){
fprintf(stderr,"hash changed during iteration\n");
abort();
/* return NULL; */
}
if ( iter->next_slot != NULL ){ /* iterate through links in the current bucket */
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
} else { /* no more links here, move to next bucket */
do {
if ( ++iter->current_bucket >= iter->xr->maxslots )
return NULL; /* no more filled buckets, end of iterations */
} while ( iter->xr->buckets[iter->current_bucket] == NULL );
/* reached the end of the hash */
if ( iter->current_bucket >= iter->xr->maxslots )
return NULL; /* end of iterations */
/* now pointing at the next slot */
iter->next_slot = iter->xr->buckets[iter->current_bucket];
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
}
return key;
}
int xr_hash__strhash( void * instr )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) instr;
size_t len = strlen(str);
int hash = 0;
int c = 0;
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
while ( hash < 1 )
hash = 1 + ( hash * -1 );
return hash;
}
int xr_hash__strcmp( void * stra, void * strb )
{
return strcmp((char*)stra,(char*)strb);
}
diff --git a/src/lib/xrhash.h b/src/lib/xrhash.h
index ea68f3b..09da47c 100644
--- a/src/lib/xrhash.h
+++ b/src/lib/xrhash.h
@@ -1,208 +1,207 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#ifndef XRHASH_SLOTS
#define XRHASH_SLOTS 8192
#endif
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
/**
* @brief function to generate a hash code for the object at the given pointer. should return a positive int greater than zero.
*/
typedef int (*hashfn)(void*);
/**
* @brief function to compare two objects for equality
*/
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
void * key;
void * value;
- int hashcode;
XRHashLink * next;
};
/**
* @brief xrhash hashtable object
*/
typedef struct xrhash
{
int hash_generation; /* used to monitor changes in the hash for iterators */
hashfn hash;
cmpfn cmp;
size_t count;
size_t touched_indexes;
size_t maxslots;
int index_mod_magic;
XRHashLink ** buckets;
} XRHash;
/**
* @brief xrhash iteration object
*/
typedef struct xrhash_iter
{
XRHash * xr;
int hash_generation;
int current_bucket;
XRHashLink * next_slot;
} XRHashIter;
/**
* @brief initialize a xrhash hashtable object
*
* @param hash callback function for hashcode generation
* @param cmp callback function for equality comparison (eg strcmp)
*
* @return
*/
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
/**
* @brief initialize a xrhash hashtable object with a specific number of hash buckets
*
* @param hash callback function for hashcode generation
* @param cmp callback function for equality comparison (eg strcmp)
* @param len number of buckets to use
*
* @return
*/
XRHash * xr_init_hash_len( int (*hash)(void*), int(*cmp)(void*,void*), size_t len );
/**
* @brief free an xrhash
*
* @param xr hashtable to free
*/
void xr_hash_free( XRHash * xr );
/**
* @brief add an object to the given xr hashtable
*
* @param xr hashtable to add to
* @param key store value with this key
* @param value object to store
*
* @return XRHASH_ADDED on success, XRHASH_ADDED_ALREADY if the key is already taken or XRHASH_NULL_KEY if key was NULL
*/
int xr_hash_add( XRHash * xr, void * key, void * value );
/**
* @brief test if a given key exists in the hashtable
*
* @param xr hashtable to check
* @param key pointer to key
*
* @return XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE
*/
int xr_hash_contains( XRHash * xr, void * key );
/**
* @brief get the value corresponding to key
*
* @param xr hashtable to access
* @param key key to use
* @param dataout put value (pointer) here
*
* @returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_get( XRHash * xr, void * key, void **dataout );
/**
* @brief delete a given key+value from the hashtable
*
* @param xr hashtable to access
* @param key delete the value with this key
*
* @return
*/
int xr_hash_remove( XRHash * xr, void * key );
/**
* @brief initialize an object to iterate forwards through keys in the hashtable
*
* @param xr hashtable to use
*
* @return initialized iterator object
*/
XRHashIter * xr_init_hashiterator( XRHash * xr );
/**
* @brief reset a hash iterator
*
* @param iter
*/
void xr_hash_resetiterator( XRHashIter * iter );
/**
* @brief iterate to the next item using a hash iterator
*
* @param iter iterator to use
*
* @return pointer to a key used in the iter->xr hashtable
*/
void * xr_hash_iteratekey( XRHashIter * iter );
/**
* @brief generate a hashcode for a given null terminated string
*
* @param str string to hash
*
* @return hashcode > 1 on success, <= 0 on error
*/
int xr_hash__strhash( void * str );
/**
* @brief wrapper around strcmp
*
* @param stra
* @param strb
*
* @return 0,-1 or 1
*/
int xr_hash__strcmp( void * stra, void * strb );
int xr_get_index( XRHash * xr, int hashcode );
int xr_get_hashcode( XRHash * xr, void*keyptr );
#endif
|
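Dropping the stored hashcode shrinks every XRHashLink; since the table is never rehashed, the cached value was never read back. On a typical LP64 platform the saving is 8 bytes per link (4 for the int plus 4 of padding), as this standalone sketch illustrates (the struct names are illustrative):

```c
#include <stdio.h>

struct link_old { void * key; void * value; int hashcode; struct link_old * next; };
struct link_new { void * key; void * value; struct link_new * next; };

int main( void )
{
  printf( "with hashcode: %zu bytes\n", sizeof(struct link_old) );  /* 32 on LP64 */
  printf( "without      : %zu bytes\n", sizeof(struct link_new) );  /* 24 on LP64 */
  return 0;
}
```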
inorton/xrhash | 3f99ffd5ef14d7a234f4eb106cd4ceb3b52daab3 | allow iterator to be re-used | diff --git a/src/lib/xrhash.c b/src/lib/xrhash.c
index 0a180c0..af48497 100644
--- a/src/lib/xrhash.c
+++ b/src/lib/xrhash.c
@@ -1,353 +1,359 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
inline int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
if ( key == NULL )
return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
int xr_get_hashcode( XRHash * xr, void * key )
{
return xr__get_hashcode( xr, key );
}
inline int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
int pindex = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
while ( index >= xr->maxslots ){
pindex = index;
index = index % ( xr->maxslots - xr->index_mod_magic );
if ( index < 1 ){
index = (index * -1);
}
if ( index == 0 ){
index = 1 + ( pindex - ( pindex / 3 ));
}
}
return index;
}
int xr_get_index( XRHash * xr, int hashcode )
{
return xr__get_index( xr, hashcode );
}
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
return xr_init_hash_len( hash, cmp, XRHASH_SLOTS );
}
XRHash * xr_init_hash_len( hashfn hash , cmpfn cmp, size_t len )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
  /* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
table->maxslots = len;
table->hash_generation = 0;
table->index_mod_magic = ( len > 512 ) ? 91 : 3 ;
table->buckets = (XRHashLink**)calloc(len,sizeof(XRHashLink*));
}
return table;
}
void xr_hash_free( XRHash * xr )
{
if ( xr == NULL ) return;
XRHashLink * slot = xr->buckets[0];
int bucket = 0;
while ( slot != NULL ){
XRHashLink * nextslot = slot->next;
if ( nextslot == NULL ){
if ( (++bucket) < ( xr->maxslots ) ){
nextslot = xr->buckets[bucket];
}
} else {
if ( slot != xr->buckets[bucket] ){
slot->next = NULL;
free( slot );
}
}
slot = nextslot;
}
free(xr->buckets);
free(xr);
}
int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
xr->touched_indexes++;
slot = xr->buckets[index];
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->hashcode == 0 ){
break;
}
if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
}
int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
xr->hash_generation++;
slot->key = NULL;
slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
int xr_hash_get( XRHash * xr, void * key, void **dataout )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
*dataout = slot->value;
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
XRHashIter * xr_init_hashiterator( XRHash * xr )
{
XRHashIter * iter = (XRHashIter*)malloc(1*sizeof(XRHashIter));
if ( iter == NULL ){
errno = ENOMEM;
} else {
iter->xr = xr;
- iter->hash_generation = xr->hash_generation;
- iter->current_bucket = 0;
- iter->next_slot = xr->buckets[0];
+ xr_hash_resetiterator( iter );
}
return iter;
}
+void xr_hash_resetiterator( XRHashIter * iter )
+{
+ iter->hash_generation = iter->xr->hash_generation;
+ iter->current_bucket = 0;
+ iter->next_slot = iter->xr->buckets[0];
+
+}
+
void * xr_hash_iteratekey( XRHashIter * iter )
{
void * key = NULL;
if ( iter->xr->hash_generation != iter->hash_generation ){
fprintf(stderr,"hash changed during iteration\n");
abort();
/* return NULL; */
}
if ( iter->next_slot != NULL ){ /* iterate through links in the current bucket */
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
} else { /* no more links here, move to next bucket */
do {
if ( ++iter->current_bucket >= iter->xr->maxslots )
return NULL; /* no more filled buckets, end of iterations */
} while ( iter->xr->buckets[iter->current_bucket] == NULL );
/* reached the end of the hash */
if ( iter->current_bucket >= iter->xr->maxslots )
return NULL; /* end of iterations */
/* now pointing at the next slot */
iter->next_slot = iter->xr->buckets[iter->current_bucket];
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
}
return key;
}
int xr_hash__strhash( void * instr )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) instr;
size_t len = strlen(str);
int hash = 0;
int c = 0;
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
while ( hash < 1 )
hash = 1 + ( hash * -1 );
return hash;
}
int xr_hash__strcmp( void * stra, void * strb )
{
return strcmp((char*)stra,(char*)strb);
}
diff --git a/src/lib/xrhash.h b/src/lib/xrhash.h
index 9655b13..ea68f3b 100644
--- a/src/lib/xrhash.h
+++ b/src/lib/xrhash.h
@@ -1,202 +1,208 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#ifndef XRHASH_SLOTS
#define XRHASH_SLOTS 8192
#endif
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
/**
* @brief function to generate a hash code for the object at the given pointer. should return an int greater than zero.
*/
typedef int (*hashfn)(void*);
/**
* @brief function to compare two objects for equality
*/
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
void * key;
void * value;
int hashcode;
XRHashLink * next;
};
/**
* @brief xrhash hashtable object
*/
typedef struct xrhash
{
int hash_generation; /* used to monitor changes in the hash for iterators */
hashfn hash;
cmpfn cmp;
size_t count;
size_t touched_indexes;
size_t maxslots;
int index_mod_magic;
XRHashLink ** buckets;
} XRHash;
/**
* @brief xrhash iteration object
*/
typedef struct xrhash_iter
{
XRHash * xr;
int hash_generation;
int current_bucket;
XRHashLink * next_slot;
} XRHashIter;
/**
* @brief initialize an xrhash hashtable object
*
* @param hash callback function for hashcode generation
* @param cmp callback function for equality comparison (e.g. strcmp)
*
* @return
*/
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
/**
* @brief initialize an xrhash hashtable object with a specific number of hash buckets
*
* @param hash callback function for hashcode generation
* @param cmp callback function for equality comparison (e.g. strcmp)
* @param len number of buckets to use
*
* @return
*/
XRHash * xr_init_hash_len( int (*hash)(void*), int(*cmp)(void*,void*), size_t len );
/**
* @brief free an xrhash
*
* @param xr hashtable to free
*/
void xr_hash_free( XRHash * xr );
/**
* @brief add an object to the given xr hashtable
*
* @param xr hashtable to add to
* @param key store value with this key
* @param value object to store
*
* @return XRHASH_ADDED on success, XRHASH_ADDED_ALREADY if the key is already taken or XRHASH_NULL_KEY if key was NULL
*/
int xr_hash_add( XRHash * xr, void * key, void * value );
/**
* @brief test if a given key exists in the hashtable
*
* @param xr hashtable to check
* @param key pointer to key
*
* @return XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE
*/
int xr_hash_contains( XRHash * xr, void * key );
/**
* @brief get the value corresponding to key
*
* @param xr hashtable to access
* @param key key to use
* @param dataout put value (pointer) here
*
* @returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_get( XRHash * xr, void * key, void **dataout );
/**
* @brief delete a given key+value from the hashtable
*
* @param xr hashtable to access
* @param key delete the value with this key
*
* @return
*/
int xr_hash_remove( XRHash * xr, void * key );
/**
* @brief initialize an object to iterate forwards through keys in the hashtable
*
* @param xr hashtable to use
*
* @return initialized iterator object
*/
XRHashIter * xr_init_hashiterator( XRHash * xr );
+/**
+* @brief reset a hash iterator
+*
+* @param iter
+*/
+void xr_hash_resetiterator( XRHashIter * iter );
/**
* @brief iterate to the next item using a hash iterator
*
* @param iter iterator to use
*
* @return pointer to a key used in the iter->xr hashtable
*/
void * xr_hash_iteratekey( XRHashIter * iter );
/**
* @brief generate a hashcode for a given null terminated string
*
* @param str string to hash
*
* @return hashcode >= 1 on success, <= 0 on error
*/
int xr_hash__strhash( void * str );
/**
* @brief wrapper around strcmp
*
* @param stra
* @param strb
*
* @return 0 if the strings match, else a negative or positive value (as strcmp)
*/
int xr_hash__strcmp( void * stra, void * strb );
int xr_get_index( XRHash * xr, int hashcode );
int xr_get_hashcode( XRHash * xr, void*keyptr );
#endif
|
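xr_hash_resetiterator rewinds an existing iterator rather than forcing callers to allocate a fresh one per pass, which is what this commit enables. A sketch of the reuse pattern, assuming the string helpers from xrhash.h (keys are illustrative):

#include <stdio.h>
#include "xrhash.h"

int main( void )
{
  XRHash * xr = xr_init_hash( &xr_hash__strhash, &xr_hash__strcmp );
  XRHashIter * iter = NULL;
  void * key = NULL;
  int pass;

  xr_hash_add( xr, "one", (void*)1 );
  xr_hash_add( xr, "two", (void*)2 );

  iter = xr_init_hashiterator( xr );
  for ( pass = 1; pass <= 2; pass++ ){
    while ( ( key = xr_hash_iteratekey( iter ) ) != NULL )
      printf( "pass %d: %s\n", pass, (char*)key );
    xr_hash_resetiterator( iter );  /* rewind for the next pass */
  }

  free( iter );  /* iterators are plain malloc'd blocks */
  xr_hash_free( xr );
  return 0;
}

The reset also resynchronizes hash_generation, so it is the supported way to resume iteration after the table has been modified; calling xr_hash_iteratekey without a reset after an add or remove still trips the abort() guard.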
inorton/xrhash | 4cb9b5eac467810b28c0e53401655d1a4bf9edd5 | export libpath | diff --git a/src/examples/SConscript b/src/examples/SConscript
index f06bc7b..2ae852b 100644
--- a/src/examples/SConscript
+++ b/src/examples/SConscript
@@ -1,5 +1,3 @@
Import("env");
-env.Append(CPPPATH="..")
-
-env.Program("spacemap.c",LIBS=["xrhash","ncurses"],LIBPATH="..")
+env.Program("spacemap.c",LIBS=["xrhash","ncurses"])
diff --git a/src/lib/SConscript b/src/lib/SConscript
index 6ed3345..407f8e2 100644
--- a/src/lib/SConscript
+++ b/src/lib/SConscript
@@ -1,10 +1,10 @@
import os
Import("env")
libxrhash = env.Library( "xrhash","xrhash.c")
-
+env.Append(LIBPATH=os.getcwd() )
env.Append(CPPPATH=os.getcwd() )
Export("env");
SConscript("tests/SConscript",exports="env")
diff --git a/src/lib/tests/SConscript b/src/lib/tests/SConscript
index df2a976..642dac0 100644
--- a/src/lib/tests/SConscript
+++ b/src/lib/tests/SConscript
@@ -1,4 +1,4 @@
Import("env")
-env.Program( "xrhash-test", ["xrhash-test.c","testutils.c"] ,LIBS="xrhash",LIBPATH=".." );
+env.Program( "xrhash-test", ["xrhash-test.c","testutils.c"] ,LIBS="xrhash" );
|
inorton/xrhash | f422365065d7c46a7c34487af3624a2e33ad1ac5 | move cflags out | diff --git a/src/lib/tests/SConscript b/src/lib/tests/SConscript
index 28d6c9a..df2a976 100644
--- a/src/lib/tests/SConscript
+++ b/src/lib/tests/SConscript
@@ -1,6 +1,4 @@
Import("env")
-env.Append(CFLAGS="-Wall -Werror")
-env.Append(CFLAGS="-g -O0")
env.Program( "xrhash-test", ["xrhash-test.c","testutils.c"] ,LIBS="xrhash",LIBPATH=".." );
|
inorton/xrhash | 90dc41154fc964e9cf1cb3fe808ae91656f54ba7 | move cflags to SConstruct file | diff --git a/SConstruct b/SConstruct
index 778c9a1..ecf49d6 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1,6 +1,7 @@
import os
env = Environment()
-
+env.Append(CFLAGS="-Wall -Werror")
+env.Append(CFLAGS="-g -O0")
env.SConscript("src/lib/SConscript",exports="env");
diff --git a/src/lib/SConscript b/src/lib/SConscript
index a5d8b53..6ed3345 100644
--- a/src/lib/SConscript
+++ b/src/lib/SConscript
@@ -1,12 +1,10 @@
import os
Import("env")
-env.Append(CFLAGS="-Wall -Werror")
-env.Append(CFLAGS="-g -O0")
libxrhash = env.Library( "xrhash","xrhash.c")
env.Append(CPPPATH=os.getcwd() )
Export("env");
SConscript("tests/SConscript",exports="env")
|
inorton/xrhash | 1ef9ac0dae72fe3aefb63cafc645495c07323645 | scons tweaks | diff --git a/SConstruct b/SConstruct
index a3ab98e..778c9a1 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1 +1,6 @@
-SConscript("src/lib/SConscript");
+import os
+
+env = Environment()
+
+env.SConscript("src/lib/SConscript",exports="env");
+
diff --git a/src/lib/SConscript b/src/lib/SConscript
index b4b7d2a..a5d8b53 100644
--- a/src/lib/SConscript
+++ b/src/lib/SConscript
@@ -1,12 +1,12 @@
import os
-env = Environment()
+Import("env")
env.Append(CFLAGS="-Wall -Werror")
env.Append(CFLAGS="-g -O0")
libxrhash = env.Library( "xrhash","xrhash.c")
env.Append(CPPPATH=os.getcwd() )
Export("env");
SConscript("tests/SConscript",exports="env")
|
inorton/xrhash | 2eb062c0fdd8b201486bfd8b1c17d909d597082d | moved xrhash into src/lib | diff --git a/SConstruct b/SConstruct
index bdadb56..a3ab98e 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1 +1 @@
-SConscript("src/SConscript");
+SConscript("src/lib/SConscript");
diff --git a/src/SConscript b/src/SConscript
deleted file mode 100644
index 5ff44ab..0000000
--- a/src/SConscript
+++ /dev/null
@@ -1,9 +0,0 @@
-env = Environment()
-env.Append(CFLAGS="-Wall -Werror")
-env.Append(CFLAGS="-g -O0")
-env.Library( "xrhash","xrhash.c");
-
-Export("env");
-
-SConscript("tests/SConscript");
-SConscript("examples/SConscript")
diff --git a/src/lib/SConscript b/src/lib/SConscript
new file mode 100644
index 0000000..b4b7d2a
--- /dev/null
+++ b/src/lib/SConscript
@@ -0,0 +1,12 @@
+import os
+
+env = Environment()
+env.Append(CFLAGS="-Wall -Werror")
+env.Append(CFLAGS="-g -O0")
+libxrhash = env.Library( "xrhash","xrhash.c")
+
+env.Append(CPPPATH=os.getcwd() )
+
+Export("env");
+
+SConscript("tests/SConscript",exports="env")
diff --git a/src/tests/SConscript b/src/lib/tests/SConscript
similarity index 100%
rename from src/tests/SConscript
rename to src/lib/tests/SConscript
diff --git a/src/tests/cs/test_Dictionary.cs b/src/lib/tests/cs/test_Dictionary.cs
similarity index 100%
rename from src/tests/cs/test_Dictionary.cs
rename to src/lib/tests/cs/test_Dictionary.cs
diff --git a/src/tests/testutils.c b/src/lib/tests/testutils.c
similarity index 100%
rename from src/tests/testutils.c
rename to src/lib/tests/testutils.c
diff --git a/src/tests/testutils.h b/src/lib/tests/testutils.h
similarity index 100%
rename from src/tests/testutils.h
rename to src/lib/tests/testutils.h
diff --git a/src/tests/xrhash-test.c b/src/lib/tests/xrhash-test.c
similarity index 100%
rename from src/tests/xrhash-test.c
rename to src/lib/tests/xrhash-test.c
diff --git a/src/xrhash.c b/src/lib/xrhash.c
similarity index 100%
rename from src/xrhash.c
rename to src/lib/xrhash.c
diff --git a/src/xrhash.h b/src/lib/xrhash.h
similarity index 100%
rename from src/xrhash.h
rename to src/lib/xrhash.h
|
inorton/xrhash | 8305f05c834d51a8dad8e9956fcbf0485c9cc8f8 | c# comparison with Dictionary | diff --git a/src/tests/cs/test_Dictionary.cs b/src/tests/cs/test_Dictionary.cs
index 092a4b7..f27a979 100644
--- a/src/tests/cs/test_Dictionary.cs
+++ b/src/tests/cs/test_Dictionary.cs
@@ -1,24 +1,41 @@
using System;
+using System.Diagnostics;
using System.Collections.Generic;
namespace dictionary_test {
public class DictionaryTester
{
- static readonly int size = 64000;
+ static readonly int size = 200000;
public static void Main()
{
List<string> keydata = new List<string>(size);
for( int x = 0; x < size; x++ ){
string s = String.Format("foo{0}",x);
- keys.Add(s);
+ keydata.Add(s);
}
+
+ Stopwatch w = new Stopwatch();
+
+ w.Start();
+ Dictionary<string,int> test = new Dictionary<string,int>(size);
+ int i = 0;
+ foreach ( string s in keydata ){
+ test.Add( s, i );
+ i++;
+ }
+ w.Stop();
+
+ TimeSpan t = w.Elapsed;
+
+ Console.WriteLine( "avg add time {0}ms", t.TotalMilliseconds / size);
+
}
}
}
|
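The Stopwatch loop above has a straightforward C analogue: time a batch of xr_hash_add calls with gettimeofday and divide by the item count. A rough sketch at the same 200000-item workload (key buffer layout simplified relative to the repository's own test harness):

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "xrhash.h"

#define N 200000
#define KEYLEN 16

int main( void )
{
  XRHash * xr = xr_init_hash_len( &xr_hash__strhash, &xr_hash__strcmp, N );
  char * keys = (char*) malloc( N * KEYLEN );
  struct timeval t0, t1;
  long long usec;
  int i;

  for ( i = 0; i < N; i++ )  /* build keys before timing starts */
    snprintf( keys + (i * KEYLEN), KEYLEN, "foo%d", i );

  gettimeofday( &t0, NULL );
  for ( i = 0; i < N; i++ )
    xr_hash_add( xr, keys + (i * KEYLEN), (void*)(long)(i + 1) );
  gettimeofday( &t1, NULL );

  usec = (long long)(t1.tv_sec - t0.tv_sec) * 1000000
       + ( t1.tv_usec - t0.tv_usec );
  printf( "avg add time %fms\n", ( usec / 1000.0 ) / N );

  xr_hash_free( xr );
  free( keys );
  return 0;
}

Building the keys outside the timed region matches what the C# test measures: Dictionary.Add alone, not String.Format.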
inorton/xrhash | 3c3e9655d778b249e9d98d25e47e243b2f917d6c | have multiple xrhashes for different data sizes | diff --git a/src/SConscript b/src/SConscript
index 467da98..5ff44ab 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -1,11 +1,9 @@
env = Environment()
env.Append(CFLAGS="-Wall -Werror")
env.Append(CFLAGS="-g -O0")
env.Library( "xrhash","xrhash.c");
-env.Program( "xrhash-test", ["xrhash-test.c","testutils.c"] ,LIBS="xrhash",LIBPATH="." );
-
-
Export("env");
+SConscript("tests/SConscript");
SConscript("examples/SConscript")
diff --git a/src/tests/SConscript b/src/tests/SConscript
new file mode 100644
index 0000000..28d6c9a
--- /dev/null
+++ b/src/tests/SConscript
@@ -0,0 +1,6 @@
+Import("env")
+env.Append(CFLAGS="-Wall -Werror")
+env.Append(CFLAGS="-g -O0")
+env.Program( "xrhash-test", ["xrhash-test.c","testutils.c"] ,LIBS="xrhash",LIBPATH=".." );
+
+
diff --git a/src/tests/cs/test_Dictionary.cs b/src/tests/cs/test_Dictionary.cs
new file mode 100644
index 0000000..092a4b7
--- /dev/null
+++ b/src/tests/cs/test_Dictionary.cs
@@ -0,0 +1,24 @@
+using System;
+using System.Collections.Generic;
+
+namespace dictionary_test {
+ public class DictionaryTester
+ {
+ static readonly int size = 64000;
+
+ public static void Main()
+ {
+ List<string> keydata = new List<string>(size);
+ for( int x = 0; x < size; x++ ){
+ string s = String.Format("foo{0}",x);
+ keys.Add(s);
+ }
+
+
+
+ }
+
+ }
+
+}
+
diff --git a/src/testutils.c b/src/tests/testutils.c
similarity index 100%
rename from src/testutils.c
rename to src/tests/testutils.c
diff --git a/src/testutils.h b/src/tests/testutils.h
similarity index 100%
rename from src/testutils.h
rename to src/tests/testutils.h
diff --git a/src/tests/xrhash-test.c b/src/tests/xrhash-test.c
new file mode 100644
index 0000000..38ff7f5
--- /dev/null
+++ b/src/tests/xrhash-test.c
@@ -0,0 +1,155 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "xrhash.h"
+#include <assert.h>
+
+#include "testutils.h"
+#include <sys/time.h>
+
+#define TEST_STR "foo%d"
+
+int assert_contains( XRHash * xr, void * keyptr, int expected )
+{
+ int rv = xr_hash_contains( xr, keyptr );
+ int hc;
+ if ( rv == expected ) return rv;
+ hc = xr_get_hashcode( xr, keyptr );
+ fprintf(stderr,"test failure: xr_hash_contains(xr=0x%08x,key=0x%08x) returned 0x%02x - expected 0x%02x\n",
+ (int)xr, (int)keyptr, rv, expected );
+ fprintf(stderr," : key hashcode = 0x%08x, key index = 0x%08x\n",
+ hc, xr_get_index( xr, hc ) );
+ abort();
+}
+
+int assert_get( XRHash * xr, void * keyptr, void ** outptr, int expected )
+{
+ int rv = xr_hash_get( xr, keyptr, outptr );
+ int hc;
+ if ( rv == expected ) return rv;
+
+ hc = xr_get_hashcode( xr, keyptr );
+ fprintf(stderr,"test failure: xr_hash_get(xr=0x%08x,key=0x%08x,outptr=0x%08x) returned 0x%02x - expected 0x%02x\n",
+ (int)xr, (int)keyptr, (int)outptr, rv, expected );
+ fprintf(stderr," : key hashcode = 0x%08x, key index = 0x%08x\n",
+ hc, xr_get_index( xr, hc ) );
+
+ abort();
+}
+
+
+void iterate( XRHash * xr )
+{
+ XRHashIter * iter = xr_init_hashiterator( xr );
+ void * key = NULL;
+ int x = 0;
+
+ while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
+ void * value = NULL;
+ assert ( xr_hash_get( xr, key , &value ) == XRHASH_EXISTS_TRUE );
+ x++;
+ }
+
+ assert( x == xr->maxslots );
+
+ free(iter);
+}
+
+int runtest( int hashsize )
+{
+ struct timeval tstart;
+ struct timeval tend;
+
+ XRHash * xr = xr_init_hash_len( &xr_hash__strhash , &xr_hash__strcmp, hashsize );
+
+ int x = 0;
+ int contains = -1;
+ int datacount = hashsize;
+
+ int datalen = 10 + strlen(TEST_STR); /* ten digits */
+
+ char* data_vec = (char*) malloc ( (datacount * datalen * sizeof(char)) + datacount );
+ char * newstr = data_vec;
+ fprintf(stderr,"test add\n");
+ gettimeofday( &tstart, 0x0 );
+ while ( x++ < datacount ){
+ snprintf(newstr,datalen,TEST_STR,x);
+ xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
+ newstr[datalen] = 0x0;
+ newstr += datalen;
+ }
+ gettimeofday( &tend, 0x0 );
+ fprintf(stderr,"* avg %lld us per add", (timeval_diff(NULL,&tend,&tstart))/datacount );
+ fprintf(stderr,"\n");
+
+
+ fprintf(stderr,"test get\n");
+ newstr = data_vec;
+ x = 0;
+ gettimeofday( &tstart, 0x0 );
+ while ( x++ < datacount ){
+ int got = -1;
+ assert_get( xr, newstr, (void**) &got, XRHASH_EXISTS_TRUE );
+ assert( got == x );
+ newstr += datalen;
+ }
+
+ gettimeofday( &tend, 0x0 );
+ fprintf(stderr,"* avg %lld us per get", (timeval_diff(NULL,&tend,&tstart))/datacount );
+ fprintf(stderr,"\n");
+
+ fprintf(stderr,"test iteration\n");
+ gettimeofday( &tstart, 0x0 );
+ iterate( xr );
+ gettimeofday( &tend, 0x0 );
+ fprintf(stderr,"* avg %lld us per iteration with get", (timeval_diff(NULL,&tend,&tstart))/datacount );
+ fprintf(stderr,"\n");
+
+
+ fprintf(stderr,"test remove\n");
+ newstr = data_vec;
+ x = 0;
+ gettimeofday( &tstart, 0x0 );
+ while ( x++ < datacount ){
+ contains = assert_contains( xr, newstr, XRHASH_EXISTS_TRUE );
+ xr_hash_remove( xr, newstr );
+ contains = assert_contains( xr, newstr, XRHASH_EXISTS_FALSE );
+ newstr += datalen;
+ }
+ gettimeofday( &tend, 0x0 );
+ fprintf(stderr,"* avg %lld us per remove", (timeval_diff(NULL,&tend,&tstart))/datacount );
+ fprintf(stderr,"\n");
+
+ assert( xr->count == 0 );
+
+
+
+
+ fprintf(stderr,"\n---\ncompleted test:");
+ fprintf(stderr,"unique items added : %d\n",datacount);
+ fprintf(stderr,"used unique indexes : %d\n",xr->touched_indexes);
+ fprintf(stderr,"index collisions : %d\n",datacount - xr->touched_indexes);
+ fprintf(stderr,"collision factor : %f\n", ( xr->touched_indexes + 0.0 ) / datacount );
+ fprintf(stderr,"average lookups / key : %f\n", ( datacount + 0.0 ) / xr->touched_indexes );
+
+
+ xr_hash_free( xr );
+ free(data_vec);
+
+ return 0;
+}
+
+
+int main( int argc, char** argv )
+{
+ runtest( 512 );
+ runtest( 4096 );
+ runtest( 8192 );
+ runtest( 16384 );
+ runtest( 65000 );
+ runtest( 200000 );
+ return 0;
+}
+
+
diff --git a/src/xrhash-test.c b/src/xrhash-test.c
deleted file mode 100644
index 4f00c6d..0000000
--- a/src/xrhash-test.c
+++ /dev/null
@@ -1,102 +0,0 @@
-#include <stdio.h>
-
-#define XRHASH_SLOTS 32768
-
-#include "xrhash.h"
-#include <assert.h>
-
-#include "testutils.h"
-#include <sys/time.h>
-
-#define TEST_STR "foo%d"
-#define DATASET 64000
-
-void iterate( XRHash * xr )
-{
- XRHashIter * iter = xr_init_hashiterator( xr );
- void * key = NULL;
- int x = 0;
-
- while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
- void * value = NULL;
- assert ( xr_hash_get( xr, key , &value ) == XRHASH_EXISTS_TRUE );
- x++;
- }
-
- assert( x == DATASET );
-
- free(iter);
-}
-
-int main( int argc, char** argv )
-{
- struct timeval tstart;
- struct timeval tend;
-
- XRHash * xr = xr_init_hash( NULL, NULL );
-
- int x = 0;
- int contains = -1;
- int datacount = DATASET;
-
- int datalen = 10 + strlen(TEST_STR); /* ten digits */
-
- char* data_vec = (char*) malloc ( datacount * datalen * sizeof(char) );
- char * newstr = data_vec;
- fprintf(stderr,"test add\n");
- gettimeofday( &tstart, 0x0 );
- while ( x++ < datacount ){
- snprintf(newstr,datalen,TEST_STR,x);
- xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
- newstr += 4;
- }
- gettimeofday( &tend, 0x0 );
- fprintf(stderr,"* avg %lld us per add", (timeval_diff(NULL,&tend,&tstart))/datacount );
- fprintf(stderr,"\n");
-
-
- fprintf(stderr,"test get\n");
- newstr = data_vec;
- x = 0;
- gettimeofday( &tstart, 0x0 );
- while ( x++ < datacount ){
- int got = -1;
- contains = xr_hash_get( xr, newstr, (void**) &got );
- assert( contains == XRHASH_EXISTS_TRUE );
- assert( got == x );
- newstr += 4;
- }
-
- gettimeofday( &tend, 0x0 );
- fprintf(stderr,"* avg %lld us per get", (timeval_diff(NULL,&tend,&tstart))/datacount );
- fprintf(stderr,"\n");
-
- fprintf(stderr,"test iteration\n");
- gettimeofday( &tstart, 0x0 );
- iterate( xr );
- gettimeofday( &tend, 0x0 );
- fprintf(stderr,"* avg %lld us per iteration with get", (timeval_diff(NULL,&tend,&tstart))/datacount );
- fprintf(stderr,"\n");
-
-
-
- fprintf(stderr,"test remove\n");
- newstr = data_vec;
- x = 0;
- gettimeofday( &tstart, 0x0 );
- while ( x++ < datacount ){
- contains = xr_hash_remove( xr, newstr );
- assert( contains == XRHASH_REMOVED );
- newstr += 4;
- }
- gettimeofday( &tend, 0x0 );
- fprintf(stderr,"* avg %lld us per remove", (timeval_diff(NULL,&tend,&tstart))/datacount );
- fprintf(stderr,"\n");
-
- assert( xr->count == 0 );
-
- free( xr );
- free(data_vec);
-
- return 0;
-}
diff --git a/src/xrhash.c b/src/xrhash.c
index b47eb3d..0a180c0 100644
--- a/src/xrhash.c
+++ b/src/xrhash.c
@@ -1,311 +1,353 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
inline int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
if ( key == NULL )
return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
+int xr_get_hashcode( XRHash * xr, void * key )
+{
+ return xr__get_hashcode( xr, key );
+}
inline int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
+ int pindex = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
- while ( index >= xr->maxslots )
- index = index % XRHASH_MOD;
-
+ while ( index >= xr->maxslots ){
+ pindex = index;
+ index = index % ( xr->maxslots - xr->index_mod_magic );
+ if ( index < 1 ){
+ index = (index * -1);
+ }
+ if ( index == 0 ){
+ index = 1 + ( pindex - ( pindex / 3 ));
+ }
+ }
return index;
}
+int xr_get_index( XRHash * xr, int hashcode )
+{
+ return xr__get_index( xr, hashcode );
+}
+
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
return xr_init_hash_len( hash, cmp, XRHASH_SLOTS );
}
XRHash * xr_init_hash_len( hashfn hash , cmpfn cmp, size_t len )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
  /* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
table->maxslots = len;
table->hash_generation = 0;
+ table->index_mod_magic = ( len > 512 ) ? 91 : 3 ;
table->buckets = (XRHashLink**)calloc(len,sizeof(XRHashLink*));
}
return table;
}
+void xr_hash_free( XRHash * xr )
+{
+ if ( xr == NULL ) return;
+ XRHashLink * slot = xr->buckets[0];
+ int bucket = 0;
+ while ( slot != NULL ){
+ XRHashLink * nextslot = slot->next;
+ if ( nextslot == NULL ){
+ if ( (++bucket) < ( xr->maxslots ) ){
+ nextslot = xr->buckets[bucket];
+ }
+ } else {
+ if ( slot != xr->buckets[bucket] ){
+ slot->next = NULL;
+ free( slot );
+ }
+ }
+ slot = nextslot;
+ }
+ free(xr->buckets);
+ free(xr);
+}
+
int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
+ xr->touched_indexes++;
slot = xr->buckets[index];
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->hashcode == 0 ){
break;
}
if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
}
int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
xr->hash_generation++;
slot->key = NULL;
slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
int xr_hash_get( XRHash * xr, void * key, void **dataout )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
*dataout = slot->value;
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
XRHashIter * xr_init_hashiterator( XRHash * xr )
{
XRHashIter * iter = (XRHashIter*)malloc(1*sizeof(XRHashIter));
if ( iter == NULL ){
errno = ENOMEM;
} else {
iter->xr = xr;
iter->hash_generation = xr->hash_generation;
iter->current_bucket = 0;
iter->next_slot = xr->buckets[0];
}
return iter;
}
void * xr_hash_iteratekey( XRHashIter * iter )
{
void * key = NULL;
if ( iter->xr->hash_generation != iter->hash_generation ){
fprintf(stderr,"hash changed during iteration\n");
abort();
/* return NULL; */
}
if ( iter->next_slot != NULL ){ /* iterate through links in the current bucket */
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
} else { /* no more links here, move to next bucket */
- while ( iter->xr->buckets[++iter->current_bucket] == NULL ){
- if ( iter->current_bucket >= iter->xr->maxslots )
- return NULL; /* no more filled buckets, end of iterations */
- }
+ do {
+ if ( ++iter->current_bucket >= iter->xr->maxslots )
+ return NULL; /* no more filled buckets, end of iterations */
+ } while ( iter->xr->buckets[iter->current_bucket] == NULL );
/* reached the end of the hash */
if ( iter->current_bucket >= iter->xr->maxslots )
return NULL; /* end of iterations */
/* now pointing at the next slot */
iter->next_slot = iter->xr->buckets[iter->current_bucket];
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
}
return key;
}
int xr_hash__strhash( void * instr )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) instr;
size_t len = strlen(str);
int hash = 0;
int c = 0;
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
- if ( hash < 1 )
- hash = 3 + ( hash * -1 );
+ while ( hash < 1 )
+ hash = 1 + ( hash * -1 );
return hash;
}
int xr_hash__strcmp( void * stra, void * strb )
{
return strcmp((char*)stra,(char*)strb);
}
diff --git a/src/xrhash.h b/src/xrhash.h
index 8d6f619..9655b13 100644
--- a/src/xrhash.h
+++ b/src/xrhash.h
@@ -1,187 +1,202 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#ifndef XRHASH_SLOTS
#define XRHASH_SLOTS 8192
#endif
-#define XRHASH_MOD (XRHASH_SLOTS - 91)
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
/**
* @brief function to generate a hash code for the object at the given pointer. should return an int greater than zero.
*/
typedef int (*hashfn)(void*);
/**
* @brief function to compare two objects for equality
*/
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
void * key;
void * value;
int hashcode;
XRHashLink * next;
};
/**
* @brief xrhash hashtable object
*/
typedef struct xrhash
{
int hash_generation; /* used to monitor changes in the hash for iterators */
hashfn hash;
cmpfn cmp;
size_t count;
+ size_t touched_indexes;
size_t maxslots;
+ int index_mod_magic;
XRHashLink ** buckets;
} XRHash;
/**
* @brief xrhash iteration object
*/
typedef struct xrhash_iter
{
XRHash * xr;
int hash_generation;
int current_bucket;
XRHashLink * next_slot;
} XRHashIter;
/**
* @brief initialize an xrhash hashtable object
*
* @param hash callback function for hashcode generation
* @param cmp callback function for equality comparison (e.g. strcmp)
*
* @return
*/
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
/**
* @brief initialize an xrhash hashtable object with a specific number of hash buckets
*
* @param hash callback function for hashcode generation
* @param cmp callback function for equality comparison (e.g. strcmp)
* @param len number of buckets to use
*
* @return
*/
XRHash * xr_init_hash_len( int (*hash)(void*), int(*cmp)(void*,void*), size_t len );
+
+/**
+* @brief free an xrhash
+*
+* @param xr hashtable to free
+*/
+void xr_hash_free( XRHash * xr );
+
+
/**
* @brief add an object to the given xr hashtable
*
* @param xr hashtable to add to
* @param key store value with this key
* @param value object to store
*
* @return XRHASH_ADDED on success, XRHASH_ADDED_ALREADY if the key is already taken or XRHASH_NULL_KEY if key was NULL
*/
int xr_hash_add( XRHash * xr, void * key, void * value );
/**
* @brief test if a given key exists in the hashtable
*
* @param xr hashtable to check
* @param key pointer to key
*
* @return XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE
*/
int xr_hash_contains( XRHash * xr, void * key );
/**
* @brief get the value corresponding to key
*
* @param xr hashtable to access
* @param key key to use
* @param dataout put value (pointer) here
*
* @returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_get( XRHash * xr, void * key, void **dataout );
/**
* @brief delete a given key+value from the hashtable
*
* @param xr hashtable to access
* @param key delete the value with this key
*
* @return
*/
int xr_hash_remove( XRHash * xr, void * key );
/**
* @brief initialize an object to iterate forwards through keys in the hashtable
*
* @param xr hashtable to use
*
* @return initialized iterator object
*/
XRHashIter * xr_init_hashiterator( XRHash * xr );
/**
* @brief iterate to the next item using a hash iterator
*
* @param iter iterator to use
*
* @return pointer to a key used in the iter->xr hashtable
*/
void * xr_hash_iteratekey( XRHashIter * iter );
/**
* @brief generate a hashcode for a given null terminated string
*
* @param str string to hash
*
* @return hashcode >= 1 on success, <= 0 on error
*/
int xr_hash__strhash( void * str );
/**
* @brief wrapper around strcmp
*
* @param stra
* @param strb
*
* @return 0 if the strings match, else a negative or positive value (as strcmp)
*/
int xr_hash__strcmp( void * stra, void * strb );
+int xr_get_index( XRHash * xr, int hashcode );
+int xr_get_hashcode( XRHash * xr, void*keyptr );
+
+
+
#endif
|
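Exporting xr_get_hashcode and xr_get_index, as this commit does, makes the two-stage mapping observable from test code: the user hash function yields a positive hashcode, and the index_mod_magic loop folds it into a bucket index below maxslots. A small probe sketch with an arbitrary 512-slot table:

#include <stdio.h>
#include "xrhash.h"

int main( void )
{
  XRHash * xr = xr_init_hash_len( &xr_hash__strhash, &xr_hash__strcmp, 512 );
  char * keys[] = { "foo1", "foo2", "foo3" };
  int i;

  for ( i = 0; i < 3; i++ ){
    int hc  = xr_get_hashcode( xr, keys[i] );  /* raw positive hash */
    int idx = xr_get_index( xr, hc );          /* folded bucket index */
    printf( "%-6s hashcode=%-10d index=%d\n", keys[i], hc, idx );
  }

  xr_hash_free( xr );
  return 0;
}

Distinct hashcodes can fold to the same index; the touched_indexes counter and the statistics printed at the end of runtest() quantify how often that happens for a given table size.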
inorton/xrhash | bdc4c54998cbe1c1c599a8bee628cdb7e7bdece8 | add default string hashcode and comparison functions, also add xr_hash_init_len() | diff --git a/src/examples/spacemap.c b/src/examples/spacemap.c
index aef22f0..e5f2387 100644
--- a/src/examples/spacemap.c
+++ b/src/examples/spacemap.c
@@ -1,372 +1,360 @@
#include <ncurses.h>
#include <assert.h>
#include "spacemap.h"
#include <string.h>
int spaceship_id_cmp( void * id_a, void * id_b )
{
- return strcmp((char*)id_a,(char*)id_b);
+ return xr_hash__strcmp( id_a, id_b );
}
/* id is a pointer to spaceship.id */
int spaceship_id_hash( void * id )
{
- /* this hashes strings in a similar way to the mono String.cs class */
- char* str = (char*) id;
- size_t len = strlen(str);
- int hash = 0;
- int c = 0;
- while ( c < len ){
- hash = (hash << 5) - hash + str[c];
- c++;
- }
- if ( hash < 1 )
- hash = 3 + ( hash * -1 );
-
- return hash;
+ return xr_hash__strhash( id );
}
spacemap * spacemap_init()
{
spacemap * map = (spacemap*) malloc( 1 * sizeof(spacemap) );
map->xr = xr_init_hash( &spaceship_id_hash, &spaceship_id_cmp );
map->selected_ship = "bob";
return map;
}
spaceship * new_spaceship( char* name, uint8 type, int x, int y )
{
spaceship * ship = (spaceship*)malloc(1 * sizeof(spaceship));
ship->name = strdup( name );
ship->x = x * 100;
ship->y = y * 100;
ship->vx = 0;
ship->vy = 0;
return ship;
}
void new_random_ship(spacemap * map)
{
int rand_x = ((int) rand()) % map->w;
int rand_y = ((int) rand()) % map->h;
char * shipname = (char*) malloc( 20 * sizeof(char));
spaceship * newship;
snprintf(shipname,20,"contact %d",map->xr->count + 1 );
newship = new_spaceship( shipname, RAIDER, rand_x, rand_y );
if ( (rand_x % 2 )) {rand_x = rand_x * -0.5; } else {rand_x = rand_x * 0.5; }
if ( (rand_y % 2 )) {rand_y = rand_y * -0.5; } else {rand_y = rand_y * 0.5; }
newship->vx = rand_x;
newship->vy = rand_y;
spacemap_add( map, newship );
}
void spacemap_add( spacemap * map, spaceship * ship )
{
xr_hash_add( map->xr, (void*)ship->name, ship );
}
spaceship * spacemap_get( spacemap * map, const char* name )
{
spaceship * ret = NULL;
xr_hash_get( map->xr, (void*) name, (void**) &ret );
return ret;
}
void moveships( spacemap * map )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
ship->x += ship->vx;
ship->y += ship->vy;
/* wrap/warp ship at edges of map - asteroids style */
if ( ship->x > ( map->w * 100 ) ){
ship->x = 0;
}
if ( ship->x < 0 ){
ship->x = map->w * 100;
}
if ( ship->y > ( map->h * 100 ) ){
ship->y = 0;
}
if ( ship->y < 0 ){
ship->y = map->h * 100;
}
}
free ( iter );
}
void clearships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
mvwprintw( wind, shipy, shipx, " " );
mvwprintw( wind, shipy+1,shipx+1," ");
}
free ( iter );
}
void paintinfo ( spacemap * map, WINDOW * wind )
{
int menu_row = 0;
int menu_max_rows = LINES / 2;
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
spaceship * current = spacemap_get( map, map->selected_ship );
werase( wind );
/* paint selected item details */
mvwprintw( wind, 3, 2, "Current Target" );
wattron(wind,A_BOLD);
mvwprintw( wind, 5, 2, "Name: %s", current->name );
mvwprintw( wind, 6, 2, "Position: x = %d", current->x );
mvwprintw( wind, 7, 2, " y = %d", current->y );
wattroff(wind,A_BOLD);
/* paint list - with selected highlighted */
mvwprintw( wind, 10, 2, "%d Radar Contact(s)", map->xr->count );
while (( key = xr_hash_iteratekey( iter ) ) != NULL ){
char * iname = (char*)key;
if ( menu_row > menu_max_rows ) break;
if ( strcmp(iname,current->name) == 0 ){
wattron(wind,A_REVERSE);
}
mvwprintw( wind, 12 + menu_row, 2, "* %-12s", iname );
if ( strcmp(iname,current->name) == 0 ){
wattroff(wind,A_REVERSE);
}
menu_row++;
}
free(iter);
/* paint help */
mvwprintw( wind, LINES - 7, 2 , "F1 - Quit" );
mvwprintw( wind, LINES - 6, 2 , "F2 - Create Ship" );
mvwprintw( wind, LINES - 5, 2 , "F9 - Select Previous Ship" );
mvwprintw( wind, LINES - 4, 2 , "F10 - Select Next Ship" );
box( wind, 0,0 );
}
void paintships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
if ( strcmp( map->selected_ship, ship->name ) == 0 ){
wattron(wind,A_BOLD);
mvwprintw( wind, shipy, shipx, "*" );
} else {
wattron(wind,A_DIM);
mvwprintw( wind, shipy, shipx, "." );
}
if ( strcmp( map->selected_ship, ship->name ) == 0 ){
mvwprintw( wind, shipy+1,shipx+1,"%s", ship->name );
wattroff(wind,A_BOLD);
} else {
wattron(wind,A_DIM);
}
}
free ( iter );
}
void choose_next( spacemap * map )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * next = NULL;
while (( next = xr_hash_iteratekey( iter ) ) != NULL )
{
if ( next != NULL ){
if ( strcmp( map->selected_ship, (char*) next ) == 0 ){
next = xr_hash_iteratekey( iter );
if ( next != NULL ){
map->selected_ship = next;
break;
}
}
}
}
free(iter);
}
void choose_previous ( spacemap * map )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * prev = NULL;
void * next = NULL;
do {
if ( next != NULL ){
if ( strcmp(next,map->selected_ship) == 0 ){
if ( prev != NULL )
map->selected_ship = prev;
break;
}
}
prev = next;
} while ( ( next = xr_hash_iteratekey( iter ) ) != NULL );
free(iter);
}
WINDOW * w_map;
WINDOW * w_info;
int main( int argc, char** argv )
{
int map_width;
int map_height;
int ch;
int info_width = 38;
int info_height;
spacemap * map = spacemap_init();
spaceship * bob;
/* setup ncurses */
initscr();
start_color();
cbreak();
init_pair(1,COLOR_RED, COLOR_BLACK); /* for selected ship */
info_height = map_height = LINES - 2;
map_width = COLS - info_width - 2;
map->w = map_width - 1;
map->h = map_height - 1;
w_map = newwin( map_height, map_width, 0, 0);
box( w_map, 0,0 );
w_info = newwin( info_height, info_width, 0, map_width + 1 );
box( w_info, 0,0 );
keypad( stdscr, TRUE );
refresh();
timeout(50);
/* add first ship */
bob = new_spaceship("bob",VIPER, 5, 12 );
bob->vx = 20;
bob->vy = 34;
spacemap_add( map, bob );
/* game loop */
while((ch = getch()) != KEY_F(1))
{ switch(ch)
{
case KEY_F(2):
/* make a new ship */
srand( bob->y );
new_random_ship( map );
break;
case KEY_F(10):
choose_next( map );
break;
case KEY_F(9):
choose_previous( map );
break;
default:
break;
}
wrefresh( w_map );
wrefresh( w_info );
/* move ships */
clearships( map, w_map );
moveships( map );
/* show ships */
paintships( map, w_map );
paintinfo( map, w_info );
box( w_map, 0, 0 );
/* game loop delay */
timeout(100);
}
endwin();
return 0;
}
diff --git a/src/xrhash.c b/src/xrhash.c
index 0849b77..b47eb3d 100644
--- a/src/xrhash.c
+++ b/src/xrhash.c
@@ -1,279 +1,311 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
inline int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
if ( key == NULL )
return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
inline int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
- while ( index >= XRHASH_SLOTS )
+ while ( index >= xr->maxslots )
index = index % XRHASH_MOD;
return index;
}
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
+{
+ return xr_init_hash_len( hash, cmp, XRHASH_SLOTS );
+}
+
+
+XRHash * xr_init_hash_len( hashfn hash , cmpfn cmp, size_t len )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
  /* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
+ table->maxslots = len;
table->hash_generation = 0;
+ table->buckets = (XRHashLink**)calloc(len,sizeof(XRHashLink*));
}
return table;
}
int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = xr->buckets[index];
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->hashcode == 0 ){
break;
}
if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
}
int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
xr->hash_generation++;
slot->key = NULL;
slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
int xr_hash_get( XRHash * xr, void * key, void **dataout )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
*dataout = slot->value;
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
XRHashIter * xr_init_hashiterator( XRHash * xr )
{
XRHashIter * iter = (XRHashIter*)malloc(1*sizeof(XRHashIter));
if ( iter == NULL ){
errno = ENOMEM;
} else {
iter->xr = xr;
iter->hash_generation = xr->hash_generation;
iter->current_bucket = 0;
iter->next_slot = xr->buckets[0];
}
return iter;
}
void * xr_hash_iteratekey( XRHashIter * iter )
{
void * key = NULL;
if ( iter->xr->hash_generation != iter->hash_generation ){
fprintf(stderr,"hash changed during iteration\n");
abort();
/* return NULL; */
}
if ( iter->next_slot != NULL ){ /* iterate through links in the current bucket */
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
} else { /* no more links here, move to next bucket */
while ( iter->xr->buckets[++iter->current_bucket] == NULL ){
- if ( iter->current_bucket >= XRHASH_SLOTS )
+ if ( iter->current_bucket >= iter->xr->maxslots )
return NULL; /* no more filled buckets, end of iterations */
}
/* reached the end of the hash */
- if ( iter->current_bucket >= XRHASH_SLOTS )
+ if ( iter->current_bucket >= iter->xr->maxslots )
return NULL; /* end of iterations */
/* now pointing at the next slot */
iter->next_slot = iter->xr->buckets[iter->current_bucket];
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
}
return key;
}
+
+int xr_hash__strhash( void * instr )
+{
+ /* this hashes strings in a similar way to the mono String.cs class */
+ char* str = (char*) instr;
+ size_t len = strlen(str);
+ int hash = 0;
+ int c = 0;
+ while ( c < len ){
+ hash = (hash << 5) - hash + str[c];
+ c++;
+ }
+ if ( hash < 1 )
+ hash = 3 + ( hash * -1 );
+
+ return hash;
+
+}
+
+int xr_hash__strcmp( void * stra, void * strb )
+{
+ return strcmp((char*)stra,(char*)strb);
+}
+
diff --git a/src/xrhash.h b/src/xrhash.h
index 2869c20..8d6f619 100644
--- a/src/xrhash.h
+++ b/src/xrhash.h
@@ -1,156 +1,187 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#ifndef XRHASH_SLOTS
#define XRHASH_SLOTS 8192
#endif
#define XRHASH_MOD (XRHASH_SLOTS - 91)
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
/**
* @brief function to generate a hash code for the object at the given pointer. should return an int greater than zero.
*/
typedef int (*hashfn)(void*);
/**
* @brief function to compare two objects for equality
*/
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
void * key;
void * value;
int hashcode;
XRHashLink * next;
};
/**
* @brief xrhash hashtable object
*/
typedef struct xrhash
{
int hash_generation; /* used to monitor changes in the hash for iterators */
hashfn hash;
cmpfn cmp;
- size_t count;
-
- XRHashLink * buckets[XRHASH_SLOTS];
+ size_t count;
+ size_t maxslots;
+ XRHashLink ** buckets;
} XRHash;
/**
* @brief xrhash iteration object
*/
typedef struct xrhash_iter
{
XRHash * xr;
int hash_generation;
int current_bucket;
XRHashLink * next_slot;
} XRHashIter;
/**
* @brief initialize an xrhash hashtable object
*
* @param hash callback function for hashcode generation
* @param cmp callback function for equality comparison (e.g. strcmp)
*
* @return
*/
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
+/**
+* @brief initialize an xrhash hashtable object with a specific number of hash buckets
+*
+* @param hash callback function for hashcode generation
+* @param cmp callback function for equality comparison (e.g. strcmp)
+* @param len number of buckets to use
+*
+* @return
+*/
+XRHash * xr_init_hash_len( int (*hash)(void*), int(*cmp)(void*,void*), size_t len );
+
/**
* @brief add an object to the given xr hashtable
*
* @param xr hashtable to add to
* @param key store value with this key
* @param value object to store
*
* @return XRHASH_ADDED on success, XRHASH_ADDED_ALREADY if the key is already taken or XRHASH_NULL_KEY if key was NULL
*/
int xr_hash_add( XRHash * xr, void * key, void * value );
/**
* @brief test if a given key exists in the hashtable
*
* @param xr hashtable to check
* @param key pointer to key
*
* @return XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE
*/
int xr_hash_contains( XRHash * xr, void * key );
/**
* @brief get the value corresponding to key
*
* @param xr hashtable to access
* @param key key to use
* @param dataout put value (pointer) here
*
* @returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_get( XRHash * xr, void * key, void **dataout );
/**
* @brief delete a given key+value from the hashtable
*
* @param xr hashtable to access
* @param key delete the value with this key
*
* @return
*/
int xr_hash_remove( XRHash * xr, void * key );
/**
* @brief initialize an object to iterate forwards through keys in the hashtable
*
* @param xr hashtable to use
*
* @return initialized iterator object
*/
XRHashIter * xr_init_hashiterator( XRHash * xr );
/**
* @brief iterate to the next item using a hash iterator
*
* @param iter iterator to use
*
* @return pointer to a key used in the iter->xr hashtable
*/
void * xr_hash_iteratekey( XRHashIter * iter );
+/**
+* @brief generate a hashcode for a given null terminated string
+*
+* @param str string to hash
+*
+* @return hashcode >= 1 on success, <= 0 on error
+*/
+int xr_hash__strhash( void * str );
+
+
+/**
+* @brief wrapper around strcmp
+*
+* @param stra
+* @param strb
+*
+* @return 0 if the strings match, else a negative or positive value (as strcmp)
+*/
+int xr_hash__strcmp( void * stra, void * strb );
+
#endif
|
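When both callbacks are NULL, xr_init_hash falls back to xr__hash_is_pointer and xr__cmp_pointers: the default hash reads the first sizeof(int) bytes at the key pointer, and equality compares the pointer values themselves. A sketch of that identity-keyed mode, restricted to the API as it stands at this commit (keys and values are illustrative):

#include <assert.h>
#include <string.h>
#include "xrhash.h"

int main( void )
{
  XRHash * xr = xr_init_hash( NULL, NULL );  /* pointer mode */

  int a = 1, b = 1;  /* equal contents, distinct addresses */
  void * out = NULL;

  xr_hash_add( xr, &a, "first" );
  xr_hash_add( xr, &b, "second" );  /* same hashcode, different key */

  assert( xr_hash_get( xr, &a, &out ) == XRHASH_EXISTS_TRUE );
  assert( strcmp( (char*)out, "first" ) == 0 );

  free( xr );  /* xr_hash_free only arrives in a later commit */
  return 0;
}

Because the default hash dereferences the key, any key whose leading int bytes are zero or negative produces an error code rather than a slot, so pointer mode only suits objects whose first word is a positive int.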
inorton/xrhash | fcadd94c27487239e276066c28729ab396bcea1e | add some doxygen comments | diff --git a/src/examples/spacemap.c b/src/examples/spacemap.c
index d6bb624..aef22f0 100644
--- a/src/examples/spacemap.c
+++ b/src/examples/spacemap.c
@@ -1,370 +1,372 @@
#include <ncurses.h>
#include <assert.h>
#include "spacemap.h"
#include <string.h>
int spaceship_id_cmp( void * id_a, void * id_b )
{
return strcmp((char*)id_a,(char*)id_b);
}
/* id is a pointer to spaceship.id */
int spaceship_id_hash( void * id )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) id;
size_t len = strlen(str);
int hash = 0;
int c = 0;
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
if ( hash < 1 )
hash = 3 + ( hash * -1 );
return hash;
}
spacemap * spacemap_init()
{
spacemap * map = (spacemap*) malloc( 1 * sizeof(spacemap) );
map->xr = xr_init_hash( &spaceship_id_hash, &spaceship_id_cmp );
map->selected_ship = "bob";
return map;
}
spaceship * new_spaceship( char* name, uint8 type, int x, int y )
{
spaceship * ship = (spaceship*)malloc(1 * sizeof(spaceship));
ship->name = strdup( name );
ship->x = x * 100;
ship->y = y * 100;
ship->vx = 0;
ship->vy = 0;
return ship;
}
void new_random_ship(spacemap * map)
{
int rand_x = ((int) rand()) % map->w;
int rand_y = ((int) rand()) % map->h;
- char * shipname = (char*) malloc( 10 * sizeof(char));
+ char * shipname = (char*) malloc( 20 * sizeof(char));
spaceship * newship;
- snprintf(shipname,10,"contact %d",map->xr->count + 1 );
+ snprintf(shipname,20,"contact %d",map->xr->count + 1 );
newship = new_spaceship( shipname, RAIDER, rand_x, rand_y );
if ( (rand_x % 2 )) {rand_x = rand_x * -0.5; } else {rand_x = rand_x * 0.5; }
if ( (rand_y % 2 )) {rand_y = rand_y * -0.5; } else {rand_y = rand_y * 0.5; }
newship->vx = rand_x;
newship->vy = rand_y;
spacemap_add( map, newship );
}
void spacemap_add( spacemap * map, spaceship * ship )
{
xr_hash_add( map->xr, (void*)ship->name, ship );
}
spaceship * spacemap_get( spacemap * map, const char* name )
{
spaceship * ret = NULL;
xr_hash_get( map->xr, (void*) name, (void**) &ret );
return ret;
}
void moveships( spacemap * map )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
ship->x += ship->vx;
ship->y += ship->vy;
/* wrap/warp ship at edges of map - asteroids style */
if ( ship->x > ( map->w * 100 ) ){
ship->x = 0;
}
if ( ship->x < 0 ){
ship->x = map->w * 100;
}
if ( ship->y > ( map->h * 100 ) ){
ship->y = 0;
}
if ( ship->y < 0 ){
ship->y = map->h * 100;
}
}
free ( iter );
}
void clearships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
mvwprintw( wind, shipy, shipx, " " );
mvwprintw( wind, shipy+1,shipx+1," ");
}
free ( iter );
}
void paintinfo ( spacemap * map, WINDOW * wind )
{
int menu_row = 0;
- int menu_max_rows = LINES - 12;
+ int menu_max_rows = LINES / 2;
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
spaceship * current = spacemap_get( map, map->selected_ship );
werase( wind );
/* paint selected item details */
mvwprintw( wind, 3, 2, "Current Target" );
wattron(wind,A_BOLD);
mvwprintw( wind, 5, 2, "Name: %s", current->name );
mvwprintw( wind, 6, 2, "Position: x = %d", current->x );
mvwprintw( wind, 7, 2, " y = %d", current->y );
wattroff(wind,A_BOLD);
/* paint list - with selected highlighted */
mvwprintw( wind, 10, 2, "%d Radar Contact(s)", map->xr->count );
while (( key = xr_hash_iteratekey( iter ) ) != NULL ){
char * iname = (char*)key;
if ( menu_row > menu_max_rows ) break;
if ( strcmp(iname,current->name) == 0 ){
wattron(wind,A_REVERSE);
}
mvwprintw( wind, 12 + menu_row, 2, "* %-12s", iname );
if ( strcmp(iname,current->name) == 0 ){
wattroff(wind,A_REVERSE);
}
menu_row++;
}
free(iter);
/* paint help */
mvwprintw( wind, LINES - 7, 2 , "F1 - Quit" );
mvwprintw( wind, LINES - 6, 2 , "F2 - Create Ship" );
mvwprintw( wind, LINES - 5, 2 , "F9 - Select Previous Ship" );
mvwprintw( wind, LINES - 4, 2 , "F10 - Select Next Ship" );
box( wind, 0,0 );
}
void paintships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
- mvwprintw( wind, shipy, shipx, "*" );
if ( strcmp( map->selected_ship, ship->name ) == 0 ){
wattron(wind,A_BOLD);
+ mvwprintw( wind, shipy, shipx, "*" );
} else {
wattron(wind,A_DIM);
+ mvwprintw( wind, shipy, shipx, "." );
}
- mvwprintw( wind, shipy+1,shipx+1,"%s", ship->name );
if ( strcmp( map->selected_ship, ship->name ) == 0 ){
+ mvwprintw( wind, shipy+1,shipx+1,"%s", ship->name );
wattroff(wind,A_BOLD);
} else {
wattron(wind,A_DIM);
}
}
free ( iter );
}
void choose_next( spacemap * map )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * next = NULL;
while (( next = xr_hash_iteratekey( iter ) ) != NULL )
{
if ( next != NULL ){
if ( strcmp( map->selected_ship, (char*) next ) == 0 ){
next = xr_hash_iteratekey( iter );
if ( next != NULL ){
map->selected_ship = next;
break;
}
}
}
}
free(iter);
}
void choose_previous ( spacemap * map )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * prev = NULL;
void * next = NULL;
do {
if ( next != NULL ){
if ( strcmp(next,map->selected_ship) == 0 ){
if ( prev != NULL )
map->selected_ship = prev;
break;
}
}
prev = next;
} while ( ( next = xr_hash_iteratekey( iter ) ) != NULL );
free(iter);
}
WINDOW * w_map;
WINDOW * w_info;
int main( int argc, char** argv )
{
int map_width;
int map_height;
int ch;
int info_width = 38;
int info_height;
spacemap * map = spacemap_init();
spaceship * bob;
/* setup ncurses */
initscr();
start_color();
cbreak();
init_pair(1,COLOR_RED, COLOR_BLACK); /* for selected ship */
info_height = map_height = LINES - 2;
map_width = COLS - info_width - 2;
map->w = map_width - 1;
map->h = map_height - 1;
w_map = newwin( map_height, map_width, 0, 0);
box( w_map, 0,0 );
w_info = newwin( info_height, info_width, 0, map_width + 1 );
box( w_info, 0,0 );
keypad( stdscr, TRUE );
refresh();
timeout(50);
/* add first ship */
bob = new_spaceship("bob",VIPER, 5, 12 );
bob->vx = 20;
bob->vy = 34;
spacemap_add( map, bob );
/* game loop */
while((ch = getch()) != KEY_F(1))
{ switch(ch)
{
case KEY_F(2):
/* make a new ship */
+ srand( bob->y );
new_random_ship( map );
break;
case KEY_F(10):
choose_next( map );
break;
case KEY_F(9):
choose_previous( map );
break;
default:
break;
}
wrefresh( w_map );
wrefresh( w_info );
/* move ships */
clearships( map, w_map );
moveships( map );
/* show ships */
paintships( map, w_map );
paintinfo( map, w_info );
box( w_map, 0, 0 );
/* game loop delay */
timeout(100);
}
endwin();
return 0;
}
diff --git a/src/xrhash.h b/src/xrhash.h
index 58dc783..2869c20 100644
--- a/src/xrhash.h
+++ b/src/xrhash.h
@@ -1,84 +1,156 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#ifndef XRHASH_SLOTS
#define XRHASH_SLOTS 8192
#endif
#define XRHASH_MOD (XRHASH_SLOTS - 91)
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
-/* should never return 0, should return -ve on error */
+/**
+* @brief function to generate a hashcode for the object at the given pointer; must return an int greater than zero
+*/
typedef int (*hashfn)(void*);
+
+/**
+* @brief function to compare two objects for equality
+*/
typedef int (*cmpfn)(void*,void*);
+
typedef struct link XRHashLink;
struct link
{
void * key;
void * value;
int hashcode;
XRHashLink * next;
};
+
+/**
+* @brief xrhash hashtable object
+*/
typedef struct xrhash
{
int hash_generation; /* used to monitor changes in the hash for iterators */
hashfn hash;
cmpfn cmp;
size_t count;
XRHashLink * buckets[XRHASH_SLOTS];
} XRHash;
+
+/**
+* @brief xrhash iteration object
+*/
typedef struct xrhash_iter
{
XRHash * xr;
int hash_generation;
int current_bucket;
XRHashLink * next_slot;
} XRHashIter;
-/* create a new empty hash, return NULL on error */
+/**
+* @brief initialize a xrhash hashtable object
+*
+* @param hash callback function for hashcode generation
+* @param cmp callback function for equality comparison (eg strcmp)
+*
+* @return new XRHash object, or NULL on error
+*/
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
-/* return XRHASH_ADDED on success, else XRHASH_ADD_FAILED */
+
+/**
+* @brief add an object to the given xr hashtable
+*
+* @param xr hashtable to add to
+* @param key store value with this key
+* @param value object to store
+*
+* @return XRHASH_ADDED on success, XRHASH_ADDED_ALREADY if the key is already taken or XRHASH_NULL_KEY if key was NULL
+*/
int xr_hash_add( XRHash * xr, void * key, void * value );
-/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
+
+/**
+* @brief test if a given key exists in the hashtable
+*
+* @param xr hashtable to check
+* @param key pointer to key
+*
+* @return XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE
+*/
int xr_hash_contains( XRHash * xr, void * key );
-/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
+/**
+* @brief get the value corresponding to key
+*
+* @param xr hashtable to access
+* @param key key to use
+* @param dataout put value (pointer) here
+*
+* @returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_get( XRHash * xr, void * key, void **dataout );
-/* returns XRHASH_REMOVED or XRHASH_REMOVE_FAILED */
+
+/**
+* @brief delete a given key+value from the hashtable
+*
+* @param xr hashtable to access
+* @param key delete the value with this key
+*
+* @return XRHASH_REMOVED or XRHASH_REMOVE_FAILED
+*/
int xr_hash_remove( XRHash * xr, void * key );
+
+
+/**
+* @brief initialize an object to iterate forwards through keys in the hashtable
+*
+* @param xr hashtable to use
+*
+* @return initialized iterator object, or NULL on allocation failure
+*/
XRHashIter * xr_init_hashiterator( XRHash * xr );
+
+
+/**
+* @brief iterate to the next item using a hash iterator
+*
+* @param iter iterator to use
+*
+* @return pointer to a key used in the iter->xr hashtable, or NULL when iteration is complete
+*/
void * xr_hash_iteratekey( XRHashIter * iter );
#endif
|
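The iterator comments added above reduce to one canonical loop. A sketch, assuming a populated table xr; note that adding or removing entries inside the loop changes hash_generation, so the next xr_hash_iteratekey call would abort:

XRHashIter * iter = xr_init_hashiterator( xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
  void * value = NULL;
  if ( xr_hash_get( xr, key, &value ) == XRHASH_EXISTS_TRUE ){
    /* use key and value here; do not mutate the table mid-loop */
  }
}
free( iter ); /* the iterator is heap-allocated; the caller frees it */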
inorton/xrhash | 4142bc63c081d9f6055b11a725d12a433e068659 | space ship example now complete | diff --git a/src/examples/spacemap.c b/src/examples/spacemap.c
index c08d2aa..d6bb624 100644
--- a/src/examples/spacemap.c
+++ b/src/examples/spacemap.c
@@ -1,257 +1,370 @@
#include <ncurses.h>
#include <assert.h>
#include "spacemap.h"
#include <string.h>
int spaceship_id_cmp( void * id_a, void * id_b )
{
return strcmp((char*)id_a,(char*)id_b);
}
/* id is a pointer to spaceship.id */
int spaceship_id_hash( void * id )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) id;
size_t len = strlen(str);
int hash = 0;
int c = 0;
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
if ( hash < 1 )
hash = 3 + ( hash * -1 );
return hash;
}
spacemap * spacemap_init()
{
spacemap * map = (spacemap*) malloc( 1 * sizeof(spacemap) );
map->xr = xr_init_hash( &spaceship_id_hash, &spaceship_id_cmp );
map->selected_ship = "bob";
return map;
}
spaceship * new_spaceship( char* name, uint8 type, int x, int y )
{
spaceship * ship = (spaceship*)malloc(1 * sizeof(spaceship));
ship->name = strdup( name );
ship->x = x * 100;
ship->y = y * 100;
- ship->vx = 100;
- ship->vy = 50;
+ ship->vx = 0;
+ ship->vy = 0;
return ship;
}
void new_random_ship(spacemap * map)
{
int rand_x = ((int) rand()) % map->w;
int rand_y = ((int) rand()) % map->h;
char * shipname = (char*) malloc( 10 * sizeof(char));
spaceship * newship;
snprintf(shipname,10,"contact %d",map->xr->count + 1 );
newship = new_spaceship( shipname, RAIDER, rand_x, rand_y );
- if ( (rand_x % 2 )) {rand_x = rand_x * -3; } else {rand_x = rand_x * 3; }
- if ( (rand_y % 2 )) {rand_y = rand_y * -3; } else {rand_y = rand_y * 3; }
+ if ( (rand_x % 2 )) {rand_x = rand_x * -0.5; } else {rand_x = rand_x * 0.5; }
+ if ( (rand_y % 2 )) {rand_y = rand_y * -0.5; } else {rand_y = rand_y * 0.5; }
newship->vx = rand_x;
newship->vy = rand_y;
spacemap_add( map, newship );
}
void spacemap_add( spacemap * map, spaceship * ship )
{
xr_hash_add( map->xr, (void*)ship->name, ship );
}
spaceship * spacemap_get( spacemap * map, const char* name )
{
spaceship * ret = NULL;
xr_hash_get( map->xr, (void*) name, (void**) &ret );
return ret;
}
void moveships( spacemap * map )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
ship->x += ship->vx;
ship->y += ship->vy;
/* wrap/warp ship at edges of map - asteroids style */
if ( ship->x > ( map->w * 100 ) ){
ship->x = 0;
}
if ( ship->x < 0 ){
ship->x = map->w * 100;
}
if ( ship->y > ( map->h * 100 ) ){
ship->y = 0;
}
if ( ship->y < 0 ){
ship->y = map->h * 100;
}
}
free ( iter );
}
void clearships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
mvwprintw( wind, shipy, shipx, " " );
mvwprintw( wind, shipy+1,shipx+1," ");
}
free ( iter );
}
+void paintinfo ( spacemap * map, WINDOW * wind )
+{
+
+ int menu_row = 0;
+ int menu_max_rows = LINES - 12;
+ XRHashIter * iter = xr_init_hashiterator( map->xr );
+ void * key = NULL;
+ spaceship * current = spacemap_get( map, map->selected_ship );
+ werase( wind );
+
+ /* paint selected item details */
+
+ mvwprintw( wind, 3, 2, "Current Target" );
+ wattron(wind,A_BOLD);
+
+ mvwprintw( wind, 5, 2, "Name: %s", current->name );
+ mvwprintw( wind, 6, 2, "Position: x = %d", current->x );
+ mvwprintw( wind, 7, 2, " y = %d", current->y );
+
+ wattroff(wind,A_BOLD);
+
+ /* paint list - with selected highlighted */
+
+
+ mvwprintw( wind, 10, 2, "%d Radar Contact(s)", map->xr->count );
+
+ while (( key = xr_hash_iteratekey( iter ) ) != NULL ){
+ char * iname = (char*)key;
+
+ if ( menu_row > menu_max_rows ) break;
+
+ if ( strcmp(iname,current->name) == 0 ){
+ wattron(wind,A_REVERSE);
+ }
+
+ mvwprintw( wind, 12 + menu_row, 2, "* %-12s", iname );
+
+
+ if ( strcmp(iname,current->name) == 0 ){
+ wattroff(wind,A_REVERSE);
+ }
+
+ menu_row++;
+
+ }
+ free(iter);
+
+ /* paint help */
+
+ mvwprintw( wind, LINES - 7, 2 , "F1 - Quit" );
+ mvwprintw( wind, LINES - 6, 2 , "F2 - Create Ship" );
+ mvwprintw( wind, LINES - 5, 2 , "F9 - Select Previous Ship" );
+ mvwprintw( wind, LINES - 4, 2 , "F10 - Select Next Ship" );
+
+ box( wind, 0,0 );
+}
+
void paintships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
mvwprintw( wind, shipy, shipx, "*" );
if ( strcmp( map->selected_ship, ship->name ) == 0 ){
- wattron(wind,A_REVERSE);
- }
+ wattron(wind,A_BOLD);
+ } else {
+ wattron(wind,A_DIM);
+ }
mvwprintw( wind, shipy+1,shipx+1,"%s", ship->name );
if ( strcmp( map->selected_ship, ship->name ) == 0 ){
- wattroff(wind,A_REVERSE);
+ wattroff(wind,A_BOLD);
+ } else {
+ wattron(wind,A_DIM);
}
}
free ( iter );
}
+void choose_next( spacemap * map )
+{
+ XRHashIter * iter = xr_init_hashiterator( map->xr );
+ void * next = NULL;
+
+ while (( next = xr_hash_iteratekey( iter ) ) != NULL )
+ {
+ if ( next != NULL ){
+ if ( strcmp( map->selected_ship, (char*) next ) == 0 ){
+ next = xr_hash_iteratekey( iter );
+ if ( next != NULL ){
+ map->selected_ship = next;
+ break;
+ }
+ }
+ }
+ }
+ free(iter);
+}
+
+void choose_previous ( spacemap * map )
+{
+ XRHashIter * iter = xr_init_hashiterator( map->xr );
+
+ void * prev = NULL;
+ void * next = NULL;
+
+ do {
+ if ( next != NULL ){
+ if ( strcmp(next,map->selected_ship) == 0 ){
+ if ( prev != NULL )
+ map->selected_ship = prev;
+ break;
+ }
+ }
+ prev = next;
+ } while ( ( next = xr_hash_iteratekey( iter ) ) != NULL );
+
+ free(iter);
+}
+
+
+
+
+
WINDOW * w_map;
WINDOW * w_info;
int main( int argc, char** argv )
{
int map_width;
int map_height;
int ch;
- int info_width = 30;
+ int info_width = 38;
int info_height;
spacemap * map = spacemap_init();
spaceship * bob;
-
/* setup ncurses */
initscr();
start_color();
cbreak();
init_pair(1,COLOR_RED, COLOR_BLACK); /* for selected ship */
info_height = map_height = LINES - 2;
map_width = COLS - info_width - 2;
map->w = map_width - 1;
map->h = map_height - 1;
w_map = newwin( map_height, map_width, 0, 0);
box( w_map, 0,0 );
w_info = newwin( info_height, info_width, 0, map_width + 1 );
box( w_info, 0,0 );
keypad( stdscr, TRUE );
refresh();
timeout(50);
/* add first ship */
bob = new_spaceship("bob",VIPER, 5, 12 );
+ bob->vx = 20;
+ bob->vy = 34;
spacemap_add( map, bob );
+
/* game loop */
while((ch = getch()) != KEY_F(1))
{ switch(ch)
{
case KEY_F(2):
/* make a new ship */
new_random_ship( map );
break;
- case KEY_DOWN:
+ case KEY_F(10):
+ choose_next( map );
break;
- case KEY_UP:
+ case KEY_F(9):
+ choose_previous( map );
break;
default:
break;
}
wrefresh( w_map );
wrefresh( w_info );
/* move ships */
clearships( map, w_map );
moveships( map );
/* show ships */
paintships( map, w_map );
+
+ paintinfo( map, w_info );
+
box( w_map, 0, 0 );
/* game loop delay */
- timeout(500);
+ timeout(100);
}
endwin();
return 0;
}
diff --git a/src/xrhash.c b/src/xrhash.c
index 53d947b..0849b77 100644
--- a/src/xrhash.c
+++ b/src/xrhash.c
@@ -1,274 +1,279 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
inline int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
if ( key == NULL )
return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
inline int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
while ( index >= XRHASH_SLOTS )
index = index % XRHASH_MOD;
return index;
}
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
/* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
table->hash_generation = 0;
}
return table;
}
int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = xr->buckets[index];
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->hashcode == 0 ){
break;
}
if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
xr->hash_generation++;
return XRHASH_ADDED;
}
int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
xr->hash_generation++;
slot->key = NULL;
slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
int xr_hash_get( XRHash * xr, void * key, void **dataout )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
*dataout = slot->value;
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
XRHashIter * xr_init_hashiterator( XRHash * xr )
{
XRHashIter * iter = (XRHashIter*)malloc(1*sizeof(XRHashIter));
if ( iter == NULL ){
errno = ENOMEM;
} else {
iter->xr = xr;
iter->hash_generation = xr->hash_generation;
iter->current_bucket = 0;
iter->next_slot = xr->buckets[0];
}
return iter;
}
void * xr_hash_iteratekey( XRHashIter * iter )
{
void * key = NULL;
if ( iter->xr->hash_generation != iter->hash_generation ){
fprintf(stderr,"hash changed during iteration\n");
abort();
/* return NULL; */
}
if ( iter->next_slot != NULL ){ /* iterate through links in the current bucket */
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
} else { /* no more links here, move to next bucket */
while ( iter->xr->buckets[++iter->current_bucket] == NULL ){
if ( iter->current_bucket >= XRHASH_SLOTS )
return NULL; /* no more filled buckets, end of iterations */
}
+
+ /* reached the end of the hash */
+ if ( iter->current_bucket >= XRHASH_SLOTS )
+ return NULL; /* end of iterations */
+
/* now pointing at the next slot */
iter->next_slot = iter->xr->buckets[iter->current_bucket];
key = iter->next_slot->key;
iter->next_slot = iter->next_slot->next;
}
return key;
}
diff --git a/src/xrhash.h b/src/xrhash.h
index 8aa79bc..58dc783 100644
--- a/src/xrhash.h
+++ b/src/xrhash.h
@@ -1,82 +1,84 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#ifndef XRHASH_SLOTS
#define XRHASH_SLOTS 8192
#endif
#define XRHASH_MOD (XRHASH_SLOTS - 91)
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
/* should never return 0, should return -ve on error */
typedef int (*hashfn)(void*);
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
void * key;
void * value;
int hashcode;
XRHashLink * next;
};
typedef struct xrhash
{
int hash_generation; /* used to monitor changes in the hash for iterators */
hashfn hash;
cmpfn cmp;
size_t count;
XRHashLink * buckets[XRHASH_SLOTS];
} XRHash;
typedef struct xrhash_iter
{
XRHash * xr;
int hash_generation;
int current_bucket;
XRHashLink * next_slot;
} XRHashIter;
/* create a new empty hash, return NULL on error */
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
/* return XRHASH_ADDED on success, else XRHASH_ADD_FAILED */
int xr_hash_add( XRHash * xr, void * key, void * value );
/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_contains( XRHash * xr, void * key );
/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_get( XRHash * xr, void * key, void **dataout );
/* returns XRHASH_REMOVED or XRHASH_REMOVE_FAILED */
int xr_hash_remove( XRHash * xr, void * key );
XRHashIter * xr_init_hashiterator( XRHash * xr );
void * xr_hash_iteratekey( XRHashIter * iter );
+
+
#endif
|
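The two lines added to xr_hash_iteratekey above close an off-by-one: the scan loop increments current_bucket before testing it, so once the last occupied bucket has been consumed the index can reach XRHASH_SLOTS, and a non-NULL garbage read at buckets[XRHASH_SLOTS] would have been treated as a live bucket. A sketch of the case the new check covers (the helper name and assert harness are illustrative):

#include <assert.h>
#include "xrhash.h"

void check_iteration_terminates( void )
{
  int key = 42; /* default hash reads the int at the key pointer: 42 > 0 */
  XRHash * xr = xr_init_hash( NULL, NULL ); /* default pointer hash/cmp */
  XRHashIter * it;

  xr_hash_add( xr, &key, &key );
  it = xr_init_hashiterator( xr );
  assert( xr_hash_iteratekey( it ) == (void*)&key ); /* the single entry */
  assert( xr_hash_iteratekey( it ) == NULL );        /* clean end of hash */
  free( it );
  free( xr );
}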
inorton/xrhash | 612117042edce905b477b4a258b89d45c3933337 | tidy spacemap a little | diff --git a/src/examples/spacemap.c b/src/examples/spacemap.c
index 69d8bcd..c08d2aa 100644
--- a/src/examples/spacemap.c
+++ b/src/examples/spacemap.c
@@ -1,251 +1,257 @@
#include <ncurses.h>
#include <assert.h>
#include "spacemap.h"
#include <string.h>
int spaceship_id_cmp( void * id_a, void * id_b )
{
return strcmp((char*)id_a,(char*)id_b);
}
/* id is a pointer to spaceship.id */
int spaceship_id_hash( void * id )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) id;
size_t len = strlen(str);
int hash = 0;
int c = 0;
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
if ( hash < 1 )
hash = 3 + ( hash * -1 );
return hash;
}
spacemap * spacemap_init()
{
spacemap * map = (spacemap*) malloc( 1 * sizeof(spacemap) );
map->xr = xr_init_hash( &spaceship_id_hash, &spaceship_id_cmp );
map->selected_ship = "bob";
return map;
}
spaceship * new_spaceship( char* name, uint8 type, int x, int y )
{
spaceship * ship = (spaceship*)malloc(1 * sizeof(spaceship));
ship->name = strdup( name );
ship->x = x * 100;
ship->y = y * 100;
ship->vx = 100;
ship->vy = 50;
return ship;
}
+void new_random_ship(spacemap * map)
+{
+ int rand_x = ((int) rand()) % map->w;
+ int rand_y = ((int) rand()) % map->h;
+ char * shipname = (char*) malloc( 10 * sizeof(char));
+ spaceship * newship;
+ snprintf(shipname,10,"contact %d",map->xr->count + 1 );
+ newship = new_spaceship( shipname, RAIDER, rand_x, rand_y );
+
+ if ( (rand_x % 2 )) {rand_x = rand_x * -3; } else {rand_x = rand_x * 3; }
+ if ( (rand_y % 2 )) {rand_y = rand_y * -3; } else {rand_y = rand_y * 3; }
+
+ newship->vx = rand_x;
+ newship->vy = rand_y;
+
+ spacemap_add( map, newship );
+}
+
void spacemap_add( spacemap * map, spaceship * ship )
{
xr_hash_add( map->xr, (void*)ship->name, ship );
}
spaceship * spacemap_get( spacemap * map, const char* name )
{
spaceship * ret = NULL;
xr_hash_get( map->xr, (void*) name, (void**) &ret );
return ret;
}
void moveships( spacemap * map )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
ship->x += ship->vx;
ship->y += ship->vy;
/* wrap/warp ship at edges of map - asteroids style */
if ( ship->x > ( map->w * 100 ) ){
ship->x = 0;
}
if ( ship->x < 0 ){
ship->x = map->w * 100;
}
if ( ship->y > ( map->h * 100 ) ){
ship->y = 0;
}
if ( ship->y < 0 ){
ship->y = map->h * 100;
}
}
free ( iter );
}
void clearships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
mvwprintw( wind, shipy, shipx, " " );
mvwprintw( wind, shipy+1,shipx+1," ");
}
free ( iter );
}
void paintships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
mvwprintw( wind, shipy, shipx, "*" );
if ( strcmp( map->selected_ship, ship->name ) == 0 ){
wattron(wind,A_REVERSE);
}
mvwprintw( wind, shipy+1,shipx+1,"%s", ship->name );
if ( strcmp( map->selected_ship, ship->name ) == 0 ){
wattroff(wind,A_REVERSE);
}
}
free ( iter );
}
WINDOW * w_map;
WINDOW * w_info;
int main( int argc, char** argv )
{
int map_width;
int map_height;
int ch;
int info_width = 30;
int info_height;
- int rand_x;
- int rand_y;
-
spacemap * map = spacemap_init();
spaceship * bob;
- spaceship * newship;
- char * shipname;
/* setup ncurses */
initscr();
start_color();
cbreak();
init_pair(1,COLOR_RED, COLOR_BLACK); /* for selected ship */
info_height = map_height = LINES - 2;
map_width = COLS - info_width - 2;
map->w = map_width - 1;
map->h = map_height - 1;
w_map = newwin( map_height, map_width, 0, 0);
box( w_map, 0,0 );
w_info = newwin( info_height, info_width, 0, map_width + 1 );
box( w_info, 0,0 );
keypad( stdscr, TRUE );
refresh();
timeout(50);
/* add first ship */
bob = new_spaceship("bob",VIPER, 5, 12 );
spacemap_add( map, bob );
/* game loop */
while((ch = getch()) != KEY_F(1))
{ switch(ch)
{
case KEY_F(2):
/* make a new ship */
- rand_x = ((int) rand()) % map_width;
- rand_y = ((int) rand()) % map_height;
- shipname = (char*) malloc( 10 * sizeof(char));
- snprintf(shipname,10,"contact %d",map->xr->count + 1 );
- newship = new_spaceship( shipname, RAIDER, rand_x, rand_y );
-
- if ( (rand_x % 2 )) {rand_x = rand_x * -3; } else {rand_x = rand_x * 3; }
- if ( (rand_y % 2 )) {rand_y = rand_y * -3; } else {rand_y = rand_y * 3; }
-
- newship->vx = rand_x;
- newship->vy = rand_y;
-
- spacemap_add( map, newship );
+ new_random_ship( map );
+ break;
+
+ case KEY_DOWN:
+ break;
+
+ case KEY_UP:
break;
default:
-
break;
}
wrefresh( w_map );
wrefresh( w_info );
/* move ships */
clearships( map, w_map );
moveships( map );
/* show ships */
paintships( map, w_map );
box( w_map, 0, 0 );
/* game loop delay */
timeout(500);
}
endwin();
return 0;
}
|
inorton/xrhash | 1d4f1a713fdce09c769e9a410a255ef3383d1d27 | show highlighted ship | diff --git a/src/examples/spacemap.c b/src/examples/spacemap.c
index ce49353..69d8bcd 100644
--- a/src/examples/spacemap.c
+++ b/src/examples/spacemap.c
@@ -1,238 +1,251 @@
#include <ncurses.h>
-
+#include <assert.h>
#include "spacemap.h"
#include <string.h>
int spaceship_id_cmp( void * id_a, void * id_b )
{
return strcmp((char*)id_a,(char*)id_b);
}
/* id is a pointer to spaceship.id */
int spaceship_id_hash( void * id )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) id;
size_t len = strlen(str);
int hash = 0;
int c = 0;
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
if ( hash < 1 )
hash = 3 + ( hash * -1 );
return hash;
}
spacemap * spacemap_init()
{
spacemap * map = (spacemap*) malloc( 1 * sizeof(spacemap) );
map->xr = xr_init_hash( &spaceship_id_hash, &spaceship_id_cmp );
+ map->selected_ship = "bob";
return map;
}
spaceship * new_spaceship( char* name, uint8 type, int x, int y )
{
spaceship * ship = (spaceship*)malloc(1 * sizeof(spaceship));
ship->name = strdup( name );
ship->x = x * 100;
ship->y = y * 100;
ship->vx = 100;
ship->vy = 50;
return ship;
}
void spacemap_add( spacemap * map, spaceship * ship )
{
xr_hash_add( map->xr, (void*)ship->name, ship );
}
spaceship * spacemap_get( spacemap * map, const char* name )
{
spaceship * ret = NULL;
xr_hash_get( map->xr, (void*) name, (void**) &ret );
return ret;
}
void moveships( spacemap * map )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
ship->x += ship->vx;
ship->y += ship->vy;
/* wrap/warp ship at edges of map - asteroids style */
if ( ship->x > ( map->w * 100 ) ){
ship->x = 0;
}
if ( ship->x < 0 ){
ship->x = map->w * 100;
}
if ( ship->y > ( map->h * 100 ) ){
ship->y = 0;
}
if ( ship->y < 0 ){
ship->y = map->h * 100;
}
}
free ( iter );
}
void clearships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
mvwprintw( wind, shipy, shipx, " " );
mvwprintw( wind, shipy+1,shipx+1," ");
}
free ( iter );
}
void paintships( spacemap * map, WINDOW * wind )
{
XRHashIter * iter = xr_init_hashiterator( map->xr );
void * key = NULL;
while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
spaceship * ship = NULL;
int shipx;
int shipy;
xr_hash_get( map->xr, key, (void**) &ship );
if ( ship == NULL )
break;
shipx = ( ship->x / 100 ) ;
shipy = ( ship->y / 100 ) ;
mvwprintw( wind, shipy, shipx, "*" );
+ if ( strcmp( map->selected_ship, ship->name ) == 0 ){
+ wattron(wind,A_REVERSE);
+ }
+
mvwprintw( wind, shipy+1,shipx+1,"%s", ship->name );
+ if ( strcmp( map->selected_ship, ship->name ) == 0 ){
+ wattroff(wind,A_REVERSE);
+ }
+
+
}
free ( iter );
}
WINDOW * w_map;
WINDOW * w_info;
int main( int argc, char** argv )
{
int map_width;
int map_height;
int ch;
int info_width = 30;
int info_height;
int rand_x;
int rand_y;
spacemap * map = spacemap_init();
spaceship * bob;
spaceship * newship;
char * shipname;
/* setup ncurses */
initscr();
+ start_color();
+
cbreak();
+ init_pair(1,COLOR_RED, COLOR_BLACK); /* for selected ship */
info_height = map_height = LINES - 2;
map_width = COLS - info_width - 2;
map->w = map_width - 1;
map->h = map_height - 1;
w_map = newwin( map_height, map_width, 0, 0);
box( w_map, 0,0 );
w_info = newwin( info_height, info_width, 0, map_width + 1 );
box( w_info, 0,0 );
keypad( stdscr, TRUE );
refresh();
timeout(50);
/* add first ship */
bob = new_spaceship("bob",VIPER, 5, 12 );
spacemap_add( map, bob );
/* game loop */
while((ch = getch()) != KEY_F(1))
{ switch(ch)
{
case KEY_F(2):
/* make a new ship */
rand_x = ((int) rand()) % map_width;
rand_y = ((int) rand()) % map_height;
shipname = (char*) malloc( 10 * sizeof(char));
snprintf(shipname,10,"contact %d",map->xr->count + 1 );
newship = new_spaceship( shipname, RAIDER, rand_x, rand_y );
if ( (rand_x % 2 )) {rand_x = rand_x * -3; } else {rand_x = rand_x * 3; }
if ( (rand_y % 2 )) {rand_y = rand_y * -3; } else {rand_y = rand_y * 3; }
newship->vx = rand_x;
newship->vy = rand_y;
spacemap_add( map, newship );
break;
default:
break;
}
wrefresh( w_map );
wrefresh( w_info );
/* move ships */
clearships( map, w_map );
moveships( map );
/* show ships */
paintships( map, w_map );
box( w_map, 0, 0 );
/* game loop delay */
timeout(500);
}
endwin();
return 0;
}
diff --git a/src/examples/spacemap.h b/src/examples/spacemap.h
index 9409f38..f7cc8f1 100644
--- a/src/examples/spacemap.h
+++ b/src/examples/spacemap.h
@@ -1,43 +1,44 @@
#ifndef SPACEMAP_H
#define SPACEMAP_H
#include <xrhash.h>
#include <stdio.h>
#include <string.h>
/* silly example storing ships in a map space using 'ship names' as the hash keys */
typedef char uint8 ;
typedef struct spaceship {
char * name;
uint8 type;
int x; /* coordinate x100 */
int y;
int vx;
int vy;
} spaceship;
typedef struct spacemap {
XRHash * xr;
int w;
int h;
+ char * selected_ship;
} spacemap;
#define VIPER 1
#define RAIDER 2
#define SHUTTLE 3
int spaceship_id_cmp( void * id_a, void * id_b );
int spaceship_id_hash( void * id );
spacemap * spacemap_init();
spaceship * new_spaceship( char* name, uint8 type, int x, int y );
void spacemap_add( spacemap * map, spaceship * ship );
spaceship * spacemap_get( spacemap * map, const char* name );
#endif
|
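The highlighting added above uses the standard ncurses attribute idiom: switch an attribute on, draw, then switch it off so later drawing is unaffected. Reduced to a sketch:

if ( strcmp( map->selected_ship, ship->name ) == 0 )
  wattron( wind, A_REVERSE );            /* highlight the selected ship */
mvwprintw( wind, shipy + 1, shipx + 1, "%s", ship->name );
if ( strcmp( map->selected_ship, ship->name ) == 0 )
  wattroff( wind, A_REVERSE );           /* restore normal attributes */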
inorton/xrhash | b95f282524b1d53f881d7e3db2172f55b7f4ae32 | space map example using ncurses | diff --git a/src/examples/SConscript b/src/examples/SConscript
index 98e96ba..f06bc7b 100644
--- a/src/examples/SConscript
+++ b/src/examples/SConscript
@@ -1,5 +1,5 @@
Import("env");
env.Append(CPPPATH="..")
-env.Program("spacemap.c",LIBS="xrhash",LIBPATH="..")
+env.Program("spacemap.c",LIBS=["xrhash","ncurses"],LIBPATH="..")
diff --git a/src/examples/spacemap.c b/src/examples/spacemap.c
index 225f698..ce49353 100644
--- a/src/examples/spacemap.c
+++ b/src/examples/spacemap.c
@@ -1,46 +1,238 @@
+#include <ncurses.h>
+
#include "spacemap.h"
#include <string.h>
int spaceship_id_cmp( void * id_a, void * id_b )
{
return strcmp((char*)id_a,(char*)id_b);
}
/* id is a pointer to spaceship.id */
int spaceship_id_hash( void * id )
{
/* this hashes strings in a similar way to the mono String.cs class */
char* str = (char*) id;
size_t len = strlen(str);
int hash = 0;
int c = 0;
while ( c < len ){
hash = (hash << 5) - hash + str[c];
c++;
}
+ if ( hash < 1 )
+ hash = 3 + ( hash * -1 );
return hash;
}
-spaceship * new_spaceship( char* name, uint8 type, int x, int y, double head )
+spacemap * spacemap_init()
+{
+ spacemap * map = (spacemap*) malloc( 1 * sizeof(spacemap) );
+ map->xr = xr_init_hash( &spaceship_id_hash, &spaceship_id_cmp );
+ return map;
+}
+
+spaceship * new_spaceship( char* name, uint8 type, int x, int y )
{
spaceship * ship = (spaceship*)malloc(1 * sizeof(spaceship));
ship->name = strdup( name );
- ship->x = x;
- ship->y = y;
- ship->heading = head;
+ ship->x = x * 100;
+ ship->y = y * 100;
+
+ ship->vx = 100;
+ ship->vy = 50;
+
return ship;
}
void spacemap_add( spacemap * map, spaceship * ship )
{
xr_hash_add( map->xr, (void*)ship->name, ship );
}
spaceship * spacemap_get( spacemap * map, const char* name )
{
spaceship * ret = NULL;
xr_hash_get( map->xr, (void*) name, (void**) &ret );
return ret;
}
+
+void moveships( spacemap * map )
+{
+ XRHashIter * iter = xr_init_hashiterator( map->xr );
+ void * key = NULL;
+
+ while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
+ spaceship * ship = NULL;
+ xr_hash_get( map->xr, key, (void**) &ship );
+ if ( ship == NULL )
+ break;
+
+ ship->x += ship->vx;
+ ship->y += ship->vy;
+
+ /* wrap/warp ship at edges of map - asteroids style */
+ if ( ship->x > ( map->w * 100 ) ){
+ ship->x = 0;
+ }
+ if ( ship->x < 0 ){
+ ship->x = map->w * 100;
+ }
+
+ if ( ship->y > ( map->h * 100 ) ){
+ ship->y = 0;
+ }
+ if ( ship->y < 0 ){
+ ship->y = map->h * 100;
+ }
+
+
+ }
+
+ free ( iter );
+}
+
+void clearships( spacemap * map, WINDOW * wind )
+{
+ XRHashIter * iter = xr_init_hashiterator( map->xr );
+ void * key = NULL;
+
+ while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
+ spaceship * ship = NULL;
+ int shipx;
+ int shipy;
+ xr_hash_get( map->xr, key, (void**) &ship );
+ if ( ship == NULL )
+ break;
+
+ shipx = ( ship->x / 100 ) ;
+ shipy = ( ship->y / 100 ) ;
+
+
+ mvwprintw( wind, shipy, shipx, " " );
+
+ mvwprintw( wind, shipy+1,shipx+1," ");
+
+ }
+
+ free ( iter );
+}
+
+void paintships( spacemap * map, WINDOW * wind )
+{
+ XRHashIter * iter = xr_init_hashiterator( map->xr );
+ void * key = NULL;
+
+ while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
+ spaceship * ship = NULL;
+ int shipx;
+ int shipy;
+ xr_hash_get( map->xr, key, (void**) &ship );
+ if ( ship == NULL )
+ break;
+
+ shipx = ( ship->x / 100 ) ;
+ shipy = ( ship->y / 100 ) ;
+
+
+ mvwprintw( wind, shipy, shipx, "*" );
+
+ mvwprintw( wind, shipy+1,shipx+1,"%s", ship->name );
+
+ }
+
+ free ( iter );
+}
+
+WINDOW * w_map;
+WINDOW * w_info;
+
+int main( int argc, char** argv )
+{
+ int map_width;
+ int map_height;
+
+ int ch;
+
+ int info_width = 30;
+ int info_height;
+
+ int rand_x;
+ int rand_y;
+
+ spacemap * map = spacemap_init();
+ spaceship * bob;
+ spaceship * newship;
+
+ char * shipname;
+
+ /* setup ncurses */
+
+ initscr();
+ cbreak();
+
+ info_height = map_height = LINES - 2;
+ map_width = COLS - info_width - 2;
+
+ map->w = map_width - 1;
+ map->h = map_height - 1;
+
+ w_map = newwin( map_height, map_width, 0, 0);
+ box( w_map, 0,0 );
+ w_info = newwin( info_height, info_width, 0, map_width + 1 );
+ box( w_info, 0,0 );
+
+ keypad( stdscr, TRUE );
+ refresh();
+
+ timeout(50);
+
+ /* add first ship */
+ bob = new_spaceship("bob",VIPER, 5, 12 );
+ spacemap_add( map, bob );
+
+ /* game loop */
+ while((ch = getch()) != KEY_F(1))
+ { switch(ch)
+ {
+ case KEY_F(2):
+ /* make a new ship */
+ rand_x = ((int) rand()) % map_width;
+ rand_y = ((int) rand()) % map_height;
+ shipname = (char*) malloc( 10 * sizeof(char));
+ snprintf(shipname,10,"contact %d",map->xr->count + 1 );
+ newship = new_spaceship( shipname, RAIDER, rand_x, rand_y );
+
+ if ( (rand_x % 2 )) {rand_x = rand_x * -3; } else {rand_x = rand_x * 3; }
+ if ( (rand_y % 2 )) {rand_y = rand_y * -3; } else {rand_y = rand_y * 3; }
+
+ newship->vx = rand_x;
+ newship->vy = rand_y;
+
+ spacemap_add( map, newship );
+ break;
+
+ default:
+
+ break;
+ }
+ wrefresh( w_map );
+ wrefresh( w_info );
+ /* move ships */
+ clearships( map, w_map );
+ moveships( map );
+ /* show ships */
+ paintships( map, w_map );
+ box( w_map, 0, 0 );
+
+ /* game loop delay */
+ timeout(500);
+ }
+
+ endwin();
+
+ return 0;
+}
+
diff --git a/src/examples/spacemap.h b/src/examples/spacemap.h
index 3092623..9409f38 100644
--- a/src/examples/spacemap.h
+++ b/src/examples/spacemap.h
@@ -1,38 +1,43 @@
#ifndef SPACEMAP_H
#define SPACEMAP_H
#include <xrhash.h>
#include <stdio.h>
#include <string.h>
/* silly example storing ships in a map space using 'ship names' as the hash keys */
typedef char uint8 ;
typedef struct spaceship {
char * name;
uint8 type;
- int x;
+ int x; /* coordinate x100 */
int y;
- double heading;
+
+ int vx;
+ int vy;
+
} spaceship;
typedef struct spacemap {
XRHash * xr;
+ int w;
+ int h;
} spacemap;
#define VIPER 1
#define RAIDER 2
#define SHUTTLE 3
int spaceship_id_cmp( void * id_a, void * id_b );
int spaceship_id_hash( void * id );
spacemap * spacemap_init();
-spaceship * new_spaceship( char* name, uint8 type, int x, int y, double head );
+spaceship * new_spaceship( char* name, uint8 type, int x, int y );
void spacemap_add( spacemap * map, spaceship * ship );
spaceship * spacemap_get( spacemap * map, const char* name );
#endif
|
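The x100 convention introduced in this commit is plain fixed-point arithmetic: positions and velocities are stored in hundredths of a character cell so a ship can move by less than one cell per tick, and the drawing code recovers the cell with an integer divide. For example, with vx = 50 the ship advances one column every two ticks (sketch condensed from the diff above):

ship->x += ship->vx;        /* 0 -> 50 -> 100 -> 150 -> ... */
shipx = ship->x / 100;      /* cell column: 0, 0, 1, 1, ... */
mvwprintw( wind, shipy, shipx, "*" );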
inorton/xrhash | c1c68f3e84097e5178aab07819cdc5d493565ded | add space map example (nearly done) | diff --git a/src/SConscript b/src/SConscript
index a0858dd..467da98 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -1,6 +1,11 @@
env = Environment()
env.Append(CFLAGS="-Wall -Werror")
env.Append(CFLAGS="-g -O0")
env.Library( "xrhash","xrhash.c");
env.Program( "xrhash-test", ["xrhash-test.c","testutils.c"] ,LIBS="xrhash",LIBPATH="." );
+
+
+Export("env");
+
+SConscript("examples/SConscript")
diff --git a/src/examples/SConscript b/src/examples/SConscript
new file mode 100644
index 0000000..98e96ba
--- /dev/null
+++ b/src/examples/SConscript
@@ -0,0 +1,5 @@
+Import("env");
+
+env.Append(CPPPATH="..")
+
+env.Program("spacemap.c",LIBS="xrhash",LIBPATH="..")
diff --git a/src/examples/spacemap.c b/src/examples/spacemap.c
new file mode 100644
index 0000000..225f698
--- /dev/null
+++ b/src/examples/spacemap.c
@@ -0,0 +1,46 @@
+#include "spacemap.h"
+#include <string.h>
+
+int spaceship_id_cmp( void * id_a, void * id_b )
+{
+ return strcmp((char*)id_a,(char*)id_b);
+}
+
+/* id is a pointer to spaceship.id */
+int spaceship_id_hash( void * id )
+{
+ /* this hashes strings in a similar way to the mono String.cs class */
+ char* str = (char*) id;
+ size_t len = strlen(str);
+ int hash = 0;
+ int c = 0;
+ while ( c < len ){
+ hash = (hash << 5) - hash + str[c];
+ c++;
+ }
+
+ return hash;
+}
+
+spaceship * new_spaceship( char* name, uint8 type, int x, int y, double head )
+{
+ spaceship * ship = (spaceship*)malloc(1 * sizeof(spaceship));
+ ship->name = strdup( name );
+ ship->x = x;
+ ship->y = y;
+ ship->heading = head;
+ return ship;
+}
+
+void spacemap_add( spacemap * map, spaceship * ship )
+{
+ xr_hash_add( map->xr, (void*)ship->name, ship );
+}
+
+spaceship * spacemap_get( spacemap * map, const char* name )
+{
+ spaceship * ret = NULL;
+ xr_hash_get( map->xr, (void*) name, (void**) &ret );
+ return ret;
+}
+
diff --git a/src/examples/spacemap.h b/src/examples/spacemap.h
new file mode 100644
index 0000000..3092623
--- /dev/null
+++ b/src/examples/spacemap.h
@@ -0,0 +1,38 @@
+#ifndef SPACEMAP_H
+#define SPACEMAP_H
+
+#include <xrhash.h>
+#include <stdio.h>
+#include <string.h>
+
+/* silly example storing ships in a map space using 'ship names' as the hash keys */
+
+typedef char uint8 ;
+
+typedef struct spaceship {
+ char * name;
+ uint8 type;
+ int x;
+ int y;
+ double heading;
+} spaceship;
+
+
+typedef struct spacemap {
+ XRHash * xr;
+} spacemap;
+
+#define VIPER 1
+#define RAIDER 2
+#define SHUTTLE 3
+
+int spaceship_id_cmp( void * id_a, void * id_b );
+int spaceship_id_hash( void * id );
+
+spacemap * spacemap_init();
+
+spaceship * new_spaceship( char* name, uint8 type, int x, int y, double head );
+void spacemap_add( spacemap * map, spaceship * ship );
+spaceship * spacemap_get( spacemap * map, const char* name );
+
+#endif
|
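A sketch of the API this commit declares, using the original five-argument new_spaceship signature (a later commit drops the heading parameter). Note spacemap_init is declared in spacemap.h here but only implemented in the following commit:

#include <assert.h>
#include "spacemap.h"

void demo( void )
{
  spacemap * map = spacemap_init();
  spaceship * bob = new_spaceship( "bob", VIPER, 5, 12, 90.0 );

  spacemap_add( map, bob );                    /* keyed by ship->name */
  assert( spacemap_get( map, "bob" ) == bob ); /* lookup by name */
}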
inorton/xrhash | a8a687fbdeb0ca663d3b160ace5a471b444d6e1e | can now iterate; add missing testutils; remove looks buggy, needs a better test | diff --git a/src/SConscript b/src/SConscript
index d48e255..a0858dd 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -1,6 +1,6 @@
env = Environment()
env.Append(CFLAGS="-Wall -Werror")
-# env.Append(CFLAGS="-g -O0")
+env.Append(CFLAGS="-g -O0")
env.Library( "xrhash","xrhash.c");
env.Program( "xrhash-test", ["xrhash-test.c","testutils.c"] ,LIBS="xrhash",LIBPATH="." );
diff --git a/src/testutils.c b/src/testutils.c
new file mode 100644
index 0000000..063f117
--- /dev/null
+++ b/src/testutils.c
@@ -0,0 +1,33 @@
+#include <sys/time.h>
+#include "testutils.h"
+
+long long
+timeval_diff(struct timeval *difference,
+ struct timeval *end_time,
+ struct timeval *start_time
+ )
+{
+ struct timeval temp_diff;
+
+ if(difference==NULL)
+ {
+ difference=&temp_diff;
+ }
+
+ difference->tv_sec =end_time->tv_sec -start_time->tv_sec ;
+ difference->tv_usec=end_time->tv_usec-start_time->tv_usec;
+
+ /* Using while instead of if below makes the code slightly more robust. */
+
+ while(difference->tv_usec<0)
+ {
+ difference->tv_usec+=1000000;
+ difference->tv_sec -=1;
+ }
+
+ return 1000000LL*difference->tv_sec+
+ difference->tv_usec;
+
+} /* timeval_diff() */
+
+
diff --git a/src/testutils.h b/src/testutils.h
new file mode 100644
index 0000000..58a6840
--- /dev/null
+++ b/src/testutils.h
@@ -0,0 +1,14 @@
+#include <sys/time.h>
+#ifndef XR_TESTUTILS_H
+#define XR_TESTUTILS_H
+
+#include <stdlib.h>
+
+long long
+timeval_diff(struct timeval *difference,
+ struct timeval *end_time,
+ struct timeval *start_time
+ );
+
+#endif
+
diff --git a/src/xrhash-test.c b/src/xrhash-test.c
index e0d2385..4f00c6d 100644
--- a/src/xrhash-test.c
+++ b/src/xrhash-test.c
@@ -1,65 +1,102 @@
#include <stdio.h>
#define XRHASH_SLOTS 32768
#include "xrhash.h"
#include <assert.h>
#include "testutils.h"
#include <sys/time.h>
#define TEST_STR "foo%d"
+#define DATASET 64000
+
+void iterate( XRHash * xr )
+{
+ XRHashIter * iter = xr_init_hashiterator( xr );
+ void * key = NULL;
+ int x = 0;
+
+ while ( ( key = xr_hash_iteratekey( iter ) ) != NULL ){
+ void * value = NULL;
+ assert ( xr_hash_get( xr, key , &value ) == XRHASH_EXISTS_TRUE );
+ x++;
+ }
+
+ assert( x == DATASET );
+
+ free(iter);
+}
int main( int argc, char** argv )
{
struct timeval tstart;
struct timeval tend;
XRHash * xr = xr_init_hash( NULL, NULL );
int x = 0;
int contains = -1;
- int datacount = 100000;
+ int datacount = DATASET;
int datalen = 10 + strlen(TEST_STR); /* ten digits */
char* data_vec = (char*) malloc ( datacount * datalen * sizeof(char) );
char * newstr = data_vec;
fprintf(stderr,"test add\n");
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
snprintf(newstr,datalen,TEST_STR,x);
xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
-/* contains = xr_hash_contains( xr, newstr );*/
-/* assert( contains == XRHASH_EXISTS_TRUE ); */
newstr += 4;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per add", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
+
+
fprintf(stderr,"test get\n");
newstr = data_vec;
x = 0;
+ gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
int got = -1;
contains = xr_hash_get( xr, newstr, (void**) &got );
assert( contains == XRHASH_EXISTS_TRUE );
- assert( got == x );
-
+ assert( got == x );
newstr += 4;
}
+ gettimeofday( &tend, 0x0 );
+ fprintf(stderr,"* avg %lld us per get", (timeval_diff(NULL,&tend,&tstart))/datacount );
+ fprintf(stderr,"\n");
+
+ fprintf(stderr,"test iteration\n");
+ gettimeofday( &tstart, 0x0 );
+ iterate( xr );
+ gettimeofday( &tend, 0x0 );
+ fprintf(stderr,"* avg %lld us per iteration with get", (timeval_diff(NULL,&tend,&tstart))/datacount );
+ fprintf(stderr,"\n");
+
+
+
fprintf(stderr,"test remove\n");
newstr = data_vec;
x = 0;
+ gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
contains = xr_hash_remove( xr, newstr );
assert( contains == XRHASH_REMOVED );
newstr += 4;
}
+ gettimeofday( &tend, 0x0 );
+ fprintf(stderr,"* avg %lld us per remove", (timeval_diff(NULL,&tend,&tstart))/datacount );
+ fprintf(stderr,"\n");
+
+ assert( xr->count == 0 );
free( xr );
free(data_vec);
return 0;
}
diff --git a/src/xrhash.c b/src/xrhash.c
index 690805d..53d947b 100644
--- a/src/xrhash.c
+++ b/src/xrhash.c
@@ -1,231 +1,274 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
inline int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
if ( key == NULL )
return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
inline int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
while ( index >= XRHASH_SLOTS )
index = index % XRHASH_MOD;
return index;
}
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
/* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
+ table->hash_generation = 0;
}
return table;
}
int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = xr->buckets[index];
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
+ xr->hash_generation++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->hashcode == 0 ){
break;
}
if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->hashcode = hashcode;
slot->key = key;
slot->value = value;
slot->next = NULL;
xr->count++;
+ xr->hash_generation++;
return XRHASH_ADDED;
}
int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
+ xr->hash_generation++;
slot->key = NULL;
slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
int xr_hash_get( XRHash * xr, void * key, void **dataout )
{
XRHashLink * slot = NULL;
int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
*dataout = slot->value;
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
+
+XRHashIter * xr_init_hashiterator( XRHash * xr )
+{
+ XRHashIter * iter = (XRHashIter*)malloc(1*sizeof(XRHashIter));
+ if ( iter == NULL ){
+ errno = ENOMEM;
+ } else {
+ iter->xr = xr;
+ iter->hash_generation = xr->hash_generation;
+ iter->current_bucket = 0;
+ iter->next_slot = xr->buckets[0];
+ }
+ return iter;
+}
+
+void * xr_hash_iteratekey( XRHashIter * iter )
+{
+ void * key = NULL;
+ if ( iter->xr->hash_generation != iter->hash_generation ){
+ fprintf(stderr,"hash changed during iteration\n");
+ abort();
+/* return NULL; */
+ }
+ if ( iter->next_slot != NULL ){ /* iterate through links in the current bucket */
+ key = iter->next_slot->key;
+ iter->next_slot = iter->next_slot->next;
+ } else { /* no more links here, move to next bucket */
+ while ( iter->xr->buckets[++iter->current_bucket] == NULL ){
+ if ( iter->current_bucket >= XRHASH_SLOTS )
+ return NULL; /* no more filled buckets, end of iterations */
+ }
+ /* now pointing at the next slot */
+ iter->next_slot = iter->xr->buckets[iter->current_bucket];
+ key = iter->next_slot->key;
+ iter->next_slot = iter->next_slot->next;
+ }
+ return key;
+}
+
diff --git a/src/xrhash.h b/src/xrhash.h
index dd3134f..8aa79bc 100644
--- a/src/xrhash.h
+++ b/src/xrhash.h
@@ -1,66 +1,82 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
+#include <stdio.h>
#include <errno.h>
#ifndef XRHASH_SLOTS
#define XRHASH_SLOTS 8192
#endif
#define XRHASH_MOD (XRHASH_SLOTS - 91)
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
+
/* should never return 0, should return -ve on error */
typedef int (*hashfn)(void*);
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
void * key;
void * value;
int hashcode;
XRHashLink * next;
};
typedef struct xrhash
{
+ int hash_generation; /* used to monitor changes in the hash for iterators */
hashfn hash;
cmpfn cmp;
size_t count;
-
+
XRHashLink * buckets[XRHASH_SLOTS];
} XRHash;
+typedef struct xrhash_iter
+{
+ XRHash * xr;
+ int hash_generation;
+ int current_bucket;
+ XRHashLink * next_slot;
+} XRHashIter;
+
+
/* create a new empty hash, return NULL on error */
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
/* return XRHASH_ADDED on success, else XRHASH_ADD_FAILED */
int xr_hash_add( XRHash * xr, void * key, void * value );
/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_contains( XRHash * xr, void * key );
/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_get( XRHash * xr, void * key, void **dataout );
/* returns XRHASH_REMOVED or XRHASH_REMOVE_FAILED */
int xr_hash_remove( XRHash * xr, void * key );
+XRHashIter * xr_init_hashiterator( XRHash * xr );
+void * xr_hash_iteratekey( XRHashIter * iter );
+
+
#endif
|
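The hash_generation counter threaded through this commit is the iterator-invalidation mechanism: every successful add or remove bumps it, xr_init_hashiterator snapshots it, and xr_hash_iteratekey compares the two on every call. A sketch of the failure mode it guards against:

XRHashIter * it = xr_init_hashiterator( xr ); /* snapshots hash_generation */
void * k = xr_hash_iteratekey( it );          /* ok: generations match */

xr_hash_remove( xr, k );                      /* bumps xr->hash_generation */
xr_hash_iteratekey( it );                     /* mismatch: prints a warning and abort()s */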
inorton/xrhash | 3d8715cb16cd0915515e6b1045316ba911d4c661 | add a readme | diff --git a/README b/README
new file mode 100644
index 0000000..b956888
--- /dev/null
+++ b/README
@@ -0,0 +1,70 @@
+xrhash - (c) 2010 Ian Norton-Badrul <inorton-at-gmail>
+
+xrhash is a simple to use 'dictionary' object for C. It currently supports the
+following operations:-
+
+ add
+ remove
+ exists
+
+Examples
+---------
+
+#include "xrhash.h"
+
+/* a hash that just compares the pointers of its keys */
+char* mykey = (char*) malloc(12 * sizeof(char)); /* room for "a key value" + NUL */
+int* myval = (int*) malloc(1*sizeof(int));
+
+snprintf(mykey,12,"a key value"); // set key
+*myval = 123; // set value
+
+XRHash * myhash = xr_init_hash( NULL, NULL );
+
+// add value with key
+assert( xr_hash_add( myhash, mykey, myval ) == XRHASH_ADDED );
+
+
+You should implement your own hashcode generation function for key values and pass a pointer to it as the first argument of xr_init_hash().
+
+You should also define a comparison function that will compare two keys for equality (e.g. like strcmp); see the sketch below.
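+
+For example (an illustrative sketch only: my_string_hash and my_string_cmp
+are hypothetical names, and the djb2-style hash is just one reasonable
+choice for C string keys):
+
+int my_string_hash( void * key )
+{
+  unsigned char * s = (unsigned char*) key;
+  unsigned long h = 5381;
+  while ( *s )
+    h = ( h * 33 ) + *s++; /* djb2: h = h*33 + c */
+  h = h % 0x7ffffffe; /* keep the result in positive int range */
+  if ( h == 0 ) h = 1; /* a hashfn must never return 0 */
+  return (int) h;
+}
+
+int my_string_cmp( void * a, void * b )
+{
+  return strcmp( (const char*) a, (const char*) b );
+}
+
+XRHash * mystrhash = xr_init_hash( &my_string_hash, &my_string_cmp );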
+
+
+iteration is planned soon.
+
+Building
+---------
+
+simply run 'scons'
+
+
+BSD License
+------------
+
+Copyright (c) 2010, Ian Norton
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+    * Neither the name of xrhash nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
inorton/xrhash | 7a4e7face5cc804b4261e2a89f10f0b08d153b57 | clean example by moving time_diff | diff --git a/src/SConscript b/src/SConscript
index 6a79319..d48e255 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -1,6 +1,6 @@
env = Environment()
env.Append(CFLAGS="-Wall -Werror")
# env.Append(CFLAGS="-g -O0")
env.Library( "xrhash","xrhash.c");
-env.Program( "xrhash-test.c",LIBS="xrhash",LIBPATH="." );
+env.Program( "xrhash-test", ["xrhash-test.c","testutils.c"] ,LIBS="xrhash",LIBPATH="." );
diff --git a/src/xrhash-test.c b/src/xrhash-test.c
index afc7de7..e0d2385 100644
--- a/src/xrhash-test.c
+++ b/src/xrhash-test.c
@@ -1,95 +1,65 @@
#include <stdio.h>
#define XRHASH_SLOTS 32768
#include "xrhash.h"
#include <assert.h>
+#include "testutils.h"
#include <sys/time.h>
#define TEST_STR "foo%d"
-
-
-long long
-timeval_diff(struct timeval *difference,
- struct timeval *end_time,
- struct timeval *start_time
- )
-{
- struct timeval temp_diff;
-
- if(difference==NULL)
- {
- difference=&temp_diff;
- }
-
- difference->tv_sec =end_time->tv_sec -start_time->tv_sec ;
- difference->tv_usec=end_time->tv_usec-start_time->tv_usec;
-
- /* Using while instead of if below makes the code slightly more robust. */
-
- while(difference->tv_usec<0)
- {
- difference->tv_usec+=1000000;
- difference->tv_sec -=1;
- }
-
- return 1000000LL*difference->tv_sec+
- difference->tv_usec;
-
-} /* timeval_diff() */
-
int main( int argc, char** argv )
{
struct timeval tstart;
struct timeval tend;
XRHash * xr = xr_init_hash( NULL, NULL );
int x = 0;
int contains = -1;
int datacount = 100000;
int datalen = 10 + strlen(TEST_STR); /* ten digits */
char* data_vec = (char*) malloc ( datacount * datalen * sizeof(char) );
char * newstr = data_vec;
fprintf(stderr,"test add\n");
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
snprintf(newstr,datalen,TEST_STR,x);
xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
/* contains = xr_hash_contains( xr, newstr );*/
/* assert( contains == XRHASH_EXISTS_TRUE ); */
newstr += 4;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per add", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test get\n");
newstr = data_vec;
x = 0;
while ( x++ < datacount ){
int got = -1;
contains = xr_hash_get( xr, newstr, (void**) &got );
assert( contains == XRHASH_EXISTS_TRUE );
assert( got == x );
newstr += 4;
}
fprintf(stderr,"test remove\n");
newstr = data_vec;
x = 0;
while ( x++ < datacount ){
contains = xr_hash_remove( xr, newstr );
assert( contains == XRHASH_REMOVED );
newstr += 4;
}
free( xr );
free(data_vec);
return 0;
}
|
inorton/xrhash | 0bb9d6365295072b0b85f296c3a121aefd3aa92e | allow user to override hash table bucket count | diff --git a/src/xrhash-test.c b/src/xrhash-test.c
index 5e22216..afc7de7 100644
--- a/src/xrhash-test.c
+++ b/src/xrhash-test.c
@@ -1,90 +1,95 @@
#include <stdio.h>
+
+#define XRHASH_SLOTS 32768
+
#include "xrhash.h"
#include <assert.h>
#include <sys/time.h>
#define TEST_STR "foo%d"
+
+
long long
timeval_diff(struct timeval *difference,
struct timeval *end_time,
struct timeval *start_time
)
{
struct timeval temp_diff;
if(difference==NULL)
{
difference=&temp_diff;
}
difference->tv_sec =end_time->tv_sec -start_time->tv_sec ;
difference->tv_usec=end_time->tv_usec-start_time->tv_usec;
/* Using while instead of if below makes the code slightly more robust. */
while(difference->tv_usec<0)
{
difference->tv_usec+=1000000;
difference->tv_sec -=1;
}
return 1000000LL*difference->tv_sec+
difference->tv_usec;
} /* timeval_diff() */
int main( int argc, char** argv )
{
struct timeval tstart;
struct timeval tend;
XRHash * xr = xr_init_hash( NULL, NULL );
int x = 0;
int contains = -1;
- int datacount = 16000;
+ int datacount = 100000;
int datalen = 10 + strlen(TEST_STR); /* ten digits */
char* data_vec = (char*) malloc ( datacount * datalen * sizeof(char) );
char * newstr = data_vec;
fprintf(stderr,"test add\n");
gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
snprintf(newstr,datalen,TEST_STR,x);
xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
/* contains = xr_hash_contains( xr, newstr );*/
/* assert( contains == XRHASH_EXISTS_TRUE ); */
newstr += 4;
}
gettimeofday( &tend, 0x0 );
fprintf(stderr,"* avg %lld us per add", (timeval_diff(NULL,&tend,&tstart))/datacount );
fprintf(stderr,"\n");
fprintf(stderr,"test get\n");
newstr = data_vec;
x = 0;
while ( x++ < datacount ){
int got = -1;
contains = xr_hash_get( xr, newstr, (void**) &got );
assert( contains == XRHASH_EXISTS_TRUE );
assert( got == x );
newstr += 4;
}
fprintf(stderr,"test remove\n");
newstr = data_vec;
x = 0;
while ( x++ < datacount ){
contains = xr_hash_remove( xr, newstr );
assert( contains == XRHASH_REMOVED );
newstr += 4;
}
free( xr );
free(data_vec);
return 0;
}
diff --git a/src/xrhash.h b/src/xrhash.h
index 1717612..dd3134f 100644
--- a/src/xrhash.h
+++ b/src/xrhash.h
@@ -1,63 +1,66 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <errno.h>
+
+#ifndef XRHASH_SLOTS
#define XRHASH_SLOTS 8192
+#endif
#define XRHASH_MOD (XRHASH_SLOTS - 91)
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
/* should never return 0, should return -ve on error */
typedef int (*hashfn)(void*);
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
void * key;
void * value;
int hashcode;
XRHashLink * next;
};
typedef struct xrhash
{
hashfn hash;
cmpfn cmp;
size_t count;
XRHashLink * buckets[XRHASH_SLOTS];
} XRHash;
/* create a new empty hash, return NULL on error */
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
/* return XRHASH_ADDED on success, else XRHASH_ADD_FAILED */
int xr_hash_add( XRHash * xr, void * key, void * value );
/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_contains( XRHash * xr, void * key );
/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_get( XRHash * xr, void * key, void **dataout );
/* returns XRHASH_REMOVED or XRHASH_REMOVE_FAILED */
int xr_hash_remove( XRHash * xr, void * key );
#endif
|
inorton/xrhash | 22abf69b23661e1aa7785d07eb10e7d2634daba7 | timer for insertions in test app | diff --git a/src/SConscript b/src/SConscript
index e94e29d..6a79319 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -1,6 +1,6 @@
env = Environment()
env.Append(CFLAGS="-Wall -Werror")
-env.Append(CFLAGS="-g -O0")
+# env.Append(CFLAGS="-g -O0")
env.Library( "xrhash","xrhash.c");
env.Program( "xrhash-test.c",LIBS="xrhash",LIBPATH="." );
diff --git a/src/xrhash-test.c b/src/xrhash-test.c
index 622f23e..5e22216 100644
--- a/src/xrhash-test.c
+++ b/src/xrhash-test.c
@@ -1,53 +1,90 @@
#include <stdio.h>
#include "xrhash.h"
#include <assert.h>
+#include <sys/time.h>
+
#define TEST_STR "foo%d"
+long long
+timeval_diff(struct timeval *difference,
+ struct timeval *end_time,
+ struct timeval *start_time
+ )
+{
+ struct timeval temp_diff;
+
+ if(difference==NULL)
+ {
+ difference=&temp_diff;
+ }
+
+ difference->tv_sec =end_time->tv_sec -start_time->tv_sec ;
+ difference->tv_usec=end_time->tv_usec-start_time->tv_usec;
+
+ /* Using while instead of if below makes the code slightly more robust. */
+
+ while(difference->tv_usec<0)
+ {
+ difference->tv_usec+=1000000;
+ difference->tv_sec -=1;
+ }
+
+ return 1000000LL*difference->tv_sec+
+ difference->tv_usec;
+
+} /* timeval_diff() */
+
int main( int argc, char** argv )
{
+ struct timeval tstart;
+ struct timeval tend;
+
XRHash * xr = xr_init_hash( NULL, NULL );
int x = 0;
int contains = -1;
- int datacount = 500000;
+ int datacount = 16000;
int datalen = 10 + strlen(TEST_STR); /* ten digits */
char* data_vec = (char*) malloc ( datacount * datalen * sizeof(char) );
char * newstr = data_vec;
- fprintf(stderr,"test insertion and contains\n");
+ fprintf(stderr,"test add\n");
+ gettimeofday( &tstart, 0x0 );
while ( x++ < datacount ){
snprintf(newstr,datalen,TEST_STR,x);
xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
- contains = xr_hash_contains( xr, newstr );
- assert( contains == XRHASH_EXISTS_TRUE );
+/* contains = xr_hash_contains( xr, newstr );*/
+/* assert( contains == XRHASH_EXISTS_TRUE ); */
newstr += 4;
}
-
+ gettimeofday( &tend, 0x0 );
+ fprintf(stderr,"* avg %lld us per add", (timeval_diff(NULL,&tend,&tstart))/datacount );
+ fprintf(stderr,"\n");
fprintf(stderr,"test get\n");
newstr = data_vec;
x = 0;
while ( x++ < datacount ){
int got = -1;
contains = xr_hash_get( xr, newstr, (void**) &got );
assert( contains == XRHASH_EXISTS_TRUE );
assert( got == x );
newstr += 4;
}
fprintf(stderr,"test remove\n");
newstr = data_vec;
x = 0;
while ( x++ < datacount ){
contains = xr_hash_remove( xr, newstr );
assert( contains == XRHASH_REMOVED );
newstr += 4;
}
free( xr );
free(data_vec);
return 0;
}
|
inorton/xrhash | 4d6f896513d0e24a87c41d422364051e8bde8e64 | now more like a dictionary, can add/test/get and remove | diff --git a/src/xrhash-test.c b/src/xrhash-test.c
index bfad1b8..622f23e 100644
--- a/src/xrhash-test.c
+++ b/src/xrhash-test.c
@@ -1,33 +1,53 @@
#include <stdio.h>
#include "xrhash.h"
#include <assert.h>
#define TEST_STR "foo%d"
int main( int argc, char** argv )
{
XRHash * xr = xr_init_hash( NULL, NULL );
int x = 0;
int contains = -1;
- int datacount = 1000000;
+ int datacount = 500000;
int datalen = 10 + strlen(TEST_STR); /* ten digits */
char* data_vec = (char*) malloc ( datacount * datalen * sizeof(char) );
char * newstr = data_vec;
+ fprintf(stderr,"test insertion and contains\n");
while ( x++ < datacount ){
snprintf(newstr,datalen,TEST_STR,x);
- xr_hash_add( xr, newstr );
+ xr_hash_add( xr, newstr, (void*) x ); /* store value of x as a pointer in $xr{$newstr} */
contains = xr_hash_contains( xr, newstr );
assert( contains == XRHASH_EXISTS_TRUE );
+ newstr += 4;
+ }
- xr_hash_remove( xr, newstr );
+ fprintf(stderr,"test get\n");
+ newstr = data_vec;
+ x = 0;
+ while ( x++ < datacount ){
+ int got = -1;
+ contains = xr_hash_get( xr, newstr, (void**) &got );
+ assert( contains == XRHASH_EXISTS_TRUE );
+ assert( got == x );
+
+ newstr += 4;
+ }
+ fprintf(stderr,"test remove\n");
+ newstr = data_vec;
+ x = 0;
+ while ( x++ < datacount ){
+ contains = xr_hash_remove( xr, newstr );
+ assert( contains == XRHASH_REMOVED );
newstr += 4;
}
free( xr );
+ free(data_vec);
return 0;
}
diff --git a/src/xrhash.c b/src/xrhash.c
index bcdbc5e..690805d 100644
--- a/src/xrhash.c
+++ b/src/xrhash.c
@@ -1,202 +1,231 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
-inline int xr__get_hashcode( XRHash * xr, void * data )
+inline int xr__get_hashcode( XRHash * xr, void * key )
{
int ret = 0;
- if ( data == NULL )
- return XRHASH_NULL_DATA;
+ if ( key == NULL )
+ return XRHASH_NULL_KEY;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
- ret = (*xr->hash)( data );
+ ret = (*xr->hash)( key );
if ( ret <= 0 )
return XRHASH_HASHCODE_ERROR;
return ret;
}
inline int xr__get_index( XRHash * xr, int hashcode )
{
int index = 0;
if ( hashcode <= 0 )
return hashcode;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
index = hashcode;
while ( index >= XRHASH_SLOTS )
index = index % XRHASH_MOD;
return index;
}
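/* worked example (illustrative numbers): with the default XRHASH_SLOTS of
 * 8192, XRHASH_MOD is 8101, so a hashcode of 100000 folds to
 * 100000 % 8101 = 2788, which is below 8192 and becomes the bucket index */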
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
/* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
}
return table;
}
-int xr_hash_add( XRHash * xr, void * data )
+int xr_hash_add( XRHash * xr, void * key, void * value )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
- int hashcode = xr__get_hashcode( xr, data );
+ int hashcode = xr__get_hashcode( xr, key );
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = xr->buckets[index];
slot->hashcode = hashcode;
- slot->data = data;
+ slot->key = key;
+ slot->value = value;
slot->next = NULL;
xr->count++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
- if ( (*xr->cmp)(data,slot->data) == 0 ){
+ if ( (*xr->cmp)(key,slot->key) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->hashcode == 0 ){
break;
}
- if ( slot->data == NULL ){
+ if ( slot->key == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->hashcode = hashcode;
- slot->data = data;
+ slot->key = key;
+ slot->value = value;
slot->next = NULL;
xr->count++;
return XRHASH_ADDED;
}
-int xr_hash_contains( XRHash * xr, void * data )
+int xr_hash_contains( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
- int hashcode = xr__get_hashcode( xr, data );
+ int hashcode = xr__get_hashcode( xr, key );
int index = 0;
if ( hashcode <= 0 ) return hashcode; /* error */
index = xr__get_index( xr, hashcode );
if ( index < 0 ) return index; /* error */
slot = xr->buckets[index];
if ( slot == NULL )
return XRHASH_EXISTS_FALSE;
while ( slot != NULL )
{
- int comp_res = (*xr->cmp)(data, slot->data);
+ int comp_res = (*xr->cmp)(key, slot->key);
if ( comp_res == 0 ){
return XRHASH_EXISTS_TRUE;
}
slot = slot->next;
}
return XRHASH_EXISTS_FALSE;
}
-int xr_hash_remove( XRHash * xr, void * data )
+int xr_hash_remove( XRHash * xr, void * key )
{
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
- int hashcode = xr__get_hashcode(xr,data);
+ int hashcode = xr__get_hashcode(xr,key);
int index = xr__get_index(xr, hashcode);
if ( index <= 0 ) return index; /* one of above failed */
if ( xr->buckets[index] == NULL )
return XRHASH_REMOVED; /* not in hash */
slot = xr->buckets[index];
/* iterate slots until we find our match */
while ( slot != NULL ){
- if ( (*xr->cmp)(data,slot->data) == 0 ) {
+ if ( (*xr->cmp)(key,slot->key) == 0 ) {
/* found object - remove it */
break;
} else {
prev = slot;
slot = slot->next;
}
}
if ( slot != NULL ){ /* remove this slot */
if ( prev == NULL ){
/* remove first link in this bucket */
xr->buckets[index] = slot->next;
} else {
/* remove this link */
prev->next = slot->next;
}
xr->count--;
- slot->data = NULL;
+ slot->key = NULL;
+ slot->value = NULL;
slot->next = NULL;
free(slot);
}
/* if slot == NULL, hashcode matched but the object was
* not in the hash */
return XRHASH_REMOVED;
}
+int xr_hash_get( XRHash * xr, void * key, void **dataout )
+{
+ XRHashLink * slot = NULL;
+
+ int hashcode = xr__get_hashcode( xr, key );
+ int index = 0;
+ if ( hashcode <= 0 ) return hashcode; /* error */
+ index = xr__get_index( xr, hashcode );
+ if ( index < 0 ) return index; /* error */
+
+ slot = xr->buckets[index];
+
+ if ( slot == NULL )
+ return XRHASH_EXISTS_FALSE;
+
+ while ( slot != NULL )
+ {
+ int comp_res = (*xr->cmp)(key, slot->key);
+ if ( comp_res == 0 ){
+ *dataout = slot->value;
+ return XRHASH_EXISTS_TRUE;
+ }
+ slot = slot->next;
+ }
+ return XRHASH_EXISTS_FALSE;
+}
diff --git a/src/xrhash.h b/src/xrhash.h
index 8ec652f..1717612 100644
--- a/src/xrhash.h
+++ b/src/xrhash.h
@@ -1,60 +1,63 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#define XRHASH_SLOTS 8192
#define XRHASH_MOD (XRHASH_SLOTS - 91)
#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
-#define XRHASH_NULL_DATA -3 /* tried to insert null */
+#define XRHASH_NULL_KEY -3 /* tried to insert a null key */
#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
#define XRHASH_ADDED_ALREADY 1
#define XRHASH_ADDED 0
#define XRHASH_ADD_FAILED -1
#define XRHASH_REMOVED 0
#define XRHASH_REMOVE_FAILED -1
/* should never return 0, should return -ve on error */
typedef int (*hashfn)(void*);
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
- void * data;
+ void * key;
+ void * value;
int hashcode;
XRHashLink * next;
};
typedef struct xrhash
{
hashfn hash;
cmpfn cmp;
size_t count;
XRHashLink * buckets[XRHASH_SLOTS];
} XRHash;
/* create a new empty hash, return NULL on error */
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
/* return XRHASH_ADDED on success, else XRHASH_ADD_FAILED */
-int xr_hash_add( XRHash * xr, void * data );
+int xr_hash_add( XRHash * xr, void * key, void * value );
/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
-int xr_hash_contains( XRHash * xr, void * data );
+int xr_hash_contains( XRHash * xr, void * key );
+/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
+int xr_hash_get( XRHash * xr, void * key, void **dataout );
/* returns XRHASH_REMOVED or XRHASH_REMOVE_FAILED */
-int xr_hash_remove( XRHash * xr, void * data );
+int xr_hash_remove( XRHash * xr, void * key );
#endif
|
inorton/xrhash | ce0f38bf8aff2a819a0bf20dc9caad93e025b952 | add/contains/remove works | diff --git a/src/xrhash-test.c b/src/xrhash-test.c
index 3985ffe..bfad1b8 100644
--- a/src/xrhash-test.c
+++ b/src/xrhash-test.c
@@ -1,17 +1,33 @@
#include <stdio.h>
#include "xrhash.h"
+#include <assert.h>
+
+#define TEST_STR "foo%d"
int main( int argc, char** argv )
{
XRHash * xr = xr_init_hash( NULL, NULL );
int x = 0;
- while ( x++ < 100000 ){
- char * newstr = (char*) malloc( 10 * sizeof(char) );
+ int contains = -1;
+ int datacount = 1000000;
+
+ int datalen = 10 + strlen(TEST_STR); /* ten digits */
+
+ char* data_vec = (char*) malloc ( datacount * datalen * sizeof(char) );
+ char * newstr = data_vec;
+ while ( x++ < datacount ){
+ snprintf(newstr,datalen,TEST_STR,x);
xr_hash_add( xr, newstr );
+ contains = xr_hash_contains( xr, newstr );
+ assert( contains == XRHASH_EXISTS_TRUE );
+
+ xr_hash_remove( xr, newstr );
+ newstr += 4;
}
+ free( xr );
return 0;
}
diff --git a/src/xrhash.c b/src/xrhash.c
index 330330c..bcdbc5e 100644
--- a/src/xrhash.c
+++ b/src/xrhash.c
@@ -1,108 +1,202 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
+inline int xr__get_hashcode( XRHash * xr, void * data )
+{
+ int ret = 0;
+ if ( data == NULL )
+ return XRHASH_NULL_DATA;
+
+ if ( xr == NULL )
+ return XRHASH_HASH_INVALID;
+
+ ret = (*xr->hash)( data );
+
+ if ( ret <= 0 )
+ return XRHASH_HASHCODE_ERROR;
+ return ret;
+}
+
+inline int xr__get_index( XRHash * xr, int hashcode )
+{
+ int index = 0;
+ if ( hashcode <= 0 )
+ return hashcode;
+
+ if ( xr == NULL )
+ return XRHASH_HASH_INVALID;
+
+ index = hashcode;
+ while ( index >= XRHASH_SLOTS )
+ index = index % XRHASH_MOD;
+
+ return index;
+}
+
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
/* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
}
return table;
}
-
-
int xr_hash_add( XRHash * xr, void * data )
{
- int hashcode = 0;
- int index = 0;
XRHashLink * slot = NULL;
- XRHashLink * prev = NULL;
- if ( data == NULL )
- return XRHASH_ADD_FAILED;
-
- if ( xr == NULL )
- return XRHASH_HASH_INVALID;
+ XRHashLink * prev = NULL;
- hashcode = (*xr->hash)( data );
- index = hashcode;
- while ( index >= XRHASH_SLOTS )
- index = index % XRHASH_MOD;
+ int hashcode = xr__get_hashcode( xr, data );
+ int index = xr__get_index(xr, hashcode);
+ if ( index <= 0 ) return index; /* one of above failed */
+
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = xr->buckets[index];
slot->hashcode = hashcode;
slot->data = data;
slot->next = NULL;
xr->count++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
- if ( (data == slot->data) || ( (*xr->cmp)(data,slot->data) == 0 ) ){
+ if ( (*xr->cmp)(data,slot->data) == 0 ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->hashcode == 0 ){
break;
}
if ( slot->data == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->hashcode = hashcode;
slot->data = data;
slot->next = NULL;
xr->count++;
return XRHASH_ADDED;
}
+int xr_hash_contains( XRHash * xr, void * data )
+{
+ XRHashLink * slot = NULL;
+
+ int hashcode = xr__get_hashcode( xr, data );
+ int index = 0;
+ if ( hashcode <= 0 ) return hashcode; /* error */
+ index = xr__get_index( xr, hashcode );
+ if ( index < 0 ) return index; /* error */
+
+ slot = xr->buckets[index];
+
+ if ( slot == NULL )
+ return XRHASH_EXISTS_FALSE;
+
+ while ( slot != NULL )
+ {
+ int comp_res = (*xr->cmp)(data, slot->data);
+ if ( comp_res == 0 ){
+ return XRHASH_EXISTS_TRUE;
+ }
+ slot = slot->next;
+ }
+ return XRHASH_EXISTS_FALSE;
+}
+
+int xr_hash_remove( XRHash * xr, void * data )
+{
+ XRHashLink * slot = NULL;
+ XRHashLink * prev = NULL;
+ int hashcode = xr__get_hashcode(xr,data);
+ int index = xr__get_index(xr, hashcode);
+
+ if ( index <= 0 ) return index; /* one of above failed */
+ if ( xr->buckets[index] == NULL )
+ return XRHASH_REMOVED; /* not in hash */
+
+ slot = xr->buckets[index];
+
+ /* iterate slots until we find our match */
+ while ( slot != NULL ){
+ if ( (*xr->cmp)(data,slot->data) == 0 ) {
+ /* found object - remove it */
+ break;
+ } else {
+ prev = slot;
+ slot = slot->next;
+ }
+ }
+
+ if ( slot != NULL ){ /* remove this slot */
+ if ( prev == NULL ){
+ /* remove first link in this bucket */
+ xr->buckets[index] = slot->next;
+ } else {
+ /* remove this link */
+ prev->next = slot->next;
+ }
+ xr->count--;
+ slot->data = NULL;
+ slot->next = NULL;
+ free(slot);
+ }
+
+ /* if slot == NULL, hashcode matched but the object was
+ * not in the hash */
+
+ return XRHASH_REMOVED;
+}
+
+
diff --git a/src/xrhash.h b/src/xrhash.h
index ebf1284..8ec652f 100644
--- a/src/xrhash.h
+++ b/src/xrhash.h
@@ -1,51 +1,60 @@
#ifndef XRHASH_H
#define XRHASH_H
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#define XRHASH_SLOTS 8192
#define XRHASH_MOD (XRHASH_SLOTS - 91)
+#define XRHASH_HASH_INVALID -2 /* hashtable not initialized */
+#define XRHASH_NULL_DATA -3 /* tried to insert null */
+#define XRHASH_HASHCODE_ERROR -4 /* hashfn returned <= 0 */
+
#define XRHASH_EXISTS_TRUE 0
#define XRHASH_EXISTS_FALSE 1
-#define XRHASH_ADDED_ALREADY 1
-#define XRHASH_ADDED 0
-#define XRHASH_ADD_FAILED -1
-#define XRHASH_HASH_INVALID -2
+#define XRHASH_ADDED_ALREADY 1
+#define XRHASH_ADDED 0
+#define XRHASH_ADD_FAILED -1
+
+#define XRHASH_REMOVED 0
+#define XRHASH_REMOVE_FAILED -1
/* should never return 0, should return -ve on error */
typedef int (*hashfn)(void*);
typedef int (*cmpfn)(void*,void*);
typedef struct link XRHashLink;
struct link
{
void * data;
int hashcode;
XRHashLink * next;
};
typedef struct xrhash
{
hashfn hash;
cmpfn cmp;
size_t count;
XRHashLink * buckets[XRHASH_SLOTS];
} XRHash;
/* create a new empty hash, return NULL on error */
XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
/* return XRHASH_ADDED on success, else XRHASH_ADD_FAILED */
int xr_hash_add( XRHash * xr, void * data );
/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
int xr_hash_contains( XRHash * xr, void * data );
+/* returns XRHASH_REMOVED or XRHASH_REMOVE_FAILED */
+int xr_hash_remove( XRHash * xr, void * data );
+
#endif
|
inorton/xrhash | 0127d663bc9119a03434ace83148cc6cd51aa558 | correctly find a hash bucket | diff --git a/src/SConscript
index 4fba68c..e94e29d 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -1,5 +1,6 @@
env = Environment()
env.Append(CFLAGS="-Wall -Werror")
+env.Append(CFLAGS="-g -O0")
env.Library( "xrhash","xrhash.c");
env.Program( "xrhash-test.c",LIBS="xrhash",LIBPATH="." );
diff --git a/src/xrhash-test.c b/src/xrhash-test.c
index 40f1d69..3985ffe 100644
--- a/src/xrhash-test.c
+++ b/src/xrhash-test.c
@@ -1,17 +1,17 @@
#include <stdio.h>
#include "xrhash.h"
int main( int argc, char** argv )
{
XRHash * xr = xr_init_hash( NULL, NULL );
int x = 0;
- while ( x++ < 1000000 ){
+ while ( x++ < 100000 ){
char * newstr = (char*) malloc( 10 * sizeof(char) );
xr_hash_add( xr, newstr );
}
return 0;
}
diff --git a/src/xrhash.c b/src/xrhash.c
index 4059b68..330330c 100644
--- a/src/xrhash.c
+++ b/src/xrhash.c
@@ -1,105 +1,108 @@
#include "xrhash.h"
int xr__hash_is_pointer( void * ptr )
{
int ret = 0;
memcpy(&ret,ptr,sizeof(int));
return ret;
}
int xr__cmp_pointers( void * a, void * b )
{
if ( a > b ){
return -1;
} else if ( a < b ){
return 1;
}
return 0;
}
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
/* no hasher or comparator supplied! just work on pointers */
hash = &xr__hash_is_pointer;
cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
}
return table;
}
int xr_hash_add( XRHash * xr, void * data )
{
int hashcode = 0;
int index = 0;
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
if ( data == NULL )
return XRHASH_ADD_FAILED;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
hashcode = (*xr->hash)( data );
index = hashcode;
while ( index >= XRHASH_SLOTS )
index = index % XRHASH_MOD;
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = xr->buckets[index];
slot->hashcode = hashcode;
slot->data = data;
slot->next = NULL;
xr->count++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (data == slot->data) || ( (*xr->cmp)(data,slot->data) == 0 ) ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
+ if ( slot->hashcode == 0 ){
+ break;
+ }
if ( slot->data == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->hashcode = hashcode;
slot->data = data;
slot->next = NULL;
xr->count++;
return XRHASH_ADDED;
}
|
inorton/xrhash | c45979c2db345109ec7b767bbef8c63107442258 | add simple test - incomplete | diff --git a/src/SConscript b/src/SConscript
index 7fbe1c2..4fba68c 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -1 +1,5 @@
-Library( "xrhash","xrhash.c");
+env = Environment()
+env.Append(CFLAGS="-Wall -Werror")
+env.Library( "xrhash","xrhash.c");
+
+env.Program( "xrhash-test.c",LIBS="xrhash",LIBPATH="." );
diff --git a/src/xrhash-test.c b/src/xrhash-test.c
new file mode 100644
index 0000000..40f1d69
--- /dev/null
+++ b/src/xrhash-test.c
@@ -0,0 +1,17 @@
+#include <stdio.h>
+#include "xrhash.h"
+
+int main( int argc, char** argv )
+{
+ XRHash * xr = xr_init_hash( NULL, NULL );
+
+ int x = 0;
+ while ( x++ < 1000000 ){
+ char * newstr = (char*) malloc( 10 * sizeof(char) );
+ xr_hash_add( xr, newstr );
+
+ }
+
+
+ return 0;
+}
diff --git a/src/xrhash.c b/src/xrhash.c
index 8daf9db..4059b68 100644
--- a/src/xrhash.c
+++ b/src/xrhash.c
@@ -1,87 +1,105 @@
#include "xrhash.h"
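+/* note: despite its name, this hashes the first sizeof(int) bytes of the
+ * data that ptr points at (via memcpy), not the pointer value itself */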
+int xr__hash_is_pointer( void * ptr )
+{
+ int ret = 0;
+ memcpy(&ret,ptr,sizeof(int));
+ return ret;
+}
+
+int xr__cmp_pointers( void * a, void * b )
+{
+ if ( a > b ){
+ return -1;
+ } else if ( a < b ){
+ return 1;
+ }
+ return 0;
+}
+
XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
{
XRHash * table = NULL;
if ( ( hash == NULL ) || ( cmp == NULL ) ){
-    /* no hasher or comparator supplied! */
- return NULL;
+    /* no hasher or comparator supplied! just work on pointers */
+ hash = &xr__hash_is_pointer;
+ cmp = &xr__cmp_pointers;
}
table = (XRHash*) malloc(1 * sizeof(XRHash));
if ( table != NULL ){
memset( table, 0, sizeof(XRHash));
table->hash = hash;
table->cmp = cmp;
table->count = 0;
}
return table;
}
int xr_hash_add( XRHash * xr, void * data )
{
int hashcode = 0;
int index = 0;
XRHashLink * slot = NULL;
XRHashLink * prev = NULL;
if ( data == NULL )
return XRHASH_ADD_FAILED;
if ( xr == NULL )
return XRHASH_HASH_INVALID;
hashcode = (*xr->hash)( data );
index = hashcode;
while ( index >= XRHASH_SLOTS )
index = index % XRHASH_MOD;
/* new node, first hit */
if ( xr->buckets[index] == NULL ){
xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = xr->buckets[index];
slot->hashcode = hashcode;
slot->data = data;
slot->next = NULL;
xr->count++;
return XRHASH_ADDED;
} else {
slot = xr->buckets[index];
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
/* collision, add a link */
while ( slot != NULL ){
if ( (data == slot->data) || ( (*xr->cmp)(data,slot->data) == 0 ) ){
/* same object, do nothing */
return XRHASH_ADDED_ALREADY;
} else {
if ( slot->data == NULL ){
break; /* use slot */
}
/* check next slot */
prev = slot;
slot = slot->next;
}
}
/* if slot is null, create a new link */
if ( slot == NULL ){
prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
slot = prev->next;
if ( slot == NULL ){
errno = ENOMEM;
return XRHASH_ADD_FAILED;
}
}
slot->hashcode = hashcode;
slot->data = data;
slot->next = NULL;
xr->count++;
return XRHASH_ADDED;
}
|
inorton/xrhash | 51d851a332aca8884752c6d1f58d4eef8dbc09af | XRHash first checkin | diff --git a/SConstruct b/SConstruct
new file mode 100644
index 0000000..bdadb56
--- /dev/null
+++ b/SConstruct
@@ -0,0 +1 @@
+SConscript("src/SConscript");
diff --git a/src/SConscript b/src/SConscript
new file mode 100644
index 0000000..7fbe1c2
--- /dev/null
+++ b/src/SConscript
@@ -0,0 +1 @@
+Library( "xrhash","xrhash.c");
diff --git a/src/xrhash.c b/src/xrhash.c
new file mode 100644
index 0000000..8daf9db
--- /dev/null
+++ b/src/xrhash.c
@@ -0,0 +1,87 @@
+#include "xrhash.h"
+
+XRHash * xr_init_hash( hashfn hash , cmpfn cmp )
+{
+ XRHash * table = NULL;
+
+ if ( ( hash == NULL ) || ( cmp == NULL ) ){
+    /* no hasher or comparator supplied! */
+ return NULL;
+ }
+
+ table = (XRHash*) malloc(1 * sizeof(XRHash));
+ if ( table != NULL ){
+ memset( table, 0, sizeof(XRHash));
+ table->hash = hash;
+ table->cmp = cmp;
+ table->count = 0;
+ }
+ return table;
+}
+
+
+
+int xr_hash_add( XRHash * xr, void * data )
+{
+ int hashcode = 0;
+ int index = 0;
+ XRHashLink * slot = NULL;
+ XRHashLink * prev = NULL;
+ if ( data == NULL )
+ return XRHASH_ADD_FAILED;
+
+ if ( xr == NULL )
+ return XRHASH_HASH_INVALID;
+
+ hashcode = (*xr->hash)( data );
+ index = hashcode;
+ while ( index >= XRHASH_SLOTS )
+ index = index % XRHASH_MOD;
+
+ /* new node, first hit */
+ if ( xr->buckets[index] == NULL ){
+ xr->buckets[index] = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
+ slot = xr->buckets[index];
+ slot->hashcode = hashcode;
+ slot->data = data;
+ slot->next = NULL;
+ xr->count++;
+ return XRHASH_ADDED;
+ } else {
+ slot = xr->buckets[index];
+ if ( slot == NULL ){
+ errno = ENOMEM;
+ return XRHASH_ADD_FAILED;
+ }
+ }
+
+ /* collision, add a link */
+ while ( slot != NULL ){
+ if ( (data == slot->data) || ( (*xr->cmp)(data,slot->data) == 0 ) ){
+ /* same object, do nothing */
+ return XRHASH_ADDED_ALREADY;
+ } else {
+ if ( slot->data == NULL ){
+ break; /* use slot */
+ }
+ /* check next slot */
+ prev = slot;
+ slot = slot->next;
+ }
+ }
+ /* if slot is null, create a new link */
+ if ( slot == NULL ){
+ prev->next = (XRHashLink*)malloc(1 * sizeof(XRHashLink));
+ slot = prev->next;
+ if ( slot == NULL ){
+ errno = ENOMEM;
+ return XRHASH_ADD_FAILED;
+ }
+ }
+ slot->hashcode = hashcode;
+ slot->data = data;
+ slot->next = NULL;
+ xr->count++;
+ return XRHASH_ADDED;
+}
+
diff --git a/src/xrhash.h b/src/xrhash.h
new file mode 100644
index 0000000..ebf1284
--- /dev/null
+++ b/src/xrhash.h
@@ -0,0 +1,51 @@
+#ifndef XRHASH_H
+#define XRHASH_H
+
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#define XRHASH_SLOTS 8192
+#define XRHASH_MOD (XRHASH_SLOTS - 91)
+
+#define XRHASH_EXISTS_TRUE 0
+#define XRHASH_EXISTS_FALSE 1
+
+#define XRHASH_ADDED_ALREADY 1
+#define XRHASH_ADDED 0
+#define XRHASH_ADD_FAILED -1
+#define XRHASH_HASH_INVALID -2
+
+/* should never return 0, should return -ve on error */
+typedef int (*hashfn)(void*);
+typedef int (*cmpfn)(void*,void*);
+
+typedef struct link XRHashLink;
+struct link
+{
+ void * data;
+ int hashcode;
+ XRHashLink * next;
+};
+
+typedef struct xrhash
+{
+ hashfn hash;
+ cmpfn cmp;
+ size_t count;
+
+ XRHashLink * buckets[XRHASH_SLOTS];
+} XRHash;
+
+
+/* create a new empty hash, return NULL on error */
+XRHash * xr_init_hash( int (*hash)(void*) , int(*cmp)(void*,void*) );
+
+/* return XRHASH_ADDED on success, else XRHASH_ADD_FAILED */
+int xr_hash_add( XRHash * xr, void * data );
+
+/* returns XRHASH_EXISTS_TRUE or XRHASH_EXISTS_FALSE */
+int xr_hash_contains( XRHash * xr, void * data );
+
+
+#endif
|
emacsattic/fetchmail-mode | ad86a0a9d95c378a3e5b5ab39208d4931484d0a9 | initial import | diff --git a/fetchmail-mode.el b/fetchmail-mode.el
new file mode 100644
index 0000000..31ba5ff
--- /dev/null
+++ b/fetchmail-mode.el
@@ -0,0 +1,153 @@
+;;; fetchmail-mode.el -*- Emacs-Lisp -*-
+;;
+;; Mode for editing .fetchmailrc files
+;;
+;; Created: <Mon Oct 30 20:13:15 EST 2000>
+;; Time-stamp: <17.02.2001 17:59:43>
+;; Version: 0.1
+;; Keywords: fetchmail,config
+;; Author: Alex Shinn <[email protected]>
+;;
+;; This program is free software; you can redistribute it and/or
+;; modify it under the terms of the GNU General Public License as
+;; published by the Free Software Foundation; either version 2 of
+;; the License, or (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public
+;; License along with this program; if not, write to the Free
+;; Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+;; MA 02111-1307 USA
+
+;; Commentary:
+;;
+;; This file provides a major mode for editing .fetchmailrc files.
+;; It offers syntax highlighting and indentation.
+;;
+;; To use it, put the following in your .emacs:
+;;
+;; (autoload 'fetchmail-mode "fetchmail-mode.el" "Mode for editing .fetchmailrc files" t)
+;;
+;; You may also want something like:
+;;
+;; (setq auto-mode-alist
+;; (append '(("\..fetchmailrc$" . fetchmail-mode))
+;; auto-mode-alist))
+
+;; Create mode-specific tables.
+(defvar fetchmail-mode-syntax-table nil
+ "Syntax table used while in fetchmail-mode" )
+(if fetchmail-mode-syntax-table
+ () ; Do not change the table if it is already set up.
+ (setq fetchmail-mode-syntax-table (make-syntax-table))
+ (modify-syntax-entry ?\\ "\\ " fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\, "." fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\: "." fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\; "." fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\" "\"" fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\' "\"" fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\n "> " fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\# "< " fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\( "() " fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\) ")( " fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\[ "(] " fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\] ")[ " fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\{ "(} " fetchmail-mode-syntax-table)
+ (modify-syntax-entry ?\} "){ " fetchmail-mode-syntax-table)
+ )
+
+(defvar fetchmail-mode-map nil
+ "Keymap used in fetchmail-mode" )
+
+(if fetchmail-mode-map nil
+ (setq fetchmail-mode-map (make-sparse-keymap))
+ (define-key fetchmail-mode-map "\t" 'fetchmail-complete)
+ (define-key fetchmail-mode-map "\C-c\C-c" 'comment-region) )
+(defvar fetchmail-mode-hook nil
+ "Hooks to run in fetchmail-mode" )
+
+(defvar fetchmail-keywords nil
+ "Keywords used for fetchmail-mode" )
+
+(unless fetchmail-keywords
+ (setq fetchmail-keywords
+ '("poll" "skip" "via" "in" "proto" "protocol" "uidl" "no"
+"port" "auth" "authenticate" "timeout" "envelope" "qvirtual" "envelope"
+"aka" "localdomains" "interface" "monitor" "dns" "user" "username" "is"
+"folder" "pass" "password" "smtp" "smtphost" "smtpaddress" "antispam"
+"mda" "pre" "preconnect" "post" "postconnect" "keep" "flush" "fetchall"
+"rewrite" "forcecr" "stripcr" "pass8bits" "dropstatus" "limit"
+"fetchlimit" "batchlimit" "expunge" "pop2" "POP2" "pop3" "POP3" "imap"
+"IMAP" "imap-k4" "IMAP-K4" "apop" "APOP" "rpop" "RPOP" "kpop" "KPOP"
+"etrn" "ETRN" "login" "kerberos" "kerberos_v5" "logfile" "daemon"
+"syslog" "invisible" "and" "with" "has" "wants" "options" "here" "there"
+"aka" "set")))
+
+(defvar fetchmail-keyword-table nil
+ "Completion table for fetchmail-mode" )
+(unless fetchmail-keyword-table
+ (setq fetchmail-keyword-table (make-vector 8 0))
+ (mapcar (lambda (x) (intern x fetchmail-keyword-table))
+ fetchmail-keywords))
+
+(defvar fetchmail-font-lock-keywords nil
+ "Default expressions to highlight in fetchmail-mode" )
+
+(unless fetchmail-font-lock-keywords
+ (setq fetchmail-font-lock-keywords
+ (list (list (concat "\\b" (regexp-opt
+ fetchmail-keywords t) "\\b")
+ 0 'font-lock-keyword-face ))))
+
+(defun fetchmail-complete ()
+ "Tab completion for fetchmail-mode"
+ (interactive)
+ (let* ((end (point))
+ (beg (save-excursion
+ (skip-syntax-backward "w")
+ (point)))
+ (pattern (buffer-substring beg end))
+ (table fetchmail-keyword-table)
+ (completion (try-completion pattern table)))
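+    ;; try-completion returns t for an exact match, nil when nothing
+    ;; matches, and otherwise the longest common prefix of the candidates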
+ (cond ((eq completion t))
+ ((null completion)
+ (error "Can't find completion for \"%s\"" pattern))
+ ((not (string-equal pattern completion))
+ (delete-region beg end)
+ (insert completion))
+ (t
+ (message "Making completion list...")
+ (let ((list (all-completions pattern table)))
+ (if (fboundp 'prettify)
+ (setq list (funcall 'prettify list)))
+ (with-output-to-temp-buffer "*Help*"
+ (display-completion-list list)))
+ (message "Making completion list...%s" "done")))))
+
+
+(defun fetchmail-mode ()
+ "Mode for editing .fetchmailrc files"
+ (interactive)
+ (kill-all-local-variables)
+ (use-local-map fetchmail-mode-map) ; This provides the localkeymap.
+  (use-local-map fetchmail-mode-map) ; This provides the local keymap.
+  (setq mode-name "Fetchmail") ; This name goes into the modeline.
+ (run-hooks 'fetchmail-mode-hook) ; Run each time mode is called
+ (set-syntax-table fetchmail-mode-syntax-table)
+
+ ;; -cc-
+ ;; Font lock support
+ (make-local-variable 'font-lock-defaults)
+ (setq font-lock-defaults '(fetchmail-font-lock-keywords nil t))
+
+ (setq comment-start "#")
+ )
+
+
+
+(provide 'fetchmail-mode)
+;;; fetchmail-mode.el ends here
|
hinrik/hailo | efbf5d03eddf14007d4736027f0bb21487ff747f | Preserve casing of words like "ATMs" | diff --git a/Changes b/Changes
index e5c619a..57d4d4f 100644
--- a/Changes
+++ b/Changes
@@ -1,523 +1,524 @@
Revision history for Hailo
{{$NEXT}}
- When using --train-fast, remove the "flushing cache" message when done
- Word tokenizer:
* Improve tokenization of email addresses
* Use backspace instead of escape as a magic character when
capitalizing text in multiple passes, since it's less likely to
appear in tokens.
+ * Preserve casing of words like "ATMs"
0.69 2011-05-07 04:02:38
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
* Match IRC channel names (#foo, &bar, +baz)
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
* Match </foo> as a single token
- Depend on MouseX::Getopt 0.33 to fix test failures
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '—' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index a6cb20c..3ee9176 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,236 +1,236 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $SPACE = qr/\s/;
my $NONSPACE = qr/\S/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/[^\w\s]+/;
my $BARE_WORD = qr/\w+/;
my $CURRENCY = qr/[¤¥¢£\$]/;
my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
-my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
+my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} | \p{Upper}{2,} \p{Lower} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ (?: \. [A-Z]{2,4} )* /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
my $IRC_NICK = qr/<(?: |[&~]?[@%+~&])?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
my $IRC_CHAN = qr/[#&+][^ \a\0\012\015,:]{1,199}/;
my $NUMERO = qr/#[0-9]+/;
my $CLOSE_TAG = qr{</[-\w]+>};
my $CASED_WORD = qr/$CLOSE_TAG|$IRC_NICK|$IRC_CHAN|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$IRC_CHAN|$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$NUMERO|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# This string is added to (and later removed from) the output string when
# capitalizing it in multiple passes. We use backspace, because that is
# unlikely to be in the input. This dirty approach can probably be replaced
# with regex grammars, but I haven't bothered to learn to use those.
my $SEPARATOR = "\x08";
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K\s*\n+\s*//;
$input =~ s/\s*\n+\s*/ /gm;
while (length $input) {
# remove the next chunk of whitespace
$input =~ s/^$SPACE+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
- # Mixed-case words like "WoW"
+ # Mixed-case words like "WoW" or "ATMs"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/$SEPARATOR\u$1$SEPARATOR/go;
$reply =~ s/$SEPARATOR$WORD_STRICT$SEPARATOR$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/$SEPARATOR//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
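A minimal sketch (not part of the commit) of what the widened $MIXED_CASE
alternation above changes; the sample strings come from the test file below.
The old \p{Lower}+\p{Upper} branch only caught case flips like "WoW", while
the new \p{Upper}{2,}\p{Lower} branch also preserves plural acronyms like
"ATMs" instead of lower-casing them:

    use 5.010;
    use utf8;
    use Hailo::Tokenizer::Words;

    my $toke = Hailo::Tokenizer::Words->new();
    for my $str ('I hate WoW.', 'ATMs in Baltimore') {
        my $tokens = $toke->make_tokens($str);
        # each token is a [spacing_attribute, text] pair; print the text
        say join ' ', map { $_->[1] } @$tokens;
    }
    # prints "I hate WoW ." and "ATMs in baltimore" with this commit;
    # before it, "ATMs" came out as "atms"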
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index 062e52b..c3cafa1 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,532 +1,537 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi@foo>],
'tumi@foo',
],
[
'[email protected]',
[qw<[email protected]>],
'[email protected]',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
[
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM",
[qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>, '22:55 PM'],
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM.",
],
[
'<@literal> oh hi < literal> what is going on?',
[qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
'<@literal> oh hi < literal> what is going on?',
],
[
'It costs $.50, no, wait, it cost $2.50... or 50¢',
[qw<it costs $.50>, ',', 'no', ',', 'wait', ',', qw<it cost $2.50 ... or 50¢>],
'It costs $.50, no, wait, it cost $2.50... or 50¢.',
],
[
'10pt or 12em or 15cm',
[qw<10pt or 12em or 15cm>],
'10pt or 12em or 15cm.',
],
[
'failo is #1',
[qw<failo is>, '#1'],
'Failo is #1.',
],
[
'We are in #perl',
[qw<we are in>, '#perl'],
'We are in #perl.',
],
[
'</foo>',
[qw{</foo>}],
'</foo>',
],
+ [
+ 'ATMs in Baltimore',
+ [qw{ATMs in baltimore}],
+ 'ATMs in baltimore.',
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
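For reference, a hedged sketch of the round trip the make_output subtest
above drives; the input string is one of the cases in the test table:

    use 5.010;
    use utf8;
    use Hailo::Tokenizer::Words;

    my $toke   = Hailo::Tokenizer::Words->new();
    my $tokens = $toke->make_tokens('on example.com? yes');
    my $output = $toke->make_output($tokens);
    say $output;    # "On example.com? Yes." per the expected value above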
diff --git a/t/tokenizer/Words/utf8-text.t b/t/tokenizer/Words/utf8-text.t
index 6dcd6e2..c260ced 100644
--- a/t/tokenizer/Words/utf8-text.t
+++ b/t/tokenizer/Words/utf8-text.t
@@ -1982,779 +1982,779 @@ __[ YAML::XS result ]__
[~790 lines of mojibake-damaged YAML::XS context elided: tokenized
fragments of a UTF-8 sample text (Amharic, runic, braille, box-drawing
and "compact font selection" passages, apparently Markus Kuhn's UTF-8
demo file) that this hunk carries unchanged. The recoverable substantive
change is that the mixed-case Greek and Cyrillic sample words are no
longer lower-cased:]
-  - αβγδωαβγδω
+  - ΑΒΓΔΩαβγδω
-  - абвгдабвгд
+  - АБВГДабвгд
|
hinrik/hailo | eab11b8c5b94f0b52a609468e236a50bf71264d3 | Reformat the latest Changes entries | diff --git a/Changes b/Changes
index 6b65ecd..e5c619a 100644
--- a/Changes
+++ b/Changes
@@ -1,523 +1,523 @@
Revision history for Hailo
{{$NEXT}}
- - Word tokenizer: Improve tokenization of email addresses
-
- When using --train-fast, remove the "flushing cache" message when done
- - Word tokenizer: Use backspace instead of escape as a magic character
- when capitalizing text in multiple passes, since it's less likely to
- appear in tokens.
+ - Word tokenizer:
+ * Improve tokenization of email addresses
+ * Use backspace instead of escape as a magic character when
+ capitalizing text in multiple passes, since it's less likely to
+ appear in tokens.
0.69 2011-05-07 04:02:38
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
* Match IRC channel names (#foo, &bar, +baz)
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
* Match </foo> as a single token
- Depend on MouseX::Getopt 0.33 to fix test failures
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help; work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
|
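The "backspace instead of escape" entry above refers to the multi-pass
capitalization in Words.pm (the $SEPARATOR passes are visible in the diff
below). A simplified standalone sketch of the trick, using toy patterns
rather than Hailo's real ones: the capitalization patterns must look at two
words at a time, so consecutive matches overlap and a single s///g pass
skips every other word. Pass one therefore marks its matches with a
sentinel that is unlikely to occur in input (backspace, \x08), pass two
anchors on the sentinel to catch the skipped words, and the sentinel is
stripped at the end:

    use 5.010;

    my $SEP = "\x08";
    my $s   = "a. b. c. d";
    $s =~ s/\w+\. \K(\w+)/$SEP\u$1$SEP/g;   # pass one: marks b and d, skips c
    $s =~ s/$SEP\w+$SEP\. \K(\w+)/\u$1/g;   # pass two: catches the skipped c
    $s =~ s/$SEP//g;                        # remove the sentinel
    say $s;                                 # "a. B. C. D"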
hinrik/hailo | 3f981311d829f5223bdd8d12e7d0b4e1bb8bccb7 | Allow double prefixes on IRC nicks | diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index d85dcaf..a6cb20c 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,236 +1,236 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $SPACE = qr/\s/;
my $NONSPACE = qr/\S/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/[^\w\s]+/;
my $BARE_WORD = qr/\w+/;
my $CURRENCY = qr/[¤¥¢£\$]/;
my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ (?: \. [A-Z]{2,4} )* /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
-my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
+my $IRC_NICK = qr/<(?: |[&~]?[@%+~&])?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
my $IRC_CHAN = qr/[#&+][^ \a\0\012\015,:]{1,199}/;
my $NUMERO = qr/#[0-9]+/;
my $CLOSE_TAG = qr{</[-\w]+>};
my $CASED_WORD = qr/$CLOSE_TAG|$IRC_NICK|$IRC_CHAN|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$IRC_CHAN|$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$NUMERO|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# This string is added to (and later removed from) the output string when
# capitalizing it in multiple passes. We use backspace, because that is
# unlikely to be in the input. This dirty approach can probably be replaced
# with regex grammars, but I haven't bothered to learn to use those.
my $SEPARATOR = "\x08";
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K\s*\n+\s*//;
$input =~ s/\s*\n+\s*/ /gm;
while (length $input) {
# remove the next chunk of whitespace
$input =~ s/^$SPACE+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/$SEPARATOR\u$1$SEPARATOR/go;
$reply =~ s/$SEPARATOR$WORD_STRICT$SEPARATOR$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/$SEPARATOR//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
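A quick illustrative check (not from the commit) of what the relaxed
$IRC_NICK pattern accepts: the new optional [&~] before the status
character admits the doubled "&@" and "~@" prefixes some IRC servers
show for admins and owners:

    use 5.010;

    my $IRC_NICK = qr/<(?: |[&~]?[@%+~&])?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
    say /^$IRC_NICK$/ ? "match:    $_" : "no match: $_"
        for '<literal>', '<@literal>', '<&@literal>', '<~@literal>';
    # all four match with this commit; the old pattern rejected the
    # doubled prefixes in the last two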
|
hinrik/hailo | 7986c42ac24bffc8679b0eaa4ce7e103cb18c1d8 | Add README.pod and cover_db to MANIFEST.SKIP | diff --git a/MANIFEST.SKIP b/MANIFEST.SKIP
index 7a55eb3..80e7eb7 100644
--- a/MANIFEST.SKIP
+++ b/MANIFEST.SKIP
@@ -1,15 +1,17 @@
^Hailo-
\.bak$
\.txt$
\.log$
\.patch$
\.diff$
\.db$
\.sql(?:ite)?$
^[^/\\]*\.trn$
\.brn$
\.#
^utils/developer/
^xt/
^RELEASE
^nytprof
+^README.pod$
+^cover_db/
|
hinrik/hailo | 20652105b3f19e16b14f7765382e2682a4b6ae9e | Mention tweetmix, remove dead HALBot link | diff --git a/lib/Hailo.pm b/lib/Hailo.pm
index 7fcb858..abe82de 100644
--- a/lib/Hailo.pm
+++ b/lib/Hailo.pm
@@ -95,554 +95,551 @@ my %has = (
},
);
for my $k (keys %has) {
my $name = $has{$k}->{name};
my $default = $has{$k}->{default};
my $method_class = "${k}_class";
my $method_args = "${k}_args";
# working classes
has "${k}_class" => (
isa => 'Str',
is => "rw",
default => $default,
($k ~~ 'tokenizer'
? (trigger => sub {
my ($self, $class) = @_;
$self->_custom_tokenizer_class(1);
})
: ())
);
# Object arguments
has "${k}_args" => (
documentation => "Arguments for the $name class",
isa => 'HashRef',
is => "ro",
default => sub { +{} },
);
# Working objects
has "_${k}" => (
does => "Hailo::Role::$name",
lazy_build => 1,
is => 'ro',
init_arg => undef,
);
# Generate the object itself
no strict 'refs';
*{"_build__${k}"} = sub {
my ($self) = @_;
my $obj = $self->_new_class(
$name,
$self->$method_class,
{
arguments => $self->$method_args,
($k ~~ [ qw< engine storage > ]
? (order => $self->order)
: ()),
($k ~~ [ qw< engine > ]
? (storage => $self->_storage)
: ()),
(($k ~~ [ qw< storage > ] and defined $self->brain)
? (
hailo => do {
require Scalar::Util;
Scalar::Util::weaken(my $s = $self);
my %callback = (
has_custom_order => sub { $s->_custom_order },
has_custom_tokenizer_class => sub { $s->_custom_tokenizer_class },
set_order => sub {
my ($db_order) = @_;
$s->order($db_order);
$s->_engine->order($db_order);
},
set_tokenizer_class => sub {
my ($db_tokenizer_class) = @_;
$s->tokenizer_class($db_tokenizer_class);
},
);
\%callback;
},
brain => $self->brain
)
: ()),
(($k ~~ [ qw< storage > ]
? (tokenizer_class => $self->tokenizer_class)
: ()))
},
);
return $obj;
};
}
sub _new_class {
my ($self, $type, $class, $args) = @_;
my $pkg;
if ($class =~ m[^\+(?<custom_plugin>.+)$]) {
$pkg = $+{custom_plugin};
} else {
my @plugins = @{ $self->PLUGINS };
# Be fuzzy about includes, e.g. DBD::SQLite or SQLite or sqlite will go
$pkg = first { / $type : .* : $class /ix }
sort { length $a <=> length $b }
@plugins;
unless ($pkg) {
local $" = ', ';
my @p = grep { /$type/ } @plugins;
die "Couldn't find a class name matching '$class' in plugins '@p'";
}
}
my ($success, $error) = try_load_class($pkg);
die $error if !$success;
return $pkg->new(%$args);
}
sub save {
my ($self, @args) = @_;
$self->_storage->save(@args);
return;
}
sub train {
my ($self, $input, $fast) = @_;
$self->_storage->start_training();
given ($input) {
# With STDIN
when (not ref and defined and $_ eq '-') {
die "You must provide STDIN when training from '-'" if $self->_is_interactive(*STDIN);
$self->_train_fh(*STDIN, $fast);
}
# With a filehandle
when (ref eq 'GLOB') {
$self->_train_fh($input, $fast);
}
# With a file
when (not ref) {
open my $fh, '<:encoding(utf8)', $input;
$self->_train_fh($fh, $fast, $input);
}
# With an Array
when (ref eq 'ARRAY') {
for my $line (@$input) {
$self->_learn_one($line, $fast);
$self->_engine->flush_cache if !$fast;
}
$self->_engine->flush_cache if $fast;
}
# With something naughty
default {
die "Unknown input: $input";
}
}
$self->_storage->stop_training();
return;
}
sub _train_fh {
my ($self, $fh, $fast) = @_;
while (my $line = <$fh>) {
chomp $line;
$self->_learn_one($line, $fast);
$self->_engine->flush_cache if !$fast;
}
$self->_engine->flush_cache if $fast;
return;
}
sub learn {
my ($self, $input) = @_;
my $inputs;
given ($input) {
when (not defined) {
die "Cannot learn from undef input";
}
when (not ref) {
$inputs = [$input];
}
# With an Array
when (ref eq 'ARRAY') {
$inputs = $input
}
default {
die "Unknown input: $input";
}
}
my $storage = $self->_storage;
$storage->start_learning();
$self->_learn_one($_) for @$inputs;
$storage->stop_learning();
return;
}
sub _learn_one {
my ($self, $input, $fast) = @_;
my $engine = $self->_engine;
my $tokens = $self->_tokenizer->make_tokens($input);
$fast ? $engine->learn_cached($tokens) : $engine->learn($tokens);
return;
}
sub learn_reply {
my ($self, $input) = @_;
$self->learn($input);
return $self->reply($input);
}
sub reply {
my ($self, $input) = @_;
my $storage = $self->_storage;
# start_training() hasn't been called so we can't guarantee that
# the storage has been engaged at this point. This must be called
# before ->_engine() is called anywhere to ensure that the
# lazy-loading in the engine works.
$storage->_engage() unless $storage->_engaged;
my $engine = $self->_engine;
my $tokenizer = $self->_tokenizer;
my $reply;
if (defined $input) {
my $tokens = $tokenizer->make_tokens($input);
$reply = $engine->reply($tokens);
}
else {
$reply = $engine->reply();
}
return unless defined $reply;
return $tokenizer->make_output($reply);
}
sub stats {
my ($self) = @_;
return $self->_storage->totals();
}
sub DEMOLISH {
my ($self) = @_;
$self->save() if blessed $self->{_storage} and $self->save_on_exit;
return;
}
sub _is_interactive {
require IO::Interactive;
return IO::Interactive::is_interactive();
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo - A pluggable Markov engine analogous to MegaHAL
=head1 SYNOPSIS
This is the synopsis for using Hailo as a module. See L<hailo> for
command-line invocation.
# Hailo requires Perl 5.10
use 5.010;
use Any::Moose;
use Hailo;
# Construct a new in-memory Hailo using the SQLite backend. See
# backend documentation for other options.
my $hailo = Hailo->new;
# Various ways to learn
my @train_this = ("I like big butts", "and I can not lie");
$hailo->learn(\@train_this);
$hailo->learn($_) for @train_this;
# Heavy-duty training interface. Backends may drop some safety
# features like journals or synchronous IO to train faster using
# this mode.
$hailo->train("megahal.trn");
$hailo->train($filehandle);
# Make the brain babble
say $hailo->reply("hello good sir.");
# Just say something at random
say $hailo->reply();
=head1 DESCRIPTION
Hailo is a fast and lightweight Markov engine intended to replace
L<AI::MegaHAL|AI::MegaHAL>. It has a L<Mouse|Mouse> (or
L<Moose|Moose>) based core with pluggable
L<storage|Hailo::Role::Storage>, L<tokenizer|Hailo::Role::Tokenizer>
and L<engine|Hailo::Role::Engine> backends.
It is similar to MegaHAL in functionality, the main differences (with the
default backends) being better scalability, drastically less memory usage,
an improved tokenizer, and tidier output.
With this distribution, you can create, modify, and query Hailo brains. To
use Hailo in event-driven POE applications, you can use the
L<POE::Component::Hailo|POE::Component::Hailo> wrapper. One example is
L<POE::Component::IRC::Plugin::Hailo|POE::Component::IRC::Plugin::Hailo>,
which implements an IRC chat bot.
=head2 Etymology
I<Hailo> is a portmanteau of I<HAL> (as in MegaHAL) and
L<failo|http://identi.ca/failo>.
=head1 Backends
Hailo supports pluggable L<storage|Hailo::Role::Storage> and
L<tokenizer|Hailo::Role::Tokenizer> backends, it also supports a
pluggable L<UI|Hailo::Role::UI> backend which is used by the L<hailo>
command-line utility.
=head2 Storage
Hailo can currently store its data in either a
L<SQLite|Hailo::Storage::SQLite>,
L<PostgreSQL|Hailo::Storage::PostgreSQL> or
L<MySQL|Hailo::Storage::MySQL> database. Some NoSQL backends were
supported in earlier versions, but they were removed as they had no
redeeming quality.
SQLite is the primary target for Hailo. It's much faster and uses less
resources than the other two. It's highly recommended that you use it.
See L<Hailo::Storage/"Comparison of backends"> for benchmarks showing
how the various backends compare under different workloads, and how
you can create your own.
=head2 Tokenizer
By default Hailo will use L<the word
tokenizer|Hailo::Tokenizer::Words> to split up input by whitespace,
taking into account things like quotes, sentence terminators and more.
There's also L<the character
tokenizer|Hailo::Tokenizer::Chars>. It's not generally useful for a
conversation bot but can be used to e.g. generate new words given a
list of existing words.
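For instance, a minimal sketch of selecting the character tokenizer at construction time (the brain and training file names here are made up):

    my $hailo = Hailo->new(
        brain           => 'words.sqlite',
        tokenizer_class => 'Chars',
    );
    $hailo->train('wordlist.trn');
    say $hailo->reply();   # babbles a new word-like string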
=head1 UPGRADING
Hailo makes no promises about brains generated with earlier versions
being compatible with future versions, and due to the way Hailo works
there's no practical way to make that promise. Learning in Hailo is
lossy so an accurate conversion is impossible.
If you're maintaining a Hailo brain that you want to keep using you
should save the input you trained it on and re-train when you upgrade.
Hailo is always going to lose information present in the input you
give it. How input tokens get split up and saved to the storage
backend depends on the version of the tokenizer being used and how
that input gets saved to the database.
For instance if an earlier version of Hailo tokenized C<"foo+bar">
simply as C<"foo+bar"> but a later version split that up into
C<"foo", "+", "bar">, then an input of C<"foo+bar are my favorite
metasyntactic variables"> wouldn't take into account the existing
C<"foo+bar"> string in the database.
Tokenizer changes like this would cause the brains to accumulate
garbage and would leave other parts in a state they wouldn't otherwise
have gotten into.
There have been more drastic changes to the database format itself in
the past.
Having said all that, the database format and the tokenizer are
relatively stable. At the time of writing 0.33 is the latest release
and it's compatible with brains down to at least 0.17. If you're
upgrading and there isn't a big notice about the storage format being
incompatible in the F<Changes> file, your old brains will probably work
just fine.
=head1 ATTRIBUTES
=head2 C<brain>
The name of the brain (file name, database name) to use as storage.
There is no default. Whether this gets used at all depends on the
storage backend, currently only SQLite uses it.
=head2 C<save_on_exit>
A boolean value indicating whether Hailo should save its state before
its object gets destroyed. This defaults to true and will simply call
L<save|/save> at C<DEMOLISH> time.
See L<Hailo::Storage::SQLite/"in_memory"> for how the SQLite backend
uses this option.
=head2 C<order>
The Markov order (chain length) you want to use for an empty brain.
The default is 2.
=head2 C<engine_class>
=head2 C<storage_class>
=head2 C<tokenizer_class>
=head2 C<ui_class>
A short name for the class we use for the engine, storage,
tokenizer or UI backends.
By default this is B<Default> for the engine, B<SQLite> for storage,
B<Words> for the tokenizer and B<ReadLine> for the UI. The UI backend
is only used by the L<hailo> command-line interface.
You can only specify the short name of one of the packages Hailo
itself ships with. If you need another class then just prefix the
package with a plus (Catalyst style), e.g. C<+My::Foreign::Tokenizer>.
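For example, a hypothetical third-party tokenizer could be loaded like this:

    my $hailo = Hailo->new(
        tokenizer_class => '+My::Foreign::Tokenizer',
    );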
=head2 C<engine_args>
=head2 C<storage_args>
=head2 C<tokenizer_args>
=head2 C<ui_args>
A C<HashRef> of arguments for engine/storage/tokenizer/ui
backends. See the documentation for the backends for what sort of
arguments they accept.
=head1 METHODS
=head2 C<new>
This is the constructor. It accepts the attributes specified in
L</ATTRIBUTES>.
=head2 C<learn>
Takes a string or an array reference of strings and learns from them.
=head2 C<train>
Takes a filename, filehandle or array reference and learns from all its
lines. If a filename is passed, the file is assumed to be UTF-8 encoded.
Unlike L<C<learn>|/learn>, this method sacrifices some safety (disables
the database journal, fsyncs, etc) for speed while learning.
You can provide a second parameter which, if true, enables aggressive
caching while training. This speeds things up considerably for large
inputs, but takes up quite a bit of memory.
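For example, with the F<megahal.trn> file from the synopsis:

    $hailo->train('megahal.trn');      # normal fast training
    $hailo->train('megahal.trn', 1);   # with aggressive caching (memory-hungry)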
=head2 C<reply>
Takes an optional line of text and generates a reply that might be relevant.
=head2 C<learn_reply>
Takes a string argument, learns from it, and generates a reply that
might be relevant. This is equivalent to calling L<learn|/learn>
followed by L<reply|/reply>.
=head2 C<save>
Tells the underlying storage backend to L<save its
state|Hailo::Role::Storage/"save">, any arguments to this method will
be passed as-is to the backend.
=head2 C<stats>
Takes no arguments. Returns the number of tokens, expressions, previous
token links and next token links.
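A small sketch, assuming the four counters come back as a list in the order given above:

    my ($tokens, $expressions, $prev_links, $next_links) = $hailo->stats();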
=head1 SUPPORT
You can join the IRC channel I<#hailo> on FreeNode if you have questions.
=head1 BUGS
Bugs, feature requests and other issues are tracked in L<Hailo's RT on
rt.cpan.org|https://rt.cpan.org/Dist/Display.html?Name=Hailo>
=head1 SEE ALSO
=over
=item * L<POE::Component::Hailo> - A non-blocking POE wrapper around Hailo
=item * L<POE::Component::IRC::Plugin::Hailo> - A Hailo IRC bot plugin
=item * L<http://github.com/hinrik/failo> - Failo, an IRC bot that uses Hailo
=item * L<http://github.com/bingos/gumbybrain> - GumbyBRAIN, a more famous IRC bot that uses Hailo
=item * L<Hailo::UI::Web> - A L<Catalyst> and jQuery powered web
interface to Hailo available at L<hailo.nix.is|http://hailo.nix.is>
and as L<hailo-ui-web|http://github.com/avar/hailo-ui-web> on
L<GitHub|http://github.com>
-=item * L<HALBot> - Another L<Catalyst> Dojo powered web interface to
-Hailo available at L<bifurcat.es|http://bifurcat.es/> and as
-L<halbot-on-the-web|http://gitorious.org/halbot-on-the-web/halbot-on-the-web>
-at L<gitorious|http://gitorious.org>
+=item * L<tweetmix|http://www.tweetmix.me/>, a random tweet generator powered by Hailo
=item * L<http://github.com/pteichman/cobe> - cobe, a Python port of MegaHAL "inspired by the success of Hailo"
=back
=head1 LINKS
=over
=item * L<hailo.org|http://hailo.org> - Hailo's website
=item * L<http://bit.ly/hailo_rewrite_of_megahal> - Hailo: A Perl rewrite of
MegaHAL, A blog posting about the motivation behind Hailo
=item * L<http://blogs.perl.org/users/aevar_arnfjor_bjarmason/hailo/> -
More blog posts about Hailo on E<AElig>var ArnfjE<ouml>rE<eth>
Bjarmason's L<blogs.perl.org|http://blogs.perl.org> blog
=item * Hailo on L<freshmeat|http://freshmeat.net/projects/hailo> and
L<ohloh|https://www.ohloh.net/p/hailo>
=back
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | cefcde071e0682a7b60ddaa28c2279a2dbad8627 | Use backspace as the magic string, not escape | diff --git a/Changes b/Changes
index 48545d8..6b65ecd 100644
--- a/Changes
+++ b/Changes
@@ -1,520 +1,524 @@
Revision history for Hailo
{{$NEXT}}
- Word tokenizer: Improve tokenization of email addresses
- When using --train-fast, remove the "flushing cache" message when done
+ - Word tokenizer: Use backspace instead of escape as a magic character
+ when capitalizing text in multiple passes, since it's less likely to
+ appear in tokens.
+
0.69 2011-05-07 04:02:38
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
* Match IRC channel names (#foo, &bar, +baz)
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
* Match </foo> as a single token
- Depend on MouseX::Getopt 0.33 to fix test failures
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize 'â' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 7d1efc4..d85dcaf 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,230 +1,236 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $SPACE = qr/\s/;
my $NONSPACE = qr/\S/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/[^\w\s]+/;
my $BARE_WORD = qr/\w+/;
my $CURRENCY = qr/[¤¥¢£\$]/;
my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ (?: \. [A-Z]{2,4} )* /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
my $IRC_CHAN = qr/[#&+][^ \a\0\012\015,:]{1,199}/;
my $NUMERO = qr/#[0-9]+/;
my $CLOSE_TAG = qr{</[-\w]+>};
my $CASED_WORD = qr/$CLOSE_TAG|$IRC_NICK|$IRC_CHAN|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$IRC_CHAN|$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$NUMERO|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
+# This string is added to (and later removed from) the output string when
+# capitalizing it in multiple passes. We use backspace, because that is
+# unlikely to be in the input. This dirty approach can probably be replaced
+# with regex grammars, but I haven't bothered to learn to use those.
+my $SEPARATOR = "\x08";
+
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K\s*\n+\s*//;
$input =~ s/\s*\n+\s*/ /gm;
while (length $input) {
# remove the next chunk of whitespace
$input =~ s/^$SPACE+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
- $reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
- $reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
- $reply =~ s/\x1B//go;
+ $reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/$SEPARATOR\u$1$SEPARATOR/go;
+ $reply =~ s/$SEPARATOR$WORD_STRICT$SEPARATOR$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
+ $reply =~ s/$SEPARATOR//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
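A minimal sketch (not from the repository) of the marker trick the commit above adopts: wrap each word capitalized in the first pass in a character that should never occur in the text, let the second pass key off the markers, then strip them.

    use strict;
    use warnings;

    my $SEP  = "\x08";    # backspace; unlikely to appear in any token
    my $text = 'one. two. three.';
    # first pass: capitalize a word after a terminator and mark it
    $text =~ s/\.\s+\K(\w+)/$SEP\u$1$SEP/g;
    # a second pass could now match relative to the markers; finally strip them
    $text =~ s/$SEP//g;
    print "$text\n";    # "one. Two. Three."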
hinrik/hailo | 6a9f469e5c64b7507d98b9f16205b0f28a9557db | When using --train-fast, remove the "flushing cache" message when done | diff --git a/Changes b/Changes
index 91b7422..48545d8 100644
--- a/Changes
+++ b/Changes
@@ -1,518 +1,520 @@
Revision history for Hailo
{{$NEXT}}
- Word tokenizer: Improve tokenization of email addresses
+ - When using --train-fast, remove the "flushing cache" message when done
+
0.69 2011-05-07 04:02:38
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
* Match IRC channel names (#foo, &bar, +baz)
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
* Match </foo> as a single token
- Depend on MouseX::Getopt 0.33 to fix test failures
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
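A minimal DBI sketch of the ->fetchall_arrayref change above (table and column names are illustrative, not Hailo's actual schema). fetchall_arrayref hands back plain array refs, skipping the per-row hash construction that makes fetchall_hashref slow in tight loops:

    use DBI;
    my $dbh = DBI->connect('dbi:SQLite:dbname=brain.sqlite', '', '',
                           { RaiseError => 1 });
    my $sth = $dbh->prepare('SELECT id, text FROM token');
    $sth->execute;
    for my $row (@{ $sth->fetchall_arrayref }) {
        my ($id, $text) = @$row;   # each row is a cheap [$id, $text] pair
    }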
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code; that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
diff --git a/lib/Hailo/Command.pm b/lib/Hailo/Command.pm
index ba8d8fe..be09443 100644
--- a/lib/Hailo/Command.pm
+++ b/lib/Hailo/Command.pm
@@ -1,451 +1,453 @@
package Hailo::Command;
use 5.010;
use Any::Moose;
use Any::Moose 'X::Getopt';
use Any::Moose 'X::StrictConstructor';
use namespace::clean -except => 'meta';
extends 'Hailo';
with any_moose('X::Getopt::Dashes');
## Our internal Getopts method that Hailo.pm doesn't care about.
has help_flag => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'h',
cmd_flag => 'help',
isa => 'Bool',
is => 'ro',
default => 0,
documentation => "You're soaking it in",
);
has _go_version => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'v',
cmd_flag => 'version',
documentation => 'Print version and exit',
isa => 'Bool',
is => 'ro',
);
has _go_examples => (
traits => [ qw/ Getopt / ],
cmd_flag => 'examples',
documentation => 'Print examples along with the help message',
isa => 'Bool',
is => 'ro',
);
has _go_progress => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'p',
cmd_flag => 'progress',
documentation => 'Display progress during the import',
isa => 'Bool',
is => 'ro',
default => sub {
my ($self) = @_;
$self->_is_interactive();
},
);
has _go_learn => (
traits => [ qw/ Getopt / ],
cmd_aliases => "l",
cmd_flag => "learn",
documentation => "Learn from STRING",
isa => 'Str',
is => "ro",
);
has _go_learn_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "L",
cmd_flag => "learn-reply",
documentation => "Learn from STRING and reply to it",
isa => 'Str',
is => "ro",
);
has _go_train => (
traits => [ qw/ Getopt / ],
cmd_aliases => "t",
cmd_flag => "train",
documentation => "Learn from all the lines in FILE, use - for STDIN",
isa => 'Str',
is => "ro",
);
has _go_train_fast => (
traits => [ qw/ Getopt / ],
cmd_aliases => "f",
cmd_flag => "train-fast",
documentation => "Train with aggressive caching (memory-hungry!)",
isa => 'Str',
is => "ro",
);
has _go_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "r",
cmd_flag => "reply",
documentation => "Reply to STRING",
isa => 'Str',
is => "ro",
);
has _go_random_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "R",
cmd_flag => "random-reply",
documentation => "Like --reply but takes no STRING; Babble at random",
isa => 'Bool',
is => "ro",
);
has _go_stats => (
traits => [ qw/ Getopt / ],
cmd_aliases => "s",
cmd_flag => "stats",
documentation => "Print statistics about the brain",
isa => 'Bool',
is => "ro",
);
## Things we have to pass to Hailo.pm via triggers when they're set
has _go_autosave => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'a',
cmd_flag => 'autosave',
documentation => 'Save the brain on exit (on by default)',
isa => 'Bool',
is => 'rw',
trigger => sub {
my ($self, $bool) = @_;
$self->save_on_exit($bool);
},
);
has _go_order => (
traits => [ qw/ Getopt / ],
cmd_aliases => "o",
cmd_flag => "order",
documentation => "Markov order; How deep the rabbit hole goes",
isa => 'Int',
is => "rw",
trigger => sub {
my ($self, $order) = @_;
$self->order($order);
},
);
has _go_brain => (
traits => [ qw/ Getopt / ],
cmd_aliases => "b",
cmd_flag => "brain",
documentation => "Load/save brain to/from FILE",
isa => 'Str',
is => "ro",
trigger => sub {
my ($self, $brain) = @_;
$self->brain($brain);
},
);
# working classes
has _go_engine_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "E",
cmd_flag => "engine",
isa => 'Str',
is => "rw",
documentation => "Use engine CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->engine_class($class);
},
);
has _go_storage_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "S",
cmd_flag => "storage",
isa => 'Str',
is => "rw",
documentation => "Use storage CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->storage_class($class);
},
);
has _go_tokenizer_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "T",
cmd_flag => "tokenizer",
isa => 'Str',
is => "rw",
documentation => "Use tokenizer CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->tokenizer_class($class);
},
);
has _go_ui_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "u",
cmd_flag => "ui",
isa => 'Str',
is => "rw",
documentation => "Use UI CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->ui_class($class);
},
);
# Stop Hailo from polluting our command-line interface
for (qw/ save_on_exit order brain /, map { qq[${_}_class] } qw/ engine storage tokenizer ui /) {
has "+$_" => (
traits => [ qw/ NoGetopt / ],
);
}
# Check validity of options
before run => sub {
my ($self) = @_;
if (not $self->_storage->ready and
(defined $self->_go_reply or
defined $self->_go_train or
defined $self->_go_train_fast or
defined $self->_go_stats or
defined $self->_go_learn or
defined $self->_go_learn_reply or
defined $self->_go_random_reply)) {
# TODO: Make this spew out the --help reply just like hailo
# with invalid options does usually, but only if run via
# ->new_with_options
die "To reply/train/learn/stat you must specify options to initialize your storage backend\n";
}
if (defined $self->_go_train and defined $self->_go_train_fast) {
die "You can only specify one of --train and --train-fast\n";
}
return;
};
sub run {
my ($self) = @_;
if ($self->_go_version) {
# Munging strictness because we don't have a version from a
# Git checkout. Dist::Zilla provides it.
no strict 'vars';
my $version = $VERSION // 'dev-git';
say "hailo $version";
return;
}
if ($self->_is_interactive() and
$self->_storage->ready and
not defined $self->_go_train and
not defined $self->_go_train_fast and
not defined $self->_go_learn and
not defined $self->_go_reply and
not defined $self->_go_learn_reply and
not defined $self->_go_stats and
not defined $self->_go_random_reply) {
$self->_ui->run($self);
}
$self->train($self->_go_train) if defined $self->_go_train;
$self->train($self->_go_train_fast, 1) if defined $self->_go_train_fast;
$self->learn($self->_go_learn) if defined $self->_go_learn;
if (defined $self->_go_learn_reply) {
my $answer = $self->learn_reply($self->_go_learn_reply);
say $answer // "I don't know enough to answer you yet.";
}
if (defined $self->_go_random_reply) {
my $answer = $self->reply();
say $answer // "I don't know enough to answer you yet.";
}
elsif (defined $self->_go_reply) {
my $answer = $self->reply($self->_go_reply);
say $answer // "I don't know enough to answer you yet.";
}
if ($self->_go_stats) {
my ($tok, $ex, $prev, $next) = $self->stats();
my $order = $self->_storage->order;
say "Tokens: $tok";
say "Expression length: $order tokens";
say "Expressions: $ex";
say "Links to preceding tokens: $prev";
say "Links to following tokens: $next";
}
return;
}
override _train_fh => sub {
my ($self, $fh, $fast, $filename) = @_;
if ($self->_go_progress and $self->_is_interactive) {
$self->train_progress($fh, $fast, $filename);
} else {
super();
}
};
before train_progress => sub {
require Term::Sk;
require File::CountLines;
File::CountLines->import('count_lines');
require Time::HiRes;
Time::HiRes->import(qw(gettimeofday tv_interval));
return;
};
sub train_progress {
my ($self, $fh, $fast, $filename) = @_;
my $lines = count_lines($filename);
my $progress = Term::Sk->new('%d Elapsed: %8t %21b %4p %2d (%c lines of %m)', {
# Start at line 1, not 0
base => 1,
target => $lines,
# Every 0.1 seconds for long files
freq => ($lines < 10_000 ? 10 : 'd'),
# Override Term::Sk's default 100_000 to 100,000
commify => sub {
my $int = shift;
$int = reverse $int;
$int =~ s/(\d{3})(?=\d)(?!\d*\.)/$1,/g;
$int = reverse $int;
return $int;
},
}) or die "Error in Term::Sk->new: (code $Term::Sk::errcode) $Term::Sk::errmsg";
my $next_update = 0;
my $start_time = [gettimeofday()];
my $i = 0; while (my $line = <$fh>) {
$i++;
chomp $line;
$self->_learn_one($line, $fast);
$self->_engine->flush_cache if !$fast;
$progress->up;
}
$progress->close;
if ($fast) {
- print "Flushing cache (this may take a while for large inputs)\n";
+ my $msg = "Flushing cache (this may take a while for large inputs)";
+ syswrite STDOUT, $msg;
$self->_engine->flush_cache;
+ print "\010" x length $msg;
}
my $elapsed = tv_interval($start_time);
say sprintf "Trained from %d lines in %.2f seconds; %.2f lines/s", $i, $elapsed, ($i / $elapsed);
return;
}
# --i--do-not-exist
sub _getopt_spec_exception { goto &_getopt_full_usage }
# --help
sub _getopt_full_usage {
my ($self, $usage, $plain_str) = @_;
# If called from _getopt_spec_exception we get "Unknown option: foo"
my $warning = ref $usage eq 'ARRAY' ? $usage->[0] : undef;
my ($use, $options) = do {
# $plain_str under _getopt_spec_exception
my $out = $plain_str // $usage->text;
# The default getopt order sucks, use reverse sort order
chomp(my @out = split /^/, $out);
my $opt = join "\n", sort { $b cmp $a } @out[1 .. $#out];
($out[0], $opt);
};
my $synopsis = do {
require Pod::Usage;
my $out;
open my $fh, '>', \$out;
no warnings 'once';
my $hailo = File::Spec->catfile($Hailo::Command::HERE_MOMMY, 'hailo');
# Try not to fail on Win32 or other odd systems which might have hailo.pl not hailo
$hailo = ((glob("$hailo*"))[0]) unless -f $hailo;
Pod::Usage::pod2usage(
-input => $hailo,
-sections => 'SYNOPSIS',
-output => $fh,
-exitval => 'noexit',
);
close $fh;
$out =~ s/\n+$//s;
$out =~ s/^Usage:/examples:/;
$out;
};
# Unknown option provided
print $warning if $warning;
print <<"USAGE";
$use
$options
\n\tNote: All input/output and files are assumed to be UTF-8 encoded.
USAGE
# Hack: We can't get at our object from here so we have to inspect
# @ARGV directly.
say "\n", $synopsis if "@ARGV" ~~ /--examples/;
exit 1;
}
__PACKAGE__->meta->make_immutable;
=head1 NAME
Hailo::Command - Class for the L<hailo> command-line interface to L<Hailo>
=head1 DESCRIPTION
This is an internal class L<hailo> uses for its command-line
interface. See L<Hailo> for the public interface.
=head1 PRIVATE METHODS
=head2 C<run>
Run Hailo in accordance with the attributes that were passed to
it. This method is called by the L<hailo> command-line utility and the
Hailo test suite; its behavior is subject to change.
=head1 AUTHOR
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | d5ec20f06d0c4b7da88eabdb6a749be3c3e5d2fb | Use consistent formatting in Changes | diff --git a/Changes b/Changes
index c8291ec..91b7422 100644
--- a/Changes
+++ b/Changes
@@ -1,515 +1,516 @@
Revision history for Hailo
{{$NEXT}}
+
- Word tokenizer: Improve tokenization of email addresses
0.69 2011-05-07 04:02:38
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
* Match IRC channel names (#foo, &bar, +baz)
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
* Match </foo> as a single token
- Depend on MouseX::Getopt 0.33 to fix test failures
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching (a toy sketch follows this
entry). This uses a lot of memory. Almost 600MB with SQLite on a
64bit machine for a brain which eventually takes 134MB on disk
(trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
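A toy sketch of the aggressive-caching idea behind --train-fast (names and schema are illustrative, not Hailo's internals): do all the counting in memory and write to the database in a single pass at the end, trading memory for speed.

    my %seen;   # token text => occurrence count
    sub learn_fast {
        my (@tokens) = @_;
        $seen{$_}++ for @tokens;   # pure in-memory work, no SQL yet
    }
    sub flush_cache {
        my ($dbh) = @_;
        my $sth = $dbh->prepare('INSERT INTO token (text, count) VALUES (?, ?)');
        $sth->execute($_, $seen{$_}) for sort keys %seen;
        %seen = ();                # release the potentially huge cache
    }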
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB")); an illustration
follows this entry.
- Don't match '>' as part of the URI in "<http://google.com>".
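An illustration of the word-plus-postfix split described above, with deliberately simplified regexes (the tokenizer's real patterns are more involved):

    my $NUMBER  = qr/\d+(?:\.\d+)?/;
    my $POSTFIX = qr/[[:alpha:]]+/;
    if ('4.1GB' =~ /^($NUMBER)($POSTFIX)$/) {
        my ($word, $postfix) = ($1, $2);   # '4.1' and 'GB'
        # Each component can now carry its own spacing attribute.
    }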
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and closed the bug by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time (a generic
illustration follows this entry).
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
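A generic illustration of the self-reference problem mentioned above (plain hashes stand in for Hailo and its slave classes): a strong reference cycle keeps both objects alive until program exit, while Scalar::Util::weaken lets them be collected on time.

    use Scalar::Util qw(weaken);
    my $parent = { name => 'Hailo' };
    my $slave  = { owner => $parent };   # slave points back at its owner
    $parent->{storage} = $slave;         # cycle: parent -> slave -> parent
    weaken($slave->{owner});             # break the cycle so both can be freed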
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface is now much better. It
supports SQLite-like command syntax and has a built-in help
system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization); overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o (a sketch follows this entry). This trick has also
been applied to a few other patterns.
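A sketch of the /o trick: $RE{URI} interpolates a very large Regexp::Common pattern, and /o makes Perl compile the resulting regex once per process instead of on every match.

    use Regexp::Common qw(URI);
    sub contains_uri {
        my ($chunk) = @_;
        # Without /o the interpolated pattern is rebuilt on every call;
        # with /o it is compiled exactly once for the process lifetime.
        return $chunk =~ /$RE{URI}/o;
    }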
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names (a one-line
check follows this entry).
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
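A one-line check mirroring the pattern quoted above ($TWAT_NAME is the tokenizer's actual variable name; the test string is made up):

    use feature 'say';
    my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
    say 'kept as one token' if '@hailo_bot' =~ /^$TWAT_NAME$/;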
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- Add a script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message was
always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the one in the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code; that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
|
hinrik/hailo | 8a59ac2e603d1a2bfba79bc1e7110320ab7f6ed0 | Fix typo in comment | diff --git a/lib/Hailo/Command.pm b/lib/Hailo/Command.pm
index a6dec41..ba8d8fe 100644
--- a/lib/Hailo/Command.pm
+++ b/lib/Hailo/Command.pm
@@ -1,451 +1,451 @@
package Hailo::Command;
use 5.010;
use Any::Moose;
use Any::Moose 'X::Getopt';
use Any::Moose 'X::StrictConstructor';
use namespace::clean -except => 'meta';
extends 'Hailo';
with any_moose('X::Getopt::Dashes');
## Our internal Getopts method that Hailo.pm doesn't care about.
has help_flag => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'h',
cmd_flag => 'help',
isa => 'Bool',
is => 'ro',
default => 0,
documentation => "You're soaking it in",
);
has _go_version => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'v',
cmd_flag => 'version',
documentation => 'Print version and exit',
isa => 'Bool',
is => 'ro',
);
has _go_examples => (
traits => [ qw/ Getopt / ],
cmd_flag => 'examples',
documentation => 'Print examples along with the help message',
isa => 'Bool',
is => 'ro',
);
has _go_progress => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'p',
cmd_flag => 'progress',
documentation => 'Display progress during the import',
isa => 'Bool',
is => 'ro',
default => sub {
my ($self) = @_;
$self->_is_interactive();
},
);
has _go_learn => (
traits => [ qw/ Getopt / ],
cmd_aliases => "l",
cmd_flag => "learn",
documentation => "Learn from STRING",
isa => 'Str',
is => "ro",
);
has _go_learn_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "L",
cmd_flag => "learn-reply",
documentation => "Learn from STRING and reply to it",
isa => 'Str',
is => "ro",
);
has _go_train => (
traits => [ qw/ Getopt / ],
cmd_aliases => "t",
cmd_flag => "train",
documentation => "Learn from all the lines in FILE, use - for STDIN",
isa => 'Str',
is => "ro",
);
has _go_train_fast => (
traits => [ qw/ Getopt / ],
cmd_aliases => "f",
cmd_flag => "train-fast",
documentation => "Train with aggressive caching (memory-hungry!)",
isa => 'Str',
is => "ro",
);
has _go_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "r",
cmd_flag => "reply",
documentation => "Reply to STRING",
isa => 'Str',
is => "ro",
);
has _go_random_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "R",
cmd_flag => "random-reply",
documentation => "Like --reply but takes no STRING; Babble at random",
isa => 'Bool',
is => "ro",
);
has _go_stats => (
traits => [ qw/ Getopt / ],
cmd_aliases => "s",
cmd_flag => "stats",
documentation => "Print statistics about the brain",
isa => 'Bool',
is => "ro",
);
## Things we have to pass to Hailo.pm via triggers when they're set
has _go_autosave => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'a',
cmd_flag => 'autosave',
documentation => 'Save the brain on exit (on by default)',
isa => 'Bool',
is => 'rw',
trigger => sub {
my ($self, $bool) = @_;
$self->save_on_exit($bool);
},
);
has _go_order => (
traits => [ qw/ Getopt / ],
cmd_aliases => "o",
cmd_flag => "order",
documentation => "Markov order; How deep the rabbit hole goes",
isa => 'Int',
is => "rw",
trigger => sub {
my ($self, $order) = @_;
$self->order($order);
},
);
has _go_brain => (
traits => [ qw/ Getopt / ],
cmd_aliases => "b",
cmd_flag => "brain",
documentation => "Load/save brain to/from FILE",
isa => 'Str',
is => "ro",
trigger => sub {
my ($self, $brain) = @_;
$self->brain($brain);
},
);
# working classes
has _go_engine_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "E",
cmd_flag => "engine",
isa => 'Str',
is => "rw",
documentation => "Use engine CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->engine_class($class);
},
);
has _go_storage_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "S",
cmd_flag => "storage",
isa => 'Str',
is => "rw",
documentation => "Use storage CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->storage_class($class);
},
);
has _go_tokenizer_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "T",
cmd_flag => "tokenizer",
isa => 'Str',
is => "rw",
documentation => "Use tokenizer CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->tokenizer_class($class);
},
);
has _go_ui_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "u",
cmd_flag => "ui",
isa => 'Str',
is => "rw",
documentation => "Use UI CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->ui_class($class);
},
);
# Stop Hailo from polluting our command-line interface
for (qw/ save_on_exit order brain /, map { qq[${_}_class] } qw/ engine storage tokenizer ui /) {
has "+$_" => (
traits => [ qw/ NoGetopt / ],
);
}
# Check validity of options
before run => sub {
my ($self) = @_;
if (not $self->_storage->ready and
(defined $self->_go_reply or
defined $self->_go_train or
defined $self->_go_train_fast or
defined $self->_go_stats or
defined $self->_go_learn or
defined $self->_go_learn_reply or
defined $self->_go_random_reply)) {
# TODO: Make this spew out the --help reply just like hailo
# with invalid options does usually, but only if run via
# ->new_with_options
die "To reply/train/learn/stat you must specify options to initialize your storage backend\n";
}
if (defined $self->_go_train and defined $self->_go_train_fast) {
die "You can only specify one of --train and --train-fast\n";
}
return;
};
sub run {
my ($self) = @_;
if ($self->_go_version) {
# Munging strictness because we don't have a version from a
# Git checkout. Dist::Zilla provides it.
no strict 'vars';
my $version = $VERSION // 'dev-git';
say "hailo $version";
return;
}
if ($self->_is_interactive() and
$self->_storage->ready and
not defined $self->_go_train and
not defined $self->_go_train_fast and
not defined $self->_go_learn and
not defined $self->_go_reply and
not defined $self->_go_learn_reply and
not defined $self->_go_stats and
not defined $self->_go_random_reply) {
$self->_ui->run($self);
}
$self->train($self->_go_train) if defined $self->_go_train;
$self->train($self->_go_train_fast, 1) if defined $self->_go_train_fast;
$self->learn($self->_go_learn) if defined $self->_go_learn;
if (defined $self->_go_learn_reply) {
my $answer = $self->learn_reply($self->_go_learn_reply);
say $answer // "I don't know enough to answer you yet.";
}
if (defined $self->_go_random_reply) {
my $answer = $self->reply();
say $answer // "I don't know enough to answer you yet.";
}
elsif (defined $self->_go_reply) {
my $answer = $self->reply($self->_go_reply);
say $answer // "I don't know enough to answer you yet.";
}
if ($self->_go_stats) {
my ($tok, $ex, $prev, $next) = $self->stats();
my $order = $self->_storage->order;
say "Tokens: $tok";
say "Expression length: $order tokens";
say "Expressions: $ex";
say "Links to preceding tokens: $prev";
say "Links to following tokens: $next";
}
return;
}
override _train_fh => sub {
my ($self, $fh, $fast, $filename) = @_;
if ($self->_go_progress and $self->_is_interactive) {
$self->train_progress($fh, $fast, $filename);
} else {
super();
}
};
before train_progress => sub {
require Term::Sk;
require File::CountLines;
File::CountLines->import('count_lines');
require Time::HiRes;
Time::HiRes->import(qw(gettimeofday tv_interval));
return;
};
sub train_progress {
my ($self, $fh, $fast, $filename) = @_;
my $lines = count_lines($filename);
my $progress = Term::Sk->new('%d Elapsed: %8t %21b %4p %2d (%c lines of %m)', {
# Start at line 1, not 0
base => 1,
target => $lines,
# Every 0.1 seconds for long files
freq => ($lines < 10_000 ? 10 : 'd'),
- # Override Term::Sk's default 100_100 to 100,000
+ # Override Term::Sk's default 100_000 to 100,000
commify => sub {
my $int = shift;
$int = reverse $int;
$int =~ s/(\d{3})(?=\d)(?!\d*\.)/$1,/g;
$int = reverse $int;
return $int;
},
}) or die "Error in Term::Sk->new: (code $Term::Sk::errcode) $Term::Sk::errmsg";
my $next_update = 0;
my $start_time = [gettimeofday()];
my $i = 0; while (my $line = <$fh>) {
$i++;
chomp $line;
$self->_learn_one($line, $fast);
$self->_engine->flush_cache if !$fast;
$progress->up;
}
$progress->close;
if ($fast) {
print "Flushing cache (this may take a while for large inputs)\n";
$self->_engine->flush_cache;
}
my $elapsed = tv_interval($start_time);
say sprintf "Trained from %d lines in %.2f seconds; %.2f lines/s", $i, $elapsed, ($i / $elapsed);
return;
}
# --i--do-not-exist
sub _getopt_spec_exception { goto &_getopt_full_usage }
# --help
sub _getopt_full_usage {
my ($self, $usage, $plain_str) = @_;
# If called from _getopt_spec_exception we get "Unknown option: foo"
my $warning = ref $usage eq 'ARRAY' ? $usage->[0] : undef;
my ($use, $options) = do {
# $plain_str under _getopt_spec_exception
my $out = $plain_str // $usage->text;
# The default getopt order sucks, use reverse sort order
chomp(my @out = split /^/, $out);
my $opt = join "\n", sort { $b cmp $a } @out[1 .. $#out];
($out[0], $opt);
};
my $synopsis = do {
require Pod::Usage;
my $out;
open my $fh, '>', \$out;
no warnings 'once';
my $hailo = File::Spec->catfile($Hailo::Command::HERE_MOMMY, 'hailo');
# Try not to fail on Win32 or other odd systems which might have hailo.pl not hailo
$hailo = ((glob("$hailo*"))[0]) unless -f $hailo;
Pod::Usage::pod2usage(
-input => $hailo,
-sections => 'SYNOPSIS',
-output => $fh,
-exitval => 'noexit',
);
close $fh;
$out =~ s/\n+$//s;
$out =~ s/^Usage:/examples:/;
$out;
};
# Unknown option provided
print $warning if $warning;
print <<"USAGE";
$use
$options
\n\tNote: All input/output and files are assumed to be UTF-8 encoded.
USAGE
# Hack: We can't get at our object from here so we have to inspect
# @ARGV directly.
say "\n", $synopsis if "@ARGV" ~~ /--examples/;
exit 1;
}
__PACKAGE__->meta->make_immutable;
=head1 NAME
Hailo::Command - Class for the L<hailo> command-line interface to L<Hailo>
=head1 DESCRIPTION
This is an internal class L<hailo> uses for its command-line
interface. See L<Hailo> for the public interface.
=head1 PRIVATE METHODS
=head2 C<run>
Run Hailo in accordance with the attributes that were passed to
it. This method is called by the L<hailo> command-line utility and the
Hailo test suite; its behavior is subject to change.
=head1 AUTHOR
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | 8fc304dc9c001eaf7fb395ee8af6108c2edf4aab | Word tokenizer: Improve tokenization of email addresses | diff --git a/Changes b/Changes
index 38bbe49..c8291ec 100644
--- a/Changes
+++ b/Changes
@@ -1,515 +1,516 @@
Revision history for Hailo
{{$NEXT}}
+ - Word tokenizer: Improve tokenization of email addresses
0.69 2011-05-07 04:02:38
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
* Match IRC channel names (#foo, &bar, +baz)
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
* Match </foo> as a single token
- Depend on MouseX::Getopt 0.33 to fix test failures
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project (a toy sketch follows this
entry).
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
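A toy sketch of the generate-and-score loop described above (the reply and score routines are illustrative placeholders, not Hailo::Engine::Scored's real interface):

    use Time::HiRes qw(time);
    sub score { -length $_[0] }   # toy metric: shorter replies score higher
    sub best_reply {
        my ($engine, $tokens, $max_secs) = @_;
        my ($best, $best_score) = (undef, -1e9);
        my $deadline = time() + $max_secs;
        while (time() < $deadline) {
            my $candidate = $engine->reply($tokens);   # hypothetical call
            my $score     = score($candidate);
            ($best, $best_score) = ($candidate, $score)
                if $score > $best_score;
        }
        return $best;
    }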
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and closed the bug by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface is now much better. It
supports SQLite-like command syntax and has a built-in help
system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization); overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- Add a script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message was
always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the one in the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code; that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 7cc65fd..7d1efc4 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,230 +1,230 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $SPACE = qr/\s/;
my $NONSPACE = qr/\S/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/[^\w\s]+/;
my $BARE_WORD = qr/\w+/;
my $CURRENCY = qr/[¤¥¢£\$]/;
my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
-my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
+my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ (?: \. [A-Z]{2,4} )* /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
my $IRC_CHAN = qr/[#&+][^ \a\0\012\015,:]{1,199}/;
my $NUMERO = qr/#[0-9]+/;
my $CLOSE_TAG = qr{</[-\w]+>};
my $CASED_WORD = qr/$CLOSE_TAG|$IRC_NICK|$IRC_CHAN|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$IRC_CHAN|$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$NUMERO|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
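# A hedged illustration of what the patterns above match, using strings
# drawn from the test suite below:
#   $NUMBER     matches "$.50", "3.14" and "4.1GB" as single tokens
#   $ABBREV     matches "e.g."
#   $APOST_WORD matches "i've" and "KIA'd"
#   $DOTTED     matches "sources.list" and ".com"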
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K\s*\n+\s*//;
$input =~ s/\s*\n+\s*/ /gm;
while (length $input) {
# remove the next chunk of whitespace
$input =~ s/^$SPACE+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
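A minimal usage sketch for the tokenizer above (hypothetical driver code;
the input/output pair is taken from the test suite that follows):

    use 5.010;
    use utf8;
    use Hailo::Tokenizer::Words;

    my $toke   = Hailo::Tokenizer::Words->new();
    # make_tokens returns an array ref of [spacing_attribute, text] pairs
    my $tokens = $toke->make_tokens('pi is 3.14, well, almost');
    my $reply  = $toke->make_output($tokens);
    say $reply;    # "Pi is 3.14, well, almost."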
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index e0ad92a..062e52b 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,527 +1,532 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGΛ̊TE',
[qw<this is STARGΛ̊TE>],
'This is STARGΛ̊TE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
- [qw<tumi @ foo>],
+ [qw<tumi@foo>],
'tumi@foo',
],
+ [
+ '[email protected]',
+ [qw<[email protected]>],
+ '[email protected]',
+ ],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
[
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM",
[qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>, '22:55 PM'],
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM.",
],
[
'<@literal> oh hi < literal> what is going on?',
[qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
'<@literal> oh hi < literal> what is going on?',
],
[
'It costs $.50, no, wait, it cost $2.50... or 50¢',
[qw<it costs $.50>, ',', 'no', ',', 'wait', ',', qw<it cost $2.50 ... or 50¢>],
'It costs $.50, no, wait, it cost $2.50... or 50¢.',
],
[
'10pt or 12em or 15cm',
[qw<10pt or 12em or 15cm>],
'10pt or 12em or 15cm.',
],
[
'failo is #1',
[qw<failo is>, '#1'],
'Failo is #1.',
],
[
'We are in #perl',
[qw<we are in>, '#perl'],
'We are in #perl.',
],
[
'</foo>',
[qw{</foo>}],
'</foo>',
],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
hinrik/hailo | 03c1393f5df6fbbbe1fc07a69696b947a18ad73f | Shorten this | diff --git a/lib/Hailo/Engine/Scored.pm b/lib/Hailo/Engine/Scored.pm
index 9354f37..9451bac 100644
--- a/lib/Hailo/Engine/Scored.pm
+++ b/lib/Hailo/Engine/Scored.pm
@@ -1,279 +1,278 @@
package Hailo::Engine::Scored;
use 5.010;
use Any::Moose;
use List::Util qw<sum>;
use List::MoreUtils qw<any>;
use Time::HiRes qw<gettimeofday tv_interval>;
extends 'Hailo::Engine::Default';
after BUILD => sub {
my ($self) = @_;
my %args = $self->arguments;
if (defined $args{iterations} && defined $args{interval}) {
die __PACKAGE__.": You can only specify one of 'iterations' and 'interval'\n";
}
return;
};
sub reply {
my $self = shift;
my $tokens = shift // [];
# see if we recognize any of the input tokens
my $token_cache = $self->_resolve_input_tokens($tokens);
my @input_token_ids = keys %$token_cache;
my @token_counts;
# let's select potential pivot tokens from the input
if (keys %$token_cache) {
# we only want the ones with normal spacing (usually normal words)
@token_counts = map {
$token_cache->{$_}[0] == 0 ? [$_, $token_cache->{$_}[2]] : ()
} keys %$token_cache;
}
my $token_probs = $self->_get_pivot_probabilites(\@token_counts);
my @started = gettimeofday();
my $iterations = 0;
my $done;
my %args = $self->arguments;
if (!defined $args{iterations} && !defined $args{interval}) {
# construct replies for half a second by default
$args{interval} = 0.5;
}
if (defined $args{iterations}) {
$done = sub {
return 1 if $iterations == $args{iterations};
};
}
else {
$done = sub {
my $elapsed = tv_interval(\@started, [gettimeofday]);
return 1 if $elapsed >= $args{interval};
};
}
my (%link_cache, %expr_cache, $best_score, $best_reply);
while (1) {
$iterations++;
my $reply = $self->_generate_reply($token_probs, \%expr_cache);
return if !defined $reply; # we don't know any expressions yet
my $score = $self->_evaluate_reply(\@input_token_ids, $reply, \%link_cache);
if (defined $best_reply && $self->_too_similar(\@input_token_ids, $reply)) {
last if $done->();
next;
}
if (!defined $best_score || $score > $best_score) {
$best_score = $score;
$best_reply = $reply;
}
last if $done->();
}
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @$best_reply;
return \@output;
}
# Calculate the probability we wish to pick each token as the pivot.
# This uses -log2(p) as a method for inverting token probability,
# ensuring that our rarer tokens are picked more often.
sub _get_pivot_probabilites {
my ($self, $token_counts) = @_;
return [] if !@$token_counts;
return [[$token_counts->[0], 1]] if @$token_counts == 1;
# calculate the (non-normalized) probability we want each to occur
my $count_sum = sum(map { $_->[1] } @$token_counts);
my $p = [];
my $p_sum = 0;
for my $token_count (map { $_->[1] } @$token_counts) {
my $token_p = -log(($token_count/$count_sum))/log(2);
push @$p, $token_p;
$p_sum += $token_p;
}
# normalize the probabilities
my @probs = map {
[$token_counts->[$_], $p->[$_] / $p_sum];
} 0..$#{ $token_counts };
return \@probs;
}
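# A hedged worked example of the -log2(p) weighting above (token counts
# invented for illustration):
#   counts: "rare" => 1, "the" => 7, so count_sum = 8
#   -log2(1/8) = 3.000 for "rare"; -log2(7/8) ≈ 0.193 for "the"
#   normalizing by the sum (≈ 3.193) gives pivot probabilities of
#   roughly 94% for "rare" and 6% for "the" -- rare tokens win.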
sub _generate_reply {
my ($self, $token_probs, $expr_cache) = @_;
my ($pivot_expr_id, @token_ids) = @_;
if (@$token_probs) {
my $pivot_token_id = $self->_choose_pivot($token_probs);
($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
}
else {
($pivot_expr_id, @token_ids) = $self->_random_expr();
return if !defined $pivot_expr_id; # no expressions in the database
}
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, $expr_cache);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, $expr_cache);
return \@token_ids;
}
sub _evaluate_reply {
my ($self, $input_token_ids, $reply_token_ids, $cache) = @_;
my $order = $self->order;
my $score = 0;
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $next_token_id = $reply_token_ids->[$idx];
if (any { $_ == $next_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx .. $idx+$order-1];
my $key = join('_', @expr)."-$next_token_id";
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('next', \@expr, $next_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $prev_token_id = $reply_token_ids->[$idx];
if (any { $_ == $prev_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx+1 .. $idx+$order];
my $key = "$prev_token_id-".join('_', @expr);
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('prev', \@expr, $prev_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
# Prefer shorter replies. This behavior is present but not
# documented in recent MegaHAL.
my $score_divider = 1;
if (@$reply_token_ids >= 8) {
- $score_divider = sqrt(@$reply_token_ids - 1);
+ $score /= sqrt(@$reply_token_ids - 1);
}
elsif (@$reply_token_ids >= 16) {
- $score_divider = @$reply_token_ids;
+ $score /= @$reply_token_ids;
}
- $score = $score / $score_divider;
return $score;
}
sub _expr_token_probability {
my ($self, $pos, $expr, $token_id) = @_;
my $order = $self->order;
my $expr_id = $self->_expr_id_add($expr);
$self->{"_sth_${pos}_token_count"}->execute($expr_id, $token_id);
my $expr2token = $self->{"_sth_${pos}_token_count"}->fetchrow_array();
return 0 if !$expr2token;
$self->{"_sth_${pos}_token_links"}->execute($expr_id);
my $expr2all = $self->{"_sth_${pos}_token_links"}->fetchrow_array();
return $expr2token / $expr2all;
}
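# Hedged example of the ratio above (counts invented): if the expression
# "he is" links forward to "tall" 2 times, and has 10 forward links in
# total, the returned probability is 2/10 = 0.2.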
sub _choose_pivot {
my ($self, $token_probs) = @_;
my $random = rand;
my $p = 0;
for my $token (@$token_probs) {
$p += $token->[1];
return $token->[0][0] if $p > $random;
}
return;
}
sub _too_similar {
my ($self, $input_token_ids, $reply_token_ids) = @_;
my %input_token_ids = map { +$_ => 1 } @$input_token_ids;
for my $reply_token_id (@$reply_token_ids) {
return if !$input_token_ids{$reply_token_id};
}
return 1;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Scored - MegaHAL-style reply scoring for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>. It inherits from
L<Hailo::Engine::Default|Hailo::Engine::Default> and only overrides its
C<reply> method.
It generates multiple replies and applies a scoring algorithm to them, then
returns the best one, similar to MegaHAL.
=head1 ATTRIBUTES
=head2 C<engine_args>
This is a hash reference which can have the following keys:
=head3 C<iterations>
The number of replies to generate before returning the best one.
=head3 C<interval>
The time (in seconds) to spend on generating replies before returning the
best one.
You can not specify both C<iterations> and C<interval> at the same time. If
neither is specified, a default C<interval> of 0.5 seconds will be used.
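For example, a hypothetical construction (attribute names as documented
in L<Hailo|Hailo>):

    my $hailo = Hailo->new(
        brain        => 'brain.sqlite',
        engine_class => 'Scored',
        engine_args  => { interval => 0.25 },  # spend 1/4 second per reply
    );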
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
This module was based on code from Peter Teichman's Cobe project.
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | f89b23aad6798a8698d14f024a369aae1b4a4cfc | Remove unused variable | diff --git a/lib/Hailo/Engine/Scored.pm b/lib/Hailo/Engine/Scored.pm
index 85d0d45..9354f37 100644
--- a/lib/Hailo/Engine/Scored.pm
+++ b/lib/Hailo/Engine/Scored.pm
@@ -1,280 +1,279 @@
package Hailo::Engine::Scored;
use 5.010;
use Any::Moose;
use List::Util qw<sum>;
use List::MoreUtils qw<any>;
use Time::HiRes qw<gettimeofday tv_interval>;
extends 'Hailo::Engine::Default';
after BUILD => sub {
my ($self) = @_;
my %args = $self->arguments;
if (defined $args{iterations} && defined $args{interval}) {
die __PACKAGE__.": You can only specify one of 'iterations' and 'interval'\n";
}
return;
};
sub reply {
my $self = shift;
my $tokens = shift // [];
# see if we recognize any of the input tokens
my $token_cache = $self->_resolve_input_tokens($tokens);
my @input_token_ids = keys %$token_cache;
my @token_counts;
# let's select potential pivot tokens from the input
if (keys %$token_cache) {
# we only want the ones with normal spacing (usually normal words)
@token_counts = map {
$token_cache->{$_}[0] == 0 ? [$_, $token_cache->{$_}[2]] : ()
} keys %$token_cache;
}
my $token_probs = $self->_get_pivot_probabilites(\@token_counts);
my @started = gettimeofday();
my $iterations = 0;
my $done;
my %args = $self->arguments;
if (!defined $args{iterations} && !defined $args{interval}) {
# construct replies for half a second by default
$args{interval} = 0.5;
}
if (defined $args{iterations}) {
$done = sub {
return 1 if $iterations == $args{iterations};
};
}
else {
$done = sub {
my $elapsed = tv_interval(\@started, [gettimeofday]);
return 1 if $elapsed >= $args{interval};
};
}
my (%link_cache, %expr_cache, $best_score, $best_reply);
while (1) {
$iterations++;
my $reply = $self->_generate_reply($token_probs, \%expr_cache);
return if !defined $reply; # we don't know any expressions yet
my $score = $self->_evaluate_reply(\@input_token_ids, $reply, \%link_cache);
if (defined $best_reply && $self->_too_similar(\@input_token_ids, $reply)) {
last if $done->();
next;
}
if (!defined $best_score || $score > $best_score) {
$best_score = $score;
$best_reply = $reply;
}
last if $done->();
}
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @$best_reply;
return \@output;
}
# Calculate the probability we wish to pick each token as the pivot.
# This uses -log2(p) as a method for inverting token probability,
# ensuring that our rarer tokens are picked more often.
sub _get_pivot_probabilites {
my ($self, $token_counts) = @_;
return [] if !@$token_counts;
return [[$token_counts->[0], 1]] if @$token_counts == 1;
# calculate the (non-normalized) probability we want each to occur
my $count_sum = sum(map { $_->[1] } @$token_counts);
my $p = [];
my $p_sum = 0;
for my $token_count (map { $_->[1] } @$token_counts) {
my $token_p = -log(($token_count/$count_sum))/log(2);
push @$p, $token_p;
$p_sum += $token_p;
}
# normalize the probabilities
my @probs = map {
[$token_counts->[$_], $p->[$_] / $p_sum];
} 0..$#{ $token_counts };
return \@probs;
}
sub _generate_reply {
my ($self, $token_probs, $expr_cache) = @_;
my ($pivot_expr_id, @token_ids) = @_;
if (@$token_probs) {
my $pivot_token_id = $self->_choose_pivot($token_probs);
($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
}
else {
($pivot_expr_id, @token_ids) = $self->_random_expr();
return if !defined $pivot_expr_id; # no expressions in the database
}
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, $expr_cache);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, $expr_cache);
return \@token_ids;
}
sub _evaluate_reply {
my ($self, $input_token_ids, $reply_token_ids, $cache) = @_;
my $order = $self->order;
my $score = 0;
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $next_token_id = $reply_token_ids->[$idx];
if (any { $_ == $next_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx .. $idx+$order-1];
my $key = join('_', @expr)."-$next_token_id";
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('next', \@expr, $next_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $prev_token_id = $reply_token_ids->[$idx];
if (any { $_ == $prev_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx+1 .. $idx+$order];
my $key = "$prev_token_id-".join('_', @expr);
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('prev', \@expr, $prev_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
# Prefer shorter replies. This behavior is present but not
# documented in recent MegaHAL.
my $score_divider = 1;
- my $n_tokens = @$reply_token_ids;
if (@$reply_token_ids >= 8) {
$score_divider = sqrt(@$reply_token_ids - 1);
}
elsif (@$reply_token_ids >= 16) {
$score_divider = @$reply_token_ids;
}
$score = $score / $score_divider;
return $score;
}
sub _expr_token_probability {
my ($self, $pos, $expr, $token_id) = @_;
my $order = $self->order;
my $expr_id = $self->_expr_id_add($expr);
$self->{"_sth_${pos}_token_count"}->execute($expr_id, $token_id);
my $expr2token = $self->{"_sth_${pos}_token_count"}->fetchrow_array();
return 0 if !$expr2token;
$self->{"_sth_${pos}_token_links"}->execute($expr_id);
my $expr2all = $self->{"_sth_${pos}_token_links"}->fetchrow_array();
return $expr2token / $expr2all;
}
sub _choose_pivot {
my ($self, $token_probs) = @_;
my $random = rand;
my $p = 0;
for my $token (@$token_probs) {
$p += $token->[1];
return $token->[0][0] if $p > $random;
}
return;
}
sub _too_similar {
my ($self, $input_token_ids, $reply_token_ids) = @_;
my %input_token_ids = map { +$_ => 1 } @$input_token_ids;
for my $reply_token_id (@$reply_token_ids) {
return if !$input_token_ids{$reply_token_id};
}
return 1;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Scored - MegaHAL-style reply scoring for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>. It inherits from
L<Hailo::Engine::Default|Hailo::Engine::Default> and only overrides its
C<reply> method.
It generates multiple replies and applies a scoring algorithm to them, then
returns the best one, similar to MegaHAL.
=head1 ATTRIBUTES
=head2 C<engine_args>
This is a hash reference which can have the following keys:
=head3 C<iterations>
The number of replies to generate before returning the best one.
=head3 C<interval>
The time (in seconds) to spend on generating replies before returning the
best one.
You can not specify both C<iterations> and C<interval> at the same time. If
neither is specified, a default C<interval> of 0.5 seconds will be used.
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
This module was based on code from Peter Teichman's Cobe project.
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | ff707ee9223305f984a9090f8acaf99f2b422d86 | Depend on MouseX::Getopt 0.33 to fix test failures | diff --git a/Changes b/Changes
index 7893da3..ceb15e8 100644
--- a/Changes
+++ b/Changes
@@ -1,527 +1,529 @@
Revision history for Hailo
{{$NEXT}}
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
* Match IRC channel names (#foo, &bar, +baz)
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
* Match </foo> as a single token
+ - Depend on MouseX::Getopt 0.33 to fix test failures
+
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
with fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
diff --git a/dist.ini b/dist.ini
index 95bb363..48b4f0d 100644
--- a/dist.ini
+++ b/dist.ini
@@ -1,84 +1,84 @@
name = Hailo
author = Hinrik Örn Sigurðsson <[email protected]>
author = Ævar Arnfjörð Bjarmason <[email protected]>
copyright_holder = Hinrik Örn Sigurðsson and Ævar Arnfjörð Bjarmason
license = Perl_5
[@AVAR]
dist = Hailo
bugtracker = rt
homepage = http://hailo.org
github_user = hailo
no_AutoPrereq = 1
use_MakeMaker = 0
use_CompileTests = 0
[=inc::HailoMakeMaker / HailoMakeMaker]
[Prereqs / RuntimeRequires]
perl = 5.010
; Core stuff
Time::HiRes = 0
File::Spec::Functions = 0
;; Depend on Mouse and Moose, we can use either one
; Mouse
Mouse = 0.62
MouseX::StrictConstructor = 0.02
-MouseX::Getopt = 0.2601
+MouseX::Getopt = 0.33
; Moose
Moose = 1.08
MooseX::StrictConstructor = 0.16
MooseX::Getopt = 0.37
; Hailo.pm
Any::Moose = 0.13
autodie = 2.08
Class::Load = 0.06
IPC::System::Simple = 1.21
File::CountLines = 0.0.2
IO::Interactive = 0.0.6
; Command.pm
Getopt::Long::Descriptive = 0.085
Dir::Self = 0.10
Term::Sk = 0.06
; ReadLine.pm
Term::ReadLine = 0
Data::Dump = 1.17
; DBD.pm
List::MoreUtils = 0.22
; SQLite.pm
DBD::SQLite = 1.29
; Words.pm
Regexp::Common = 2010010201
; everywhere
namespace::clean = 0.18
[Prereqs / TestRequires]
File::Slurp = 9999.13
Test::Exception = 0.29
Test::Expect = 0.31
Test::More = 0.94
Test::Output = 0.16
Test::Script = 1.07
Test::Script::Run = 0.04
Test::Synopsis = 0.06
Data::Section = 0.101620
; Data to babble on
Bot::Training = 0
Bot::Training::MegaHAL = 0
Bot::Training::StarCraft = 0
[Prereqs / RuntimeRecommends]
;; Pg/mysql: optional backends
DBD::mysql = 4.013
DBD::Pg = 2.16.1
|
hinrik/hailo | d882f1ca81de2419d60068554b95f386df22b981 | Match </foo> as a single token | diff --git a/Changes b/Changes
index 05b6136..7893da3 100644
--- a/Changes
+++ b/Changes
@@ -1,525 +1,526 @@
Revision history for Hailo
{{$NEXT}}
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
* Match IRC channel names (#foo, &bar, +baz)
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
+ * Match </foo> as a single token
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode; it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Reduced the time spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 29d890a..7cc65fd 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,229 +1,230 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $SPACE = qr/\s/;
my $NONSPACE = qr/\S/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/[^\w\s]+/;
my $BARE_WORD = qr/\w+/;
my $CURRENCY = qr/[¤¥¢£\$]/;
my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
my $IRC_CHAN = qr/[#&+][^ \a\0\012\015,:]{1,199}/;
my $NUMERO = qr/#[0-9]+/;
+my $CLOSE_TAG = qr{</[-\w]+>};
-my $CASED_WORD = qr/$IRC_NICK|$IRC_CHAN|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
+my $CASED_WORD = qr/$CLOSE_TAG|$IRC_NICK|$IRC_CHAN|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
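# For illustration, the intended behavior (taken from the test cases in
# t/tokenizer/Words.t) is roughly:
#   "on example.com? yes" -> "On example.com? Yes."  (words capitalized)
#   "s/foo/bar/"          -> "s/foo/bar/"            (code left alone)
#   "http://foo.BAR/bAz"  -> "http://foo.BAR/bAz"    (URLs left alone)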
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!â½]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!â½,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$IRC_CHAN|$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$NUMERO|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K\s*\n+\s*//;
$input =~ s/\s*\n+\s*/ /gm;
while (length $input) {
# remove the next chunk of whitespace
$input =~ s/^$SPACE+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
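# e.g. (illustrative) "http://beint.lýðræði.is" is probed for a URI as
# "http://beint.laaraai.is", but the original text is what gets tokenized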
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
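# e.g. in q{foo(bar)} the "(" is an infix token and ")" a postfix token,
# so no space is emitted around either bracket (illustrative case from
# the tests: "I called foo() and foo(bar)" round-trips unchanged)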
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index 6195edf..e0ad92a 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,522 +1,527 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
[
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM",
[qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>, '22:55 PM'],
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM.",
],
[
'<@literal> oh hi < literal> what is going on?',
[qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
'<@literal> oh hi < literal> what is going on?',
],
[
'It costs $.50, no, wait, it cost $2.50... or 50¢',
[qw<it costs $.50>, ',', 'no', ',', 'wait', ',', qw<it cost $2.50 ... or 50¢>],
'It costs $.50, no, wait, it cost $2.50... or 50¢.',
],
[
'10pt or 12em or 15cm',
[qw<10pt or 12em or 15cm>],
'10pt or 12em or 15cm.',
],
[
'failo is #1',
[qw<failo is>, '#1'],
'Failo is #1.',
],
[
'We are in #perl',
[qw<we are in>, '#perl'],
'We are in #perl.',
],
+ [
+ '</foo>',
+ [qw{</foo>}],
+ '</foo>',
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
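A minimal runnable sketch of the behavior this commit adds, using only the constructor and the two methods exercised by the test above (it assumes Hailo and its dependencies are installed):
use 5.010;
use strict;
use warnings;
use Hailo::Tokenizer::Words;

my $toke   = Hailo::Tokenizer::Words->new();
my $tokens = $toke->make_tokens('</foo>');
# each parsed token is a [spacing_attribute, text] pair
say scalar @$tokens;             # 1 -- the closing tag is a single token
say $tokens->[0][1];             # </foo>
say $toke->make_output($tokens); # </foo> -- case and shape preserved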
|
hinrik/hailo | 44ebb153142297eae99e20488edf020006a772ad | Match IRC channel names (#foo, &bar, +baz) | diff --git a/Changes b/Changes
index c06c4b3..05b6136 100644
--- a/Changes
+++ b/Changes
@@ -1,522 +1,523 @@
Revision history for Hailo
{{$NEXT}}
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
+ * Match IRC channel names (#foo, &bar, +baz)
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode; it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Reduced the time spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index ec8c32b..29d890a 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,228 +1,229 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $SPACE = qr/\s/;
my $NONSPACE = qr/\S/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/[^\w\s]+/;
my $BARE_WORD = qr/\w+/;
my $CURRENCY = qr/[¤¥¢£\$]/;
my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
+my $IRC_CHAN = qr/[#&+][^ \a\0\012\015,:]{1,199}/;
my $NUMERO = qr/#[0-9]+/;
-my $CASED_WORD = qr/$IRC_NICK|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
+my $CASED_WORD = qr/$IRC_NICK|$IRC_CHAN|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
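# e.g. (from t/tokenizer/Words.t) "We are in #perl" now tokenizes as
# qw<we are in>, '#perl' -- the channel name is one case-preserved token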
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
-my $LOOSE_WORD = qr/$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$NUMERO|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
+my $LOOSE_WORD = qr/$IRC_CHAN|$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$NUMERO|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K\s*\n+\s*//;
$input =~ s/\s*\n+\s*/ /gm;
while (length $input) {
# remove the next chunk of whitespace
$input =~ s/^$SPACE+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
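# Illustration (not from the original source): "WoW" keeps its case via
# $MIXED_CASE, "KIA'd" via $UPPER_NONW, and "WOAH" because it already
# equals its upper-cased form; plain "Foo" triggers none of these
# exceptions and is folded to "foo".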
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
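# Example of the spacing rules above (not from the original source): for
# the tokens <foo> <,> <bar>, the comma carries postfix spacing, so no
# space is appended before it and one is appended after, giving "foo, bar".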
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index 1315891..6195edf 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,517 +1,522 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
[
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM",
[qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>, '22:55 PM'],
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM.",
],
[
'<@literal> oh hi < literal> what is going on?',
[qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
'<@literal> oh hi < literal> what is going on?',
],
[
'It costs $.50, no, wait, it cost $2.50... or 50¢',
[qw<it costs $.50>, ',', 'no', ',', 'wait', ',', qw<it cost $2.50 ... or 50¢>],
'It costs $.50, no, wait, it cost $2.50... or 50¢.',
],
[
'10pt or 12em or 15cm',
[qw<10pt or 12em or 15cm>],
'10pt or 12em or 15cm.',
],
[
'failo is #1',
[qw<failo is>, '#1'],
'Failo is #1.',
],
+ [
+ 'We are in #perl',
+ [qw<we are in>, '#perl'],
+ 'We are in #perl.',
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
hinrik/hailo | 3ba8351bb28d57f649ab64505fcb9bdaf16b86bf | Forget the tabs matching | diff --git a/Changes b/Changes
index d5c9103..c06c4b3 100644
--- a/Changes
+++ b/Changes
@@ -1,521 +1,520 @@
Revision history for Hailo
{{$NEXT}}
- Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- Word tokenizer:
* Improve matching/capitalization of filenames and domain names
- * Match tabs as tokens
* Match timestamps as single tokens
* Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
* Match various prefixes and postfixes with numbers
* Match "#1" and "#1234" as single tokens
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 69bb014..ec8c32b 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,228 +1,228 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
-my $SPACE = qr/[\n ]/;
-my $NONSPACE = qr/[^\n ]/;
+my $SPACE = qr/\s/;
+my $NONSPACE = qr/\S/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
-my $NON_WORD = qr/[^\w\n ]+/;
+my $NON_WORD = qr/[^\w\s]+/;
my $BARE_WORD = qr/\w+/;
my $CURRENCY = qr/[¤¥¢£\$]/;
my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
my $NUMERO = qr/#[0-9]+/;
my $CASED_WORD = qr/$IRC_NICK|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$NUMERO|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
- $input =~ s/$DASH\K *\n+ *//;
- $input =~ s/ *\n+ */ /gm;
+ $input =~ s/$DASH\K\s*\n+\s*//;
+ $input =~ s/\s*\n+\s*/ /gm;
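# Net effect of the two substitutions above (illustrative): a dash at a
# line break swallows the newline, so "POE-Component- \n IRC" rejoins as
# "POE-Component-IRC", while any other run of newlines collapses to a
# single space before tokenization begins.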
while (length $input) {
# remove the next chunk of whitespace
- $input =~ s/^[\n ]+//;
+ $input =~ s/^$SPACE+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index e7118aa..1315891 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,522 +1,517 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
- [
- "Tabs\ttabs\ttabs.",
- ['tabs', "\t", 'tabs', "\t", 'tabs', '.'],
- "Tabs\ttabs\ttabs.",
- ],
[
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM",
[qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>, '22:55 PM'],
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM.",
],
[
'<@literal> oh hi < literal> what is going on?',
[qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
'<@literal> oh hi < literal> what is going on?',
],
[
'It costs $.50, no, wait, it cost $2.50... or 50¢',
[qw<it costs $.50>, ',', 'no', ',', 'wait', ',', qw<it cost $2.50 ... or 50¢>],
'It costs $.50, no, wait, it cost $2.50... or 50¢.',
],
[
'10pt or 12em or 15cm',
[qw<10pt or 12em or 15cm>],
'10pt or 12em or 15cm.',
],
[
'failo is #1',
[qw<failo is>, '#1'],
'Failo is #1.',
],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
hinrik/hailo | c46d027b96d3fb17fb93341604621046f84632e5 | Prevent some cache collisions here | diff --git a/lib/Hailo/Engine/Scored.pm b/lib/Hailo/Engine/Scored.pm
index ffc5dc7..85d0d45 100644
--- a/lib/Hailo/Engine/Scored.pm
+++ b/lib/Hailo/Engine/Scored.pm
@@ -1,280 +1,280 @@
package Hailo::Engine::Scored;
use 5.010;
use Any::Moose;
use List::Util qw<sum>;
use List::MoreUtils qw<any>;
use Time::HiRes qw<gettimeofday tv_interval>;
extends 'Hailo::Engine::Default';
after BUILD => sub {
my ($self) = @_;
my %args = $self->arguments;
if (defined $args{iterations} && defined $args{interval}) {
die __PACKAGE__.": You can only specify one of 'iterations' and 'interval'\n";
}
return;
};
sub reply {
my $self = shift;
my $tokens = shift // [];
# see if we recognize any of the input tokens
my $token_cache = $self->_resolve_input_tokens($tokens);
my @input_token_ids = keys %$token_cache;
my @token_counts;
# let's select potential pivot tokens from the input
if (keys %$token_cache) {
# we only want the ones with normal spacing (usually normal words)
@token_counts = map {
$token_cache->{$_}[0] == 0 ? [$_, $token_cache->{$_}[2]] : ()
} keys %$token_cache;
}
my $token_probs = $self->_get_pivot_probabilites(\@token_counts);
my @started = gettimeofday();
my $iterations = 0;
my $done;
my %args = $self->arguments;
if (!defined $args{iterations} && !defined $args{interval}) {
# construct replies for half a second by default
$args{interval} = 0.5;
}
if (defined $args{iterations}) {
$done = sub {
return 1 if $iterations == $args{iterations};
};
}
else {
$done = sub {
my $elapsed = tv_interval(\@started, [gettimeofday]);
return 1 if $elapsed >= $args{interval};
};
}
my (%link_cache, %expr_cache, $best_score, $best_reply);
while (1) {
$iterations++;
my $reply = $self->_generate_reply($token_probs, \%expr_cache);
return if !defined $reply; # we don't know any expressions yet
my $score = $self->_evaluate_reply(\@input_token_ids, $reply, \%link_cache);
if (defined $best_reply && $self->_too_similar(\@input_token_ids, $reply)) {
last if $done->();
next;
}
if (!defined $best_score || $score > $best_score) {
$best_score = $score;
$best_reply = $reply;
}
last if $done->();
}
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @$best_reply;
return \@output;
}
# Calculate the probability we wish to pick each token as the pivot.
# This uses -log2(p) as a method for inverting token probability,
# ensuring that our rarer tokens are picked more often.
sub _get_pivot_probabilites {
my ($self, $token_counts) = @_;
return [] if !@$token_counts;
return [[$token_counts->[0], 1]] if @$token_counts == 1;
# calculate the (non-normalized) probability we want each to occur
my $count_sum = sum(map { $_->[1] } @$token_counts);
my $p = [];
my $p_sum = 0;
for my $token_count (map { $_->[1] } @$token_counts) {
my $token_p = -log(($token_count/$count_sum))/log(2);
push @$p, $token_p;
$p_sum += $token_p;
}
# normalize the probabilities
my @probs = map {
[$token_counts->[$_], $p->[$_] / $p_sum];
} 0..$#{ $token_counts };
return \@probs;
}
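# Illustration only (not part of Scored.pm): how the -log2(p) weighting
# above plays out for two tokens with counts 1 and 7. The rarer token's
# normalized pivot probability far exceeds its raw frequency:
#   use List::Util qw<sum>;
#   my @counts = (1, 7);
#   my $total  = sum @counts;                              # 8
#   my @w      = map { -log($_/$total)/log(2) } @counts;   # (3, ~0.19)
#   my $wsum   = sum @w;
#   printf "%.2f ", $_/$wsum for @w;                       # 0.94 0.06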
sub _generate_reply {
my ($self, $token_probs, $expr_cache) = @_;
my ($pivot_expr_id, @token_ids) = @_;
if (@$token_probs) {
my $pivot_token_id = $self->_choose_pivot($token_probs);
($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
}
else {
($pivot_expr_id, @token_ids) = $self->_random_expr();
return if !defined $pivot_expr_id; # no expressions in the database
}
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, $expr_cache);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, $expr_cache);
return \@token_ids;
}
sub _evaluate_reply {
my ($self, $input_token_ids, $reply_token_ids, $cache) = @_;
my $order = $self->order;
my $score = 0;
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $next_token_id = $reply_token_ids->[$idx];
if (any { $_ == $next_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx .. $idx+$order-1];
my $key = join('_', @expr)."-$next_token_id";
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('next', \@expr, $next_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $prev_token_id = $reply_token_ids->[$idx];
if (any { $_ == $prev_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx+1 .. $idx+$order];
- my $key = join('_', @expr)."-$prev_token_id";
+ my $key = "$prev_token_id-".join('_', @expr);
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('prev', \@expr, $prev_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
# Prefer shorter replies. This behavior is present but not
# documented in recent MegaHAL.
my $score_divider = 1;
my $n_tokens = @$reply_token_ids;
if (@$reply_token_ids >= 8) {
$score_divider = sqrt(@$reply_token_ids - 1);
}
elsif (@$reply_token_ids >= 16) {
$score_divider = @$reply_token_ids;
}
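# NB: a token count >= 16 also satisfies the >= 8 test above, so this
# elsif branch can never run; the ordering is left as-is here and
# appears to be inherited from the Cobe scorer this engine is based on.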
$score = $score / $score_divider;
return $score;
}
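# Sketch of the collision the commit message refers to, with illustrative
# token ids: under the old key format, a 'next' lookup for expression
# (12, 34) with token 7 and a 'prev' lookup for the same pair built the
# same cache key, so one direction could be served the other's probability:
#   my $next_key = join('_', 12, 34) . "-7";   # "12_34-7"
#   my $old_prev = join('_', 12, 34) . "-7";   # "12_34-7"  (clash)
#   my $new_prev = "7-" . join('_', 12, 34);   # "7-12_34"  (distinct)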
sub _expr_token_probability {
my ($self, $pos, $expr, $token_id) = @_;
my $order = $self->order;
my $expr_id = $self->_expr_id_add($expr);
$self->{"_sth_${pos}_token_count"}->execute($expr_id, $token_id);
my $expr2token = $self->{"_sth_${pos}_token_count"}->fetchrow_array();
return 0 if !$expr2token;
$self->{"_sth_${pos}_token_links"}->execute($expr_id);
my $expr2all = $self->{"_sth_${pos}_token_links"}->fetchrow_array();
return $expr2token / $expr2all;
}
sub _choose_pivot {
my ($self, $token_probs) = @_;
my $random = rand;
my $p = 0;
for my $token (@$token_probs) {
$p += $token->[1];
return $token->[0][0] if $p > $random;
}
return;
}
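# _choose_pivot is a standard roulette-wheel selection over the
# normalized probabilities built by _get_pivot_probabilites. An
# illustrative call (made-up token ids and weights):
#   my $id = $self->_choose_pivot([ [[42, 3], 0.94], [[7, 1], 0.06] ]);
#   # yields 42 roughly 94% of the time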
sub _too_similar {
my ($self, $input_token_ids, $reply_token_ids) = @_;
my %input_token_ids = map { +$_ => 1 } @$input_token_ids;
for my $reply_token_id (@$reply_token_ids) {
return if !$input_token_ids{$reply_token_id};
}
return 1;
}
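# (_too_similar returns true only when every token in the candidate
# reply already occurs in the input, i.e. the reply adds nothing new.)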
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Scored - MegaHAL-style reply scoring for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>. It inherits from
L<Hailo::Engine::Default|Hailo::Engine::Default> and only overrides its
C<reply> method.
It generates multiple replies and applies a scoring algorithm to them, then
returns the best one, similar to MegaHAL.
=head1 ATTRIBUTES
=head2 C<engine_args>
This is a hash reference which can have the following keys:
=head3 C<iterations>
The number of replies to generate before returning the best one.
=head3 C<interval>
The time (in seconds) to spend on generating replies before returning the
best one.
You can not specify both C<iterations> and C<interval> at the same time. If
neither is specified, a default C<interval> of 0.5 seconds will be used.
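For example, a minimal usage sketch (the brain filename here is
hypothetical) which selects this engine and a custom interval:
    use Hailo;
    my $hailo = Hailo->new(
        brain        => 'brain.sqlite',
        engine_class => 'Scored',
        engine_args  => { interval => 0.25 },
    );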
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
This module was based on code from Peter Teichman's Cobe project.
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | 75f8785d2cca35a628c5febb667595f138749da4 | Silence some Perl syntax warnings | diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index b5f57b1..e7118aa 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,522 +1,522 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
[
"Tabs\ttabs\ttabs.",
['tabs', "\t", 'tabs', "\t", 'tabs', '.'],
"Tabs\ttabs\ttabs.",
],
[
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM",
[qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>, '22:55 PM'],
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM.",
],
[
'<@literal> oh hi < literal> what is going on?',
[qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
'<@literal> oh hi < literal> what is going on?',
],
[
'It costs $.50, no, wait, it cost $2.50... or 50¢',
- [qw<it costs $.50 , no , wait , it cost $2.50 ... or 50¢>],
+ [qw<it costs $.50>, ',', 'no', ',', 'wait', ',', qw<it cost $2.50 ... or 50¢>],
'It costs $.50, no, wait, it cost $2.50... or 50¢.',
],
[
'10pt or 12em or 15cm',
[qw<10pt or 12em or 15cm>],
'10pt or 12em or 15cm.',
],
[
'failo is #1',
- [qw<failo is #1>],
+ [qw<failo is>, '#1'],
'Failo is #1.',
],
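# The two hunks above are the warnings this commit silences: a ','
# inside qw<> triggers Perl's "Possible attempt to separate words with
# commas", and a '#' triggers "Possible attempt to put comments in
# qw() list", so those tokens are now passed as ordinary quoted strings.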
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
hinrik/hailo | 8d355fce6da5a0852694f173b2f64853672b800b | Prettify the Changes file | diff --git a/Changes b/Changes
index 2e12750..d5c9103 100644
--- a/Changes
+++ b/Changes
@@ -1,530 +1,525 @@
Revision history for Hailo
{{$NEXT}}
- - Scored engine: Prefer shorter replies, like MegaHAL/cobe.
+ - Scored engine: Prefer shorter replies, like MegaHAL/cobe do
- - Word tokenizer: Improve matching/capitalization of filenames and
- domain names
-
- - Word tokenizer: Match tabs as tokens
-
- - Word tokenizer: Match timestamps as single tokens
-
- - Word tokenizer: Match IRC nicks (<foobar>, <@foobar>, etc)
-
- - Word tokenizer: Match various prefixes and postfixes with numbers
-
- - Word tokenizer: Match "#1" and "#1234" as single tokens
+ - Word tokenizer:
+ * Improve matching/capitalization of filenames and domain names
+ * Match tabs as tokens
+ * Match timestamps as single tokens
+ * Match IRC nicks (<foobar>, <@foobar>, etc) as single tokens
+ * Match various prefixes and postfixes with numbers
+ * Match "#1" and "#1234" as single tokens
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB")).
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
|
hinrik/hailo | b0d7a17a9dccfdc41db9b2bc096b70ddae72484c | Match "#1" and "#1234" as single tokens | diff --git a/Changes b/Changes
index 4097dd0..2e12750 100644
--- a/Changes
+++ b/Changes
@@ -1,529 +1,531 @@
Revision history for Hailo
{{$NEXT}}
- Scored engine: Prefer shorter replies, like MegaHAL/cobe.
- Word tokenizer: Improve matching/capitalization of filenames and
domain names
- Word tokenizer: Match tabs as tokens
- Word tokenizer: Match timestamps as single tokens
- Word tokenizer: Match IRC nicks (<foobar>, <@foobar>, etc)
- Word tokenizer: Match various prefixes and postfixes with numbers
+ - Word tokenizer: Match "#1" and "#1234" as single tokens
+
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB")).
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index c2ce850..69bb014 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,227 +1,228 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $SPACE = qr/[\n ]/;
my $NONSPACE = qr/[^\n ]/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/[^\w\n ]+/;
my $BARE_WORD = qr/\w+/;
my $CURRENCY = qr/[¤¥¢£\$]/;
my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
+my $NUMERO = qr/#[0-9]+/;
-my $CASED_WORD = qr/$IRC_NICK|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH/;
+my $CASED_WORD = qr/$IRC_NICK|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH|$NUMERO/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
-my $LOOSE_WORD = qr/$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
+my $LOOSE_WORD = qr/$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$NUMERO|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K *\n+ *//;
$input =~ s/ *\n+ */ /gm;
while (length $input) {
# remove the next chunk of whitespace
$input =~ s/^[\n ]+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
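The hunk above adds $NUMERO to the $CASED_WORD alternation. A minimal standalone sketch of the effect (the pattern is copied from the diff; the sample strings are made up):

    use strict;
    use warnings;

    # $NUMERO is the new alternative added to $CASED_WORD in this commit.
    my $NUMERO = qr/#[0-9]+/;

    for my $input ('#1', '#42', '#x') {
        # Anchored match: does the whole string qualify as one cased token?
        print "$input => ", ($input =~ /^$NUMERO$/ ? 'single token' : 'falls through to the non-word branch'), "\n";
    }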
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index cc06ec6..b5f57b1 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,517 +1,522 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
[
"Tabs\ttabs\ttabs.",
['tabs', "\t", 'tabs', "\t", 'tabs', '.'],
"Tabs\ttabs\ttabs.",
],
[
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM",
[qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>, '22:55 PM'],
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM.",
],
[
'<@literal> oh hi < literal> what is going on?',
[qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
'<@literal> oh hi < literal> what is going on?',
],
[
'It costs $.50, no, wait, it cost $2.50... or 50¢',
[qw<it costs $.50 , no , wait , it cost $2.50 ... or 50¢>],
'It costs $.50, no, wait, it cost $2.50... or 50¢.',
],
[
'10pt or 12em or 15cm',
[qw<10pt or 12em or 15cm>],
'10pt or 12em or 15cm.',
],
+ [
+ 'failo is #1',
+ [qw<failo is #1>],
+ 'Failo is #1.',
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
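Taken together with the new test case above, the change can be exercised end-to-end; a minimal sketch, assuming the Hailo distribution is installed (the input string and expected results are the ones from the test):

    use 5.010;
    use Hailo::Tokenizer::Words;

    my $toke   = Hailo::Tokenizer::Words->new();
    my $tokens = $toke->make_tokens('failo is #1');

    # '#1' comes through as a single token ...
    say join ' | ', map { $_->[1] } @$tokens;   # failo | is | #1

    # ... and make_output() capitalizes and appends the full stop.
    say $toke->make_output($tokens);            # Failo is #1.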
hinrik/hailo | 5be1357b296a89b821303a8512686be066b23364 | Allow whitespace in timestamp tokens | diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 2b0ea62..c2ce850 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,227 +1,227 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $SPACE = qr/[\n ]/;
my $NONSPACE = qr/[^\n ]/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/[^\w\n ]+/;
my $BARE_WORD = qr/\w+/;
my $CURRENCY = qr/[¤¥¢£\$]/;
my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
-my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z|[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
+my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
my $CASED_WORD = qr/$IRC_NICK|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K *\n+ *//;
$input =~ s/ *\n+ */ /gm;
while (length $input) {
# remove the next chunk of whitespace
$input =~ s/^[\n ]+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
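A standalone sketch of the widened $TIME pattern (copied from the + line in the diff above); the optional space in " ?[AP]M" is what lets "22:55 PM" match as one token (the sample strings are made up):

    use strict;
    use warnings;
    use utf8;

    my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z| ?[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;

    for my $ts ('22:55', '22:55Z', '22:55 PM', '10:30AM') {
        # Anchored match: does the whole string parse as one timestamp token?
        print "$ts => ", ($ts =~ /^$TIME$/ ? 'one token' : 'would be split'), "\n";
    }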
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index a6aa043..cc06ec6 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,517 +1,517 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
[
"Tabs\ttabs\ttabs.",
['tabs', "\t", 'tabs', "\t", 'tabs', '.'],
"Tabs\ttabs\ttabs.",
],
[
- "2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00",
- [qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>],
- "2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00.",
+ "2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM",
+ [qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>, '22:55 PM'],
+ "2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00 22:55 PM.",
],
[
'<@literal> oh hi < literal> what is going on?',
[qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
'<@literal> oh hi < literal> what is going on?',
],
[
'It costs $.50, no, wait, it cost $2.50... or 50¢',
[qw<it costs $.50 , no , wait , it cost $2.50 ... or 50¢>],
'It costs $.50, no, wait, it cost $2.50... or 50¢.',
],
[
'10pt or 12em or 15cm',
[qw<10pt or 12em or 15cm>],
'10pt or 12em or 15cm.',
],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
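The make_output() spacing logic that both diffs above carry is easiest to see with mixed punctuation; a minimal sketch, assuming Hailo is installed (expected output inferred from the test cases above):

    use 5.010;
    use Hailo::Tokenizer::Words;

    my $toke   = Hailo::Tokenizer::Words->new();
    my $tokens = $toke->make_tokens('I called foo(bar), yes');

    # ',' gets the postfix spacing attribute (no space before it) and '('
    # the prefix attribute (no space after it), so make_output() can
    # reassemble the text without stray spaces.
    say $toke->make_output($tokens);   # expected: I called foo(bar), yes.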
hinrik/hailo | fa5ad7d6adddb5948cae23dfab96812a42f9c4a6 | Match various prefixes and postfixes with numbers | diff --git a/Changes b/Changes
index 41724ad..4097dd0 100644
--- a/Changes
+++ b/Changes
@@ -1,527 +1,529 @@
Revision history for Hailo
{{$NEXT}}
- Scored engine: Prefer shorter replies, like MegaHAL/cobe.
- Word tokenizer: Improve matching/capitalization of filenames and
domain names
- Word tokenizer: Match tabs as tokens
- Word tokenizer: Match timestamps as single tokens
- Word tokenizer: Match IRC nicks (<foobar>, <@foobar>, etc)
+ - Word tokenizer: Match various prefixes and postfixes with numbers
+
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index d0f86b9..2b0ea62 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,226 +1,227 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $SPACE = qr/[\n ]/;
my $NONSPACE = qr/[^\n ]/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/[^\w\n ]+/;
my $BARE_WORD = qr/\w+/;
-my $NUMBER = qr/$POINT\d+(?:$POINT\d+)*|\d+(?:$POINT\d+)+$ALPHABET*/;
+my $CURRENCY = qr/[¤¥¢£\$]/;
+my $NUMBER = qr/$CURRENCY?$POINT\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?|$CURRENCY?\d+(?:$POINT\d+)*(?:$CURRENCY|$ALPHABET+)?(?!\d|$ALPHABET)/;
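# Note on the two added patterns above: $CURRENCY allows an optional
# currency sign before the digits or a currency/unit suffix after them,
# so strings like "$.50", "$2.50", "50¢", "10pt" and "12em" stay single
# tokens (see the tests added in t/tokenizer/Words.t below).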
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z|[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
my $DATETIME = qr/${DATE}T$TIME/;
my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
my $CASED_WORD = qr/$IRC_NICK|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”„«»」』›‚]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K *\n+ *//;
$input =~ s/ *\n+ */ /gm;
while (length $input) {
# remove the next chunk of whitespace
$input =~ s/^[\n ]+//;
my $got_word;
while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $input, 0, $uri_end;
$input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens?
elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index bd9ff22..a6aa043 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,507 +1,517 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
[
"Tabs\ttabs\ttabs.",
['tabs', "\t", 'tabs', "\t", 'tabs', '.'],
"Tabs\ttabs\ttabs.",
],
[
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00",
[qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>],
"2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00.",
],
[
'<@literal> oh hi < literal> what is going on?',
[qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
'<@literal> oh hi < literal> what is going on?',
],
+ [
+ 'It costs $.50, no, wait, it cost $2.50... or 50¢',
+ [qw<it costs $.50 , no , wait , it cost $2.50 ... or 50¢>],
+ 'It costs $.50, no, wait, it cost $2.50... or 50¢.',
+ ],
+ [
+ '10pt or 12em or 15cm',
+ [qw<10pt or 12em or 15cm>],
+ '10pt or 12em or 15cm.',
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
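A minimal sketch of how the tokenizer API exercised above can be driven directly (assuming an installed Hailo; make_tokens() returns [spacing, text] pairs and make_output() rebuilds a string, exactly as t/tokenizer/Words.t uses them):

use 5.010;
use strict;
use warnings;
use utf8;
use Hailo::Tokenizer::Words;

binmode *STDOUT, ':encoding(utf8)';

my $toke   = Hailo::Tokenizer::Words->new();
my $tokens = $toke->make_tokens('It costs $.50, no, wait, it cost $2.50... or 50¢');

# Each token is a [spacing, text] pair; print the text parts only,
# as the tests do:
say join ' ', map { $_->[1] } @$tokens;
# per the test above: it costs $.50 , no , wait , it cost $2.50 ... or 50¢

# Rebuild a normalized sentence from the tokens:
say $toke->make_output($tokens);
# per the test above: It costs $.50, no, wait, it cost $2.50... or 50¢.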
hinrik/hailo | f92dc1cca2a401fd6a7f68bce6255776a3ade429 | Match timestamps and IRC nicks | diff --git a/Changes b/Changes
index 8d04d87..41724ad 100644
--- a/Changes
+++ b/Changes
@@ -1,523 +1,527 @@
Revision history for Hailo
{{$NEXT}}
- Scored engine: Prefer shorter replies, like MegaHAL/cobe.
- Word tokenizer: Improve matching/capitalization of filenames and
domain names
- Word tokenizer: Match tabs as tokens
+ - Word tokenizer: Match timestamps as single tokens
+
+ - Word tokenizer: Match IRC nicks (<foobar>, <@foobar>, etc)
+
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode; it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08,
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization); overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency; use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an
OS X smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index edc1be5..d0f86b9 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,219 +1,226 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
+my $SPACE = qr/[\n ]/;
+my $NONSPACE = qr/[^\n ]/;
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
-my $NON_WORD = qr/\W+/;
+my $NON_WORD = qr/[^\w\n ]+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$POINT\d+(?:$POINT\d+)*|\d+(?:$POINT\d+)+$ALPHABET*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
-my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
+my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// $NONSPACE+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
+my $DATE = qr/[0-9]{4}-W?[0-9]{1,2}-[0-9]{1,2}/i;
+my $TIME = qr/[0-9]{1,2}:[0-9]{2}(?::[0-9]{2})?(?:Z|[AP]M|[-+±][0-9]{2}(?::?[0-9]{2})?)?/i;
+my $DATETIME = qr/${DATE}T$TIME/;
+my $IRC_NICK = qr/<[ @%+~&]?[A-Za-z_`\-^\|\\\{}\[\]][A-Za-z_0-9`\-^\|\\\{}\[\]]+>/;
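# The four patterns added above: $DATE/$TIME/$DATETIME cover ISO-8601-style
# stamps ("2011-05-05", "22:55Z", "2011-05-05T22:55Z"), and $IRC_NICK covers
# bracketed IRC nicks with an optional mode sigil ("<foobar>", "<@foobar>").
# Adding them to $CASED_WORD below keeps each as a single token with its
# casing preserved.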
-my $CASED_WORD = qr/$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH/;
+my $CASED_WORD = qr/$IRC_NICK|$DATETIME|$DATE|$TIME|$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”„«»」』›‚]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
-my $LOOSE_WORD = qr/$PATH|$NUMBER|$ABBREV|$APOST_WORD|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
+my $LOOSE_WORD = qr/$DATETIME|$DATE|$TIME|$PATH|$NUMBER|$ABBREV|$APOST_WORD|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
- $input =~ s/$DASH\K\s*\n+\s*//;
- $input =~ s/\s*\n+\s*/ /gm;
- my @chunks = split /[\n ]+/, $input;
+ $input =~ s/$DASH\K *\n+ *//;
+ $input =~ s/ *\n+ */ /gm;
- # process all whitespace-delimited chunks
- for my $chunk (@chunks) {
+ while (length $input) {
+ # remove the next chunk of whitespace
+ $input =~ s/^[\n ]+//;
my $got_word;
- while (length $chunk) {
+ while (length $input && $input =~ /^$NONSPACE/) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
- my $ascii = $chunk;
+ my ($ascii) = $input =~ /^($NONSPACE+)/;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
- my $uri = substr $chunk, 0, $uri_end;
- $chunk =~ s/^\Q$uri//;
+ my $uri = substr $input, 0, $uri_end;
+ $input =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
- elsif (!$got_word && $chunk =~ s/ ^ (?<word> $CASED_WORD )//xo) {
+ elsif (!$got_word && $input =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
- elsif ($chunk =~ / ^ $WORD /xo) {
+ elsif ($input =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
- if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
+ if (!@tokens && $input =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
- && $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
+ && $input =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
- $chunk =~ s/^($WORD)//o and $word = $1;
+ $input =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
- elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
+ elsif ($input =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
- $spacing = length $chunk
+ $spacing = $input =~ /^$NONSPACE/
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
- # do we still have more tokens in this chunk?
- elsif (length $chunk) {
+ # do we still have more tokens?
+ elsif ($input =~ /^$NONSPACE/) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
+
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index c24a302..bd9ff22 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,497 +1,507 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
[
"Tabs\ttabs\ttabs.",
['tabs', "\t", 'tabs', "\t", 'tabs', '.'],
"Tabs\ttabs\ttabs.",
],
+ [
+ "2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00",
+ [qw<2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00>],
+ "2011-05-05 22:55 22:55Z 2011-05-05T22:55Z 2011-W18-4 2011-125 12:00±05:00.",
+ ],
+ [
+ '<@literal> oh hi < literal> what is going on?',
+ [qw{<@literal> oh hi}, '< literal>', qw<what is going on ?>],
+ '<@literal> oh hi < literal> what is going on?',
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
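The new test rows above follow the file's three-element convention: raw input, the token list make_tokens must produce, and the string make_output must rebuild from those tokens. A minimal sketch of exercising the same public API directly, using one of the new inputs; everything here mirrors how the test loop above drives the tokenizer:

use 5.010;
use utf8;
use strict;
use warnings;
use Hailo::Tokenizer::Words;

my $toke   = Hailo::Tokenizer::Words->new();
my $tokens = $toke->make_tokens('<@literal> oh hi < literal> what is going on?');

# each element of @$tokens is a [spacing-attribute, text] pair
say join ' | ', map { $_->[1] } @$tokens;

# rebuild a normalized output string from the token list
say $toke->make_output($tokens);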
hinrik/hailo | 69fdf1ac410fcb635edadffc7317e9c1cdd6256a | Match tabs as tokens | diff --git a/Changes b/Changes
index d2fa474..8d04d87 100644
--- a/Changes
+++ b/Changes
@@ -1,521 +1,523 @@
Revision history for Hailo
{{$NEXT}}
- Scored engine: Prefer shorter replies, like MegaHAL/cobe.
- Word tokenizer: Improve matching/capitalization of filenames and
domain names
+ - Word tokenizer: Match tabs as tokens
+
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40 lines/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility, but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index be8de62..edc1be5 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,219 +1,219 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$POINT\d+(?:$POINT\d+)*|\d+(?:$POINT\d+)+$ALPHABET*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
my $ESC_SPACE = qr/(?:\\ )+/;
my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
my $CASED_WORD = qr/$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$PATH|$NUMBER|$ABBREV|$APOST_WORD|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K\s*\n+\s*//;
$input =~ s/\s*\n+\s*/ /gm;
- my @chunks = split /\s+/, $input;
+ my @chunks = split /[\n ]+/, $input;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# special words for which we preserve case
elsif (!$got_word && $chunk =~ s/ ^ (?<word> $CASED_WORD )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
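The one functional change in this file is the chunking regex in make_tokens, from split /\s+/ to split /[\n ]+/. A tab no longer separates chunks, so it stays inside its chunk, falls through to the $NON_WORD branch, and becomes a token of its own. A self-contained sketch of just that difference, with no Hailo dependency:

use 5.010;
use strict;
use warnings;

my $input = "Tabs\ttabs\ttabs.";

# old behaviour: any whitespace splits, so the tabs are thrown away
my @old = split /\s+/, $input;     # ('Tabs', 'tabs', 'tabs.')

# new behaviour: only newlines and spaces split, so the string stays
# one chunk and the tabs get tokenized further down in make_tokens
my @new = split /[\n ]+/, $input;  # ("Tabs\ttabs\ttabs.")

say scalar @old;  # 3
say scalar @new;  # 1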
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index fa98916..c24a302 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,492 +1,497 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
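# The case-preservation cases above boil down to two patterns from
# Words.pm: $MIXED_CASE (a lower-case run followed by an upper-case
# letter, as in "WoW" or "GumbyBRAIN") and $UPPER_NONW (leading capitals,
# then a non-word character, then lower case, as in "KIA'd"). A word is
# lower-cased only when it matches neither; inlining both patterns:
#
#   my $word = "WoW";
#   $word = lc $word
#       if $word ne uc $word
#       and $word !~ / \p{Lower}+ \p{Upper} /x                                    # $MIXED_CASE
#       and $word !~ /^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;  # $UPPER_NONW
#   # $word is still "WoW"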
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I wrote theres_no_place_like_home.ly. And then some.",
[qw<I wrote theres_no_place_like_home.ly . and then some .>],
"I wrote theres_no_place_like_home.ly. And then some.",
],
[
"The file is /hlagh/bar/foo.txt. Just read it.",
[qw<the file is /hlagh/bar/foo.txt . just read it .>],
"The file is /hlagh/bar/foo.txt. Just read it.",
],
[
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
[qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
"The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
],
+ [
+ "Tabs\ttabs\ttabs.",
+ ['tabs', "\t", 'tabs', "\t", 'tabs', '.'],
+ "Tabs\ttabs\ttabs.",
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
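Both test files wrap every make_tokens and make_output call in a one-second timing assertion; that guard exists because earlier tokenizer regexes showed pathological backtracking (see the 0.68 and 0.45 entries in Changes above). Reduced to its essentials, with do_work as a hypothetical stand-in for the call being timed:

use 5.010;
use strict;
use warnings;
use Time::HiRes qw<gettimeofday tv_interval>;

sub do_work { return 42 }    # hypothetical workload standing in for make_tokens

my @before  = gettimeofday();
my $result  = do_work();
my @after   = gettimeofday();
my $elapsed = tv_interval(\@before, \@after);

warn "pathologically slow: ${elapsed}s\n" if $elapsed >= 1;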
hinrik/hailo | a4af08726c750a13a51db57f20965ac9c2c8e043 | Improve matching/capitalization of filenames/domains | diff --git a/Changes b/Changes
index b5f48c3..d2fa474 100644
--- a/Changes
+++ b/Changes
@@ -1,518 +1,521 @@
Revision history for Hailo
{{$NEXT}}
- Scored engine: Prefer shorter replies, like MegaHAL/cobe.
+ - Word tokenizer: Improve matching/capitalization of filenames and
+ domain names
+
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40 lines/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility, but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
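The changelog entry above maps onto the Words.pm diff below: new $FILENAME, $UNIX_PATH and $WIN_PATH patterns are folded into $CASED_WORD, so paths and filenames survive as single case-preserved tokens, and into $LOOSE_WORD, so a full stop after a filename is treated as a sentence terminator instead of being glued to the token. A sketch of the resulting behaviour, reusing inputs from the test file and assuming a Hailo that includes this commit:

use 5.010;
use utf8;
use strict;
use warnings;
use Hailo::Tokenizer::Words;

my $toke = Hailo::Tokenizer::Words->new();

# '/hlagh/bar/foo.txt' should come through as one token, with the
# trailing '.' split off as a terminator
for my $line ('The file is /hlagh/bar/foo.txt. Just read it.',
              'taka úr sources.list') {
    my $tokens = $toke->make_tokens($line);
    say join ' | ', map { $_->[1] } @$tokens;
}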
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index e7d04f6..be8de62 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,228 +1,219 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
-my $NUMBER = qr/$POINT\d+(?:$POINT\d+)*|\d+(?:$POINT\d+)+\w*/;
+my $NUMBER = qr/$POINT\d+(?:$POINT\d+)*|\d+(?:$POINT\d+)+$ALPHABET*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
-my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
-my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
+my $ABBREV = qr/$ALPHABET(?:\.$ALPHABET)+\./;
+my $DOTTED = qr/$BARE_WORD?\.$BARE_WORD(?:\.$BARE_WORD)*/;
+my $WORD_TYPES = qr/$NUMBER|$ABBREV|$DOTTED|$APOST_WORD|$BARE_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
+my $ESC_SPACE = qr/(?:\\ )+/;
+my $NAME = qr/(?:$BARE_WORD|$ESC_SPACE)+/;
+my $FILENAME = qr/ $NAME? \. $NAME (?: \. $NAME )* | $NAME/x;
+my $UNIX_PATH = qr{ / $FILENAME (?: / $FILENAME )* /? }x;
+my $WIN_PATH = qr{ $ALPHABET : \\ $FILENAME (?: \\ $FILENAME )* \\?}x;
+my $PATH = qr/$UNIX_PATH|$WIN_PATH/;
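+# Illustrative (not in the original source): $PATH is meant to match
+# e.g. /hlagh/bar/foo.txt and C:\hlagh\bar\foo.txt as single tokens,
+# including escaped spaces as in /foo\ bar/baz.txt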
+
+my $CASED_WORD = qr/$PERL_CLASS|$EXTRA_URI|$EMAIL|$TWAT_NAME|$PATH/;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
-my $LOOSE_WORD = qr/$WORD_TYPES|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
+my $LOOSE_WORD = qr/$PATH|$NUMBER|$ABBREV|$APOST_WORD|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $input) = @_;
my @tokens;
$input =~ s/$DASH\K\s*\n+\s*//;
$input =~ s/\s*\n+\s*/ /gm;
my @chunks = split /\s+/, $input;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
- # Perl class names
- elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
- push @tokens, [$self->{_spacing_normal}, $+{class}];
- $got_word = 1;
- }
- # ssh:// (and foo+ssh://) URIs
- elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
- push @tokens, [$self->{_spacing_normal}, $+{uri}];
- $got_word = 1;
- }
- # email addresses
- elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
- push @tokens, [$self->{_spacing_normal}, $+{email}];
- $got_word = 1;
- }
- # Twitter names
- elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
- # Names on Twitter/Identi.ca can only match
- # @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
- # names.
- push @tokens, [$self->{_spacing_normal}, $+{twat}];
+ # special words for which we preserve case
+ elsif (!$got_word && $chunk =~ s/ ^ (?<word> $CASED_WORD )//xo) {
+ push @tokens, [$self->{_spacing_normal}, $+{word}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index 69e48e4..fa98916 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,477 +1,492 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
- [qw<on example . com ? yes>],
+ [qw<on example.com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
- [qw<. com bubble>],
+ [qw<.com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGΛ̊TE',
[qw<this is STARGΛ̊TE>],
'This is STARGΛ̊TE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
- [qw<taka úr sources . list>],
+ [qw<taka úr sources.list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
[
"I use\nPOE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"I use POE-Component- \n IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
+ [
+ "I wrote theres_no_place_like_home.ly. And then some.",
+ [qw<I wrote theres_no_place_like_home.ly . and then some .>],
+ "I wrote theres_no_place_like_home.ly. And then some.",
+ ],
+ [
+ "The file is /hlagh/bar/foo.txt. Just read it.",
+ [qw<the file is /hlagh/bar/foo.txt . just read it .>],
+ "The file is /hlagh/bar/foo.txt. Just read it.",
+ ],
+ [
+ "The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
+ [qw<the file is C:\\hlagh\\bar\\foo.txt . just read it .>],
+ "The file is C:\\hlagh\\bar\\foo.txt. Just read it.",
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
diff --git a/t/tokenizer/Words/utf8-text.t b/t/tokenizer/Words/utf8-text.t
index 8940149..6dcd6e2 100644
--- a/t/tokenizer/Words/utf8-text.t
+++ b/t/tokenizer/Words/utf8-text.t
@@ -1,538 +1,538 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Encode qw<encode_utf8>;
use Data::Section -setup;
use Test::More;
use Hailo::Tokenizer::Words;
binmode $_, ':encoding(utf8)' for (*STDOUT, *STDERR);
BEGIN {
if ($] < 5.012000) {
plan skip_all => "This test relies on Perl >=5.12's Unicode matching";
}
my $got_yaml;
eval {
require YAML::XS;
YAML::XS->import('Dump', 'Load');
$got_yaml = 1;
};
plan skip_all => "Haven't got YAML::XS" if !$got_yaml;
}
-plan tests => '2503';
+plan tests => '2501';
my $self = bless {} => __PACKAGE__;
my $text = ${ $self->section_data("UTF-8 encoded sample plain-text file") };
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($text);
# This is how the YAML::XS output was produced:
#binmode *STDERR;
#print STDERR Dump($parsed);
#exit;
my $yaml = Load(encode_utf8(${ $self->section_data("YAML::XS result") }));
for (my $i = 0; $i < @$yaml; $i++) {
is($parsed->[$i][0], $yaml->[$i][0], "Token #$i: type matches");
is($parsed->[$i][1], $yaml->[$i][1], "Token #$i: content matches");
}
is(scalar(@$parsed), scalar(@$yaml), "Number of tokens matches");
__DATA__
__[ UTF-8 encoded sample plain-text file ]__
UTF-8 encoded sample plain-text file
â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾
Markus Kuhn [ËmaʳkÊs kuËn] <http://www.cl.cam.ac.uk/~mgk25/> â 2002-07-25
The ASCII compatible UTF-8 encoding used in this plain-text file
is defined in Unicode, ISO 10646-1, and RFC 2279.
Using Unicode/UTF-8, you can write in emails and source code things such as
Mathematics and sciences:
â® Eâ
da = Q, n â â, â f(i) = â g(i), â§â¡ââââââââââ¤â«
âªâ¢ââa²+b³ ââ¥âª
âxââ: âxâ = âââxâ, α ⧠¬β = ¬(¬α ⨠β), âªâ¢âââââââ ââ¥âª
âªâ¢ââ· câ ââ¥âª
â â ââ â ⤠â â â â â â, â¨â¢â ââ¥â¬
âªâ¢â â ââ¥âª
⥠< a â b â¡ c ⤠d ⪠⤠â (â¦Aâ§ â âªBâ«), âªâ¢â â² ââ¥âª
âªâ¢â â³aâ±-bâ±ââ¥âª
2Hâ + Oâ â 2HâO, R = 4.7 kΩ, â 200 mm â©â£âi=1 â â¦â
Linguistics and dictionaries:
ði ıntÉËnæÊÉnÉl fÉËnÉtık ÉsoÊsiËeıÊn
Y [ËÊpsilÉn], Yen [jÉn], Yoga [ËjoËgÉ]
APL:
((Vâ³V)=â³â´V)/Vâ,V â·ââ³ââ´ââââ¾âââ
Nicer typography in plain text files:
ââââââââââââââââââââââââââââââââââââââââââââ
â â
â ⢠âsingleâ and âdoubleâ quotes â
â â
â ⢠Curly apostrophes: âWeâve been hereâ â
â â
â ⢠Latin-1 apostrophe and accents: '´` â
â â
â ⢠âdeutscheâ âAnführungszeichenâ â
â â
â ⢠â , â¡, â°, â¢, 3â4, â, â5/+5, â¢, ⦠â
â â
â ⢠ASCII safety test: 1lI|, 0OD, 8B â
â âââââââââââ® â
â ⢠the euro symbol: â 14.95 ⬠â â
â â°âââââââââ⯠â
ââââââââââââââââââââââââââââââââââââââââââââ
Combining characters:
STARGÎÌTE SG-1, a = vÌ = rÌ, aâ ⥠bâ
Greek (in Polytonic):
The Greek anthem:
Σὲ γνÏÏá½·Î¶Ï á¼Ïὸ Ïὴν κόÏη
Ïοῦ ÏÏαθιοῦ Ïὴν ÏÏομεÏá½µ,
Ïá½² γνÏÏá½·Î¶Ï á¼Ïὸ Ïὴν á½Ïη
Ïοὺ μὲ βία μεÏÏάει Ïá½´ γá¿.
᾿ÎÏ᾿ Ïá½° κόκκαλα βγαλμένη
Ïῶν ῾ÎλλήνÏν Ïá½° ἱεÏá½±
καὶ Ïὰν ÏÏá¿¶Ïα á¼Î½Î´ÏειÏμένη
Ïαá¿Ïε, ὦ Ïαá¿Ïε, ᾿ÎλεÏ
θεÏιά!
From a speech of Demosthenes in the 4th century BC:
Îá½Ïá½¶ Ïαá½Ïá½° ÏαÏá½·ÏÏαÏαί μοι γιγνώÏκειν, ὦ á¼Î½Î´ÏÎµÏ á¾¿Îθηναá¿Î¿Î¹,
á½
Ïαν Ï᾿ Îµá¼°Ï Ïá½° ÏÏάγμαÏα á¼ÏοβλέÏÏ ÎºÎ±á½¶ á½
Ïαν ÏÏá½¸Ï ÏοὺÏ
λόγοÏ
Ï Î¿á½Ï á¼ÎºÎ¿á½»ÏÎ ÏÎ¿á½ºÏ Î¼á½²Î½ Î³á½°Ï Î»á½¹Î³Î¿Ï
Ï ÏεÏá½¶ Ïοῦ
ÏιμÏÏá½µÏαÏθαι ΦίλιÏÏον á½Ïá¿¶ γιγνομένοÏ
Ï, Ïá½° δὲ ÏÏάγμαÏ᾿
Îµá¼°Ï ÏοῦÏο ÏÏοήκονÏα, á½¥Ïθ᾿ á½
ÏÏÏ Î¼á½´ ÏειÏόμεθ᾿ αá½Ïοὶ
ÏÏá½¹ÏεÏον ÎºÎ±Îºá¿¶Ï ÏκέÏαÏθαι δέον. οá½Î´á½³Î½ οá½Î½ á¼Î»Î»Î¿ μοι δοκοῦÏιν
οἱ Ïá½° ÏοιαῦÏα λέγονÏÎµÏ á¼¢ Ïὴν á½ÏόθεÏιν, ÏεÏá½¶ á¼§Ï Î²Î¿Ï
λεύεÏθαι,
οá½Ïá½¶ Ïὴν οá½Ïαν ÏαÏιÏÏάνÏÎµÏ á½Î¼á¿Î½ á¼Î¼Î±ÏÏάνειν. á¼Î³á½¼ δέ, á½
Ïι μέν
ÏοÏ᾿ á¼Î¾á¿Î½ Ïá¿ Ïόλει καὶ Ïá½° αá½Ïá¿Ï á¼Ïειν á¼ÏÏÎ±Î»á¿¶Ï ÎºÎ±á½¶ ΦίλιÏÏον
ÏιμÏÏá½µÏαÏθαι, καὶ μάλ᾿ á¼ÎºÏÎ¹Î²á¿¶Ï Î¿á¼¶Î´Î±Î á¼Ï᾿ á¼Î¼Î¿á¿¦ γάÏ, οὠÏάλαι
γέγονεν ÏαῦÏ᾿ á¼Î¼Ïá½¹ÏεÏαΠνῦν μένÏοι Ïá½³ÏειÏμαι Ïοῦθ᾿ ἱκανὸν
ÏÏολαβεá¿Î½ ἡμá¿Î½ εἶναι Ïὴν ÏÏá½½Ïην, á½
ÏÏÏ ÏÎ¿á½ºÏ ÏÏ
μμάÏοÏ
Ï
Ïá½½Ïομεν. á¼á½°Î½ Î³á½°Ï ÏοῦÏο βεβαίÏÏ á½Ïá½±Ïξá¿, Ïá½¹Ïε καὶ ÏεÏá½¶ Ïοῦ
Ïίνα ÏιμÏÏá½µÏεÏαί ÏÎ¹Ï ÎºÎ±á½¶ á½Î½ ÏÏá½¹Ïον á¼Î¾á½³ÏÏαι ÏκοÏεá¿Î½Î ÏÏὶν δὲ
Ïὴν á¼ÏÏὴν á½ÏÎ¸á¿¶Ï á½ÏοθέÏθαι, μάÏαιον ἡγοῦμαι ÏεÏá½¶ Ïá¿Ï
ÏελεÏ
Ïá¿Ï á½Î½Ïινοῦν Ïοιεá¿Ïθαι λόγον.
ÎημοÏθένοÏ
Ï, δ ᾿ÎλÏ
νθιακὸÏ
Georgian:
From a Unicode conference invitation:
ááá®ááá áá®áááá ááááá áá á áááá¡á¢á ááªáá Unicode-áá¡ ááááá á¡ááá ááá¨áá áá¡á
áááá¤áá áááªáááá ááá¡áá¡á¬á áááá, á ááááá᪠áááááá áááá 10-12 ááá á¢á¡,
á¥. áááááªá¨á, ááá áááááá¨á. áááá¤áá áááªáá á¨áá°áá ááá¡ áá ááá áá¡áá¤áááá¡
áá¥á¡ááá á¢ááá¡ áá¡áá ááá áááá¨á á áááá ááªáá ááá¢áá ááá¢á áá Unicode-á,
ááá¢áá áááªáááááááááªáá áá áááááááááªáá, Unicode-áá¡ ááááá§ááááá
áááá ááªáá£á á¡áá¡á¢ááááá¡á, áá ááááá§áááááá áá ááá ááááá¨á, á¨á áá¤á¢ááá¨á,
á¢áá¥á¡á¢áááá¡ áááá£á¨áááááá¡á áá áá áááááááááá áááááá£á¢áá á£á á¡áá¡á¢ááááá¨á.
Russian:
From a Unicode conference invitation:
ÐаÑегиÑÑÑиÑÑйÑеÑÑ ÑейÑÐ°Ñ Ð½Ð° ÐеÑÑÑÑÑ ÐеждÑнаÑоднÑÑ ÐонÑеÑенÑÐ¸Ñ Ð¿Ð¾
Unicode, коÑоÑÐ°Ñ ÑоÑÑоиÑÑÑ 10-12 маÑÑа 1997 года в ÐайнÑе в ÐеÑмании.
ÐонÑеÑенÑÐ¸Ñ ÑобеÑÐµÑ ÑиÑокий кÑÑг ÑкÑпеÑÑов по вопÑоÑам глобалÑного
ÐнÑеÑнеÑа и Unicode, локализаÑии и инÑеÑнаÑионализаÑии, воплоÑÐµÐ½Ð¸Ñ Ð¸
пÑÐ¸Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Unicode в ÑазлиÑнÑÑ
опеÑаÑионнÑÑ
ÑиÑÑемаÑ
и пÑогÑаммнÑÑ
пÑиложениÑÑ
, ÑÑиÑÑаÑ
, веÑÑÑке и многоÑзÑÑнÑÑ
компÑÑÑеÑнÑÑ
ÑиÑÑемаÑ
.
Thai (UCS Level 2):
Excerpt from a poetry on The Romance of The Three Kingdoms (a Chinese
classic 'San Gua'):
[----------------------------|------------------------]
๠à¹à¸à¹à¸à¸à¸´à¸à¸®à¸±à¹à¸à¹à¸ªà¸·à¹à¸à¸¡à¹à¸à¸£à¸¡à¹à¸ªà¸à¸ªà¸±à¸à¹à¸§à¸ à¸à¸£à¸°à¸à¸à¹à¸à¸¨à¸à¸à¸à¸à¸¹à¹à¸à¸¹à¹à¸à¸¶à¹à¸à¹à¸«à¸¡à¹
สิà¸à¸ªà¸à¸à¸à¸©à¸±à¸à¸£à¸´à¸¢à¹à¸à¹à¸à¸à¸«à¸à¹à¸²à¹à¸¥à¸à¸±à¸à¹à¸ สà¸à¸à¸à¸à¸à¹à¹à¸à¸£à¹à¹à¸à¹à¹à¸à¸¥à¸²à¹à¸à¸²à¸à¸±à¸à¸à¸²
à¸à¸£à¸à¸à¸±à¸à¸à¸·à¸à¸à¸±à¸à¸à¸µà¹à¸à¹à¸à¸à¸µà¹à¸à¸¶à¹à¸ à¸à¹à¸²à¸à¹à¸¡à¸·à¸à¸à¸à¸¶à¸à¸§à¸´à¸à¸£à¸´à¸à¹à¸à¹à¸à¸à¸±à¸à¸«à¸à¸²
à¹à¸®à¸à¸´à¹à¸à¹à¸£à¸µà¸¢à¸à¸à¸±à¸à¸à¸±à¹à¸§à¸«à¸±à¸§à¹à¸¡à¸·à¸à¸à¸¡à¸² หมายà¸à¸°à¸à¹à¸²à¸¡à¸à¸à¸±à¹à¸§à¸à¸±à¸§à¸ªà¸³à¸à¸±à¸
à¹à¸«à¸¡à¸·à¸à¸à¸à¸±à¸à¹à¸ªà¹à¸¥à¹à¹à¸ªà¸·à¸à¸à¸²à¸à¹à¸à¸«à¸² รัà¸à¸«à¸¡à¸²à¸à¹à¸²à¹à¸à¹à¸²à¸¡à¸²à¹à¸¥à¸¢à¸à¸²à¸ªà¸±à¸
à¸à¹à¸²à¸¢à¸à¹à¸à¸à¸à¸¸à¹à¸à¸¢à¸¸à¹à¸¢à¸à¹à¸«à¹à¹à¸à¸à¸à¸±à¸ à¹à¸à¹à¸ªà¸²à¸§à¸à¸±à¹à¸à¹à¸à¹à¸à¸à¸à¸§à¸à¸à¸·à¹à¸à¸à¸§à¸à¹à¸
à¸à¸¥à¸±à¸à¸¥à¸´à¸à¸¸à¸¢à¸à¸¸à¸¢à¸à¸µà¸à¸¥à¸±à¸à¸à¹à¸à¹à¸«à¸à¸¸ à¸à¹à¸²à¸à¸à¸²à¹à¸à¸¨à¸à¸£à¸´à¸à¸«à¸à¸²à¸à¹à¸²à¸£à¹à¸à¸à¹à¸«à¹
à¸à¹à¸à¸à¸£à¸à¸£à¸²à¸à¹à¸²à¸à¸±à¸à¸à¸à¸à¸£à¸£à¸¥à¸±à¸¢ ฤà¹
หาà¹à¸à¸£à¸à¹à¸³à¸à¸¹à¸à¸¹à¹à¸à¸£à¸£à¸¥à¸±à¸à¸à¹ ฯ
(The above is a two-column text. If combining characters are handled
correctly, the lines of the second column should be aligned with the
| character above.)
Ethiopian:
Proverbs in the Amharic language:
á°áá á áá³á¨áµ ááᥠá áá¨á°áµá¢
á¥á á«áá á¥áá°á á£á´ á ááá áá¢
áᥠá«áá¤á± ááá¥á ááá¢
á°á á ááá á
ᤠá£áá ᣠáá£áµ á áá°ááá¢
á¨á á áááá³ á á
ᤠá áá³á½áá¢
á áᥠá á á á³á á°áá³á¢
á²á°á¨áá áá°á¨ááá¢
ááµ á ááµá¥ ááááá á á¥áá© ááá³áá¢
áµá á¢á«á¥á á áá á³ á«áµáá¢
á°á á¥áá°á¤á± á¥áá
á¥áá° áá¨á¤á± á áá°á³á°ááá¢
á¥ááá á¨á¨áá°áá áá®á® á³áááá á ááµááá¢
á¨áá¨á¤áµ áá£á¥ á¢á«á©áµ ááµá
á£á«á©áµ á«á áá
á¢
á¥á« á¨ááá³áµ ááá ááá³áµá¢
áá£á áá°áªá« á¨ááᥠáááµ áá ááá«áá¢
á¨á¥áµáá á áá© áá« á¨á áá« á áá© ááá«á¢
á°ááá á¢á°á á°ááá¶ á£áá¢
áá³á
á
áá á¢áá á¨ááµá
á áµáá°áá¢
á¥ááá
á á áá«á½á
áá áááá¢
Runes:
á»á á³á¹á«á¦ á¦á«á á»á áá¢áá á©á¾ á¦á«á ááªá¾áá á¾á©á±á¦á¹ááªá±áá¢á á¹áᦠá¦áª á¹áá¥á«
(Old English, which transcribed into Latin reads 'He cwaeth that he
bude thaem lande northweardum with tha Westsae.' and means 'He said
that he lived in the northern land near the Western Sea.')
Braille:
â¡â â §â â ¼â â â¡â â â ⠹⠰â â¡£â â
â¡â â â â ¹ â ºâ â â â â â â â â â â â â â ºâ ⠹⠲ ⡹⠻â â â â â â â ³â â
â ±â â â â §â » â â â ³â â ¹â â â ² ⡹â â â â â â â » â â â â â â â ¥â â â â â ºâ â
â â â â â « â â ¹ â ¹â â â â »â â ¹â â â â â ¹â â â â »â
â â ¹â â ¥â â â »â â â
â »â
â â â â ¹â â ¡â â â â â ³â â ⠻⠲ â¡â â â â â â â â â â â « â â â ² â¡â â
â¡â â â â â â â °â â â â â â ºâ â â â â â â ¥â â â â °â¡¡â â â â â â â â â â ⠹⠹â â â â
â ¡â â â â â â â ¥â â â â â â â â â â â ²
â¡â â â¡â â â â ¹ â ºâ â â â â â â â â â â â â â â â ¤â â â â â ²
â¡â â â â¡ â â â â °â â â â â â â â â â ¹ â ¹â â â¡ â
â â ªâ â â â â ¹
â ªâ â
â â ªâ â «â â â â ±â â ⠹⠻â â â â â â â â â ¥â â â â ¹ â â â â â â â ³â
â â â â â â ¤â â â â â ² â¡ â â â £â â â â §â â â â ² â â â â â «â â â ¹â â â â â â â
â â â â â â â â â â â â ¤â â â â â â â ¹â â â â â â â â â â â â â â â â â â â â â â ⠻⠹
â â ¹â â â â â â â ² â¡â ¥â â ¹â â ºâ â â â â â â â ³â â â â â â â â â
â â â â ¹â â â â â â â â â â â â â ¹ â ¥â â â â â ⠪⠫ â â â â â
â ©â â â â â â â â â â ¥â â â â â â â â ¹â â¡â ³â â â ⠹⠰â â â â â â â â â ² ⡹⠳
â ºâ â â ⠹⠻â â â â â â â »â â â â â â â â â â â â â â â â â â â â â â â â â â ¹â â ¹â â
â¡â â â â ¹ â ºâ â â â â â â â â â â â â â â â ¤â â â â â ²
(The first couple of paragraphs of "A Christmas Carol" by Dickens)
Compact font selection example text:
ABCDEFGHIJKLMNOPQRSTUVWXYZ /0123456789
abcdefghijklmnopqrstuvwxyz £©µÃÃÃÃÃéöÿ
âââââââ â¢â¦â°â¢ÅŠŸž⬠ÎÎÎÎÎ©Î±Î²Î³Î´Ï ÐÐÐÐÐабвгд
âââââ§âªâ¡â âââ¨â»â£ ââ¼ââââºâºâ ï¬ï¿½ââá¼ á¸Ó¥áºÉËâ×Ô±á
Greetings in various languages:
Hello world, ÎαλημέÏα κόÏμε, ã³ã³ããã
Box drawing alignment tests: â
â
ââââ¦âââ ââââ¬âââ ââââ¬âââ® ââââ¬âââ® ââââ³âââ ââââ â· â» ââ¯â ââ°â â â±â²â±â²â³â³â³
ââââ¨âââ ââââ§âââ ââââªâââ âââââââ âââââââ ââââ â¶â¼â´âºââ¸â â¼â¨ ââ⥠â â²â±â²â±â³â³â³
âââ² â±ââ ââ ââ ââ â ââ ââ â ââ ââ â¿ ââ ââ
ââ âµ â¹ ââ·â ââ¸â â â±â²â±â²â³â³â³
â â¡ â³ â⣠â⢠â⤠ââ¼ââ¼ââ¼â¤ ââ«ââââ«â¤ â£â¿â¾â¼â¼â¿â« ââââ ââââ â ââ
â
â â â â²â±â²â±â³â³â³
âââ± â²ââ ââ ââ ââ â ââ ââ â ââ ââ â½ ââ ââââââââ â â â â â â â
ââââ¥âââ ââââ¤âââ ââââªâââ âââââââ âââââââ ââââââââ â â â â â â â
ââââ©âââ ââââ´âââ â°âââ´ââ⯠â°âââ´ââ⯠ââââ»âââ ââââââ ââââ â ââââ â âââââ
âââ
ââââââ
__[ YAML::XS result ]__
---
- - 0
- UTF-8
- - 0
- encoded
- - 0
- sample
- - 0
- plain-text
- - 0
- file
- - 0
- â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾â¾
- - 0
- markus
- - 0
- kuhn
- - 1
- '['
- - 0
- ËmaʳkÊs
- - 0
- kuËn
- - 2
- ']'
- - 1
- <
- - 0
- http://www.cl.cam.ac.uk/~mgk25/
- - 2
- '>'
- - 0
- â
- - 0
- 2002-07-25
- - 0
- the
- - 0
- ASCII
- - 0
- compatible
- - 0
- UTF-8
- - 0
- encoding
- - 0
- used
- - 0
- in
- - 0
- this
- - 0
- plain-text
- - 0
- file
- - 0
- is
- - 0
- defined
- - 0
- in
- - 0
- unicode
- - 2
- ','
- - 0
- ISO
- - 0
- 10646-1
- - 2
- ','
- - 0
- and
- - 0
- RFC
- - 0
- '2279'
- - 2
- .
- - 0
- using
- - 0
- unicode
- - 3
- /
- - 0
- UTF-8
- - 2
- ','
- - 0
- you
- - 0
- can
- - 0
- write
- - 0
- in
- - 0
- emails
- - 0
- and
- - 0
- source
- - 0
- code
- - 0
- things
- - 0
- such
- - 0
- as
- - 0
- mathematics
- - 0
- and
- - 0
- sciences
- - 2
- ':'
- - 0
- â®
- - 0
- E
- - 3
- â
- - 0
- da
- - 0
- =
- - 0
- Q
- - 2
- ','
- - 0
- n
- - 0
- â
- - 0
- â,
- - 0
- â
- - 0
- f
- - 3
- (
- - 0
- i
- - 2
- )
- - 0
- =
- - 0
- â
- - 0
- g
- - 3
- (
- - 0
- i
- - 2
- ),
- - 0
- â§â¡ââââââââââ¤â«
- - 1
- âªâ¢ââ
- - 0
- a
- - 3
- ²+
- - 0
- b
- - 2
- ³
- - 0
- ââ¥âª
- - 1
- â
- - 0
- x
- - 3
- â
- - 0
- â
- - 2
- ':'
- - 1
- â
- - 0
- x
- - 2
- â
- - 0
- =
- - 1
- âââ
- - 0
- x
- - 2
- â,
- - 0
- α
- - 0
- â§
- - 1
- ¬
- - 0
- β
- - 0
- =
- - 1
- ¬(¬
- - 0
- α
- - 0
- â¨
- - 0
- β
- - 2
- ),
- - 0
- âªâ¢âââââââ
- - 0
- ââ¥âª
- - 0
- âªâ¢ââ·
- - 0
- c
- - 2
- â
- - 0
- ââ¥âª
- - 0
- â
- - 0
- â
- - 0
- â
- - 2
- â
- - 0
- â
- - 0
- â¤
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 2
- ','
- - 0
- â¨â¢â
- - 0
- ââ¥â¬
- - 0
- âªâ¢â
- - 0
- â
- - 0
- ââ¥âª
- - 0
- â¥
- - 0
- <
- - 0
- a
- - 0
- â
- - 0
- b
- - 0
- â¡
@@ -1967,796 +1967,794 @@ __[ YAML::XS result ]__
- - 0
- á«á áá
- - 2
- á¢
- - 0
- á¥á«
- - 0
- á¨ááá³áµ
- - 0
- ááá
- - 0
- ááá³áµ
- - 2
- á¢
- - 0
- áá£á
- - 0
- áá°áªá«
- - 0
- á¨áá
- - 2
- á¥
- - 0
- áááµ
- - 0
- áá
- - 0
- ááá«á
- - 2
- á¢
- - 0
- á¨á¥áµáá
- - 0
- á áá©
- - 0
- áá«
- - 0
- á¨á áá«
- - 0
- á áá©
- - 0
- ááá«
- - 2
- á¢
- - 0
- á°ááá
- - 0
- á¢á°á
- - 0
- á°ááá¶
- - 0
- á£á
- - 2
- á¢
- - 0
- áá³á
á
- - 0
- áá
- - 0
- á¢áá
- - 0
- á¨ááµá
- - 0
- á áµáá°á
- - 2
- á¢
- - 0
- á¥ááá
á
- - 0
- á áá«á½á
- - 0
- áá
- - 0
- ááá
- - 2
- á¢
- - 0
- runes
- - 2
- ':'
- - 0
- á»á
- - 0
- á³á¹á«á¦
- - 0
- á¦á«á
- - 0
- á»á
- - 0
- áá¢áá
- - 0
- á©á¾
- - 0
- á¦á«á
- - 0
- ááªá¾áá
- - 0
- á¾á©á±á¦á¹ááªá±áá¢á
- - 0
- á¹áá¦
- - 0
- á¦áª
- - 0
- á¹áá¥á«
- - 1
- (
- - 0
- old
- - 0
- english
- - 2
- ','
- - 0
- which
- - 0
- transcribed
- - 0
- into
- - 0
- latin
- - 0
- reads
- - 1
- ''''
- - 0
- he
- - 0
- cwaeth
- - 0
- that
- - 0
- he
- - 0
- bude
- - 0
- thaem
- - 0
- lande
- - 0
- northweardum
- - 0
- with
- - 0
- tha
- - 0
- westsae
- - 2
- .'
- - 0
- and
- - 0
- means
- - 1
- ''''
- - 0
- he
- - 0
- said
- - 0
- that
- - 0
- he
- - 0
- lived
- - 0
- in
- - 0
- the
- - 0
- northern
- - 0
- land
- - 0
- near
- - 0
- the
- - 0
- western
- - 0
- sea
- - 2
- .')
- - 0
- braille
- - 2
- ':'
- - 0
- â¡â â §â
- - 0
- â ¼â â
- - 0
- â¡â â â ⠹⠰â
- - 0
- â¡£â â
- - 0
- â¡â â â â ¹
- - 0
- â ºâ â
- - 0
- â â â â â
- - 0
- â â
- - 0
- â â â â
- - 0
- â ºâ ⠹⠲
- - 0
- ⡹⠻â
- - 0
- â â
- - 0
- â â
- - 0
- â â ³â â
- - 0
- â ±â â â â §â »
- - 0
- â â â ³â
- - 0
- â ¹â â â ²
- - 0
- ⡹â
- - 0
- â â â â â â »
- - 0
- â â
- - 0
- â â â
- - 0
- â â ¥â â â â
- - 0
- â ºâ â
- - 0
- â â â â â «
- - 0
- â â ¹
- - 0
- â ¹â
- - 0
- â â â »â â ¹â â â â
- - 0
- â ¹â
- - 0
- â â â »â
â
- - 0
- â ¹â
- - 0
- â ¥â â â »â â â
â »â
- - 0
- â â â
- - 0
- â ¹â
- - 0
- â ¡â â â
- - 0
- â â ³â â ⠻⠲
- - 0
- â¡â â â â â â
- - 0
- â â â â â «
- - 0
- â â â ²
- - 0
- â¡â â
- - 0
- â¡â â â â â â â °â
- - 0
- â â â â
- - 0
- â ºâ â
- - 0
- â â â â
- - 0
- â ¥â â â
- - 0
- â °â¡¡â â â â â
- - 0
- â â â
- - 0
- â â ⠹⠹â â
- - 0
- â â
- - 0
- â ¡â â â
- - 0
- â â
- - 0
- â â ¥â
- - 0
- â â â
- - 0
- â â â â
- - 0
- â â â ²
- - 0
- â¡â â
- - 0
- â¡â â â â ¹
- - 0
- â ºâ â
- - 0
- â â
- - 0
- â â â â
- - 0
- â â
- - 0
- â
- - 0
- â â â â â ¤â â â â â ²
- - 0
- â¡â â â
- - 0
- â¡
- - 0
- â â â â °â
- - 0
- â â â â
- - 0
- â â
- - 0
- â â â ¹
- - 0
- â ¹â â
- - 0
- â¡
- - 0
- â
â â ªâ
- - 0
- â â
- - 0
- â â ¹
- - 0
- â ªâ
- - 0
- â
â â ªâ â «â â â
- - 0
- â ±â â
- - 0
- ⠹⠻â
- - 0
- â â
- - 0
- â â â â â â ¥â â â â ¹
- - 0
- â â â â
- - 0
- â â â ³â
- - 0
- â
- - 0
- â â â â â ¤â â â â â ²
- - 0
- â¡
- - 0
- â â â £â
- - 0
- â â â §â
- - 0
- â â â ²
- - 0
- â â â â â «â
- - 0
- â â ¹â â â â â
- - 0
- â â
- - 0
- â â â â â
- - 0
- â
- - 0
- â â â â â â ¤â â â â
- - 0
- â â
- - 0
- â ¹â
- - 0
- â â â â â â
- - 0
- â â â â â
- - 0
- â â
- - 0
- â â â â â â â â ⠻⠹
- - 0
- â
- - 0
- â ¹â
- - 0
- â â â â â â ²
- - 0
- â¡â ¥â
- - 0
- â ¹â
- - 0
- â ºâ â â â â
- - 0
- â â
- - 0
- â ³â
- - 0
- â â â â â â â â
- - 0
- â â
- - 0
- â
- - 0
- â ¹â
- - 0
- â â â â â â â
- - 0
- â â â
- - 0
- â â ¹
- - 0
- â ¥â â â â â ⠪⠫
- - 0
- â â â â â
- - 0
- â ©â â â
- - 0
- â â â
- - 0
- â â â â ¥â â
- - 0
- â â â
- - 0
- â â
- - 0
- â ¹â
- - 0
- â¡â ³â â â ⠹⠰â
- - 0
- â â â â
- - 0
- â â â â ²
- - 0
- ⡹⠳
- - 0
- â ºâ â â
- - 0
- ⠹⠻â â â â â
- - 0
- â â »â â â
- - 0
- â â
- - 0
- â â
- - 0
- â â â â â â â
- - 0
- â â â â â â â â â â â â ¹â
- - 0
- â ¹â â
- - 0
- â¡â â â â ¹
- - 0
- â ºâ â
- - 0
- â â
- - 0
- â â â â
- - 0
- â â
- - 0
- â
- - 0
- â â â â â ¤â â â â â ²
- - 1
- (
- - 0
- the
- - 0
- first
- - 0
- couple
- - 0
- of
- - 0
- paragraphs
- - 0
- of
- - 1
- '"'
- - 0
- A
- - 0
- christmas
- - 0
- carol
- - 2
- '"'
- - 0
- by
- - 0
- dickens
- - 2
- )
- - 0
- compact
- - 0
- font
- - 0
- selection
- - 0
- example
- - 0
- text
- - 2
- ':'
- - 0
- ABCDEFGHIJKLMNOPQRSTUVWXYZ
-- - 1
- - /
- - 0
- - '0123456789'
+ - /0123456789
- - 0
- abcdefghijklmnopqrstuvwxyz
- - 1
- £©
- - 0
- µÃÃÃÃÃéöÿ
- - 1
- âââââââ â¢â¦â°â¢
- - 0
- ÅŠŸž
- - 2
- â¬
- - 0
- αβγδÏαβγδÏ
- - 0
- абвгдабвгд
- - 1
- âââ
- - 0
- â
- - 2
- â§âªâ¡â
- - 0
- âââ¨â»â£
- - 0
- ââ¼ââââºâºâ
- - 0
- ï¬
- - 3
- �ââ
- - 0
- á¼ á¸Ó¥áºÉË
- - 3
- â
- - 0
- ×Ô±á
- - 0
- greetings
- - 0
- in
- - 0
- various
- - 0
- languages
- - 2
- ':'
- - 0
- hello
- - 0
- world
- - 2
- ','
- - 0
- καλημέÏα
- - 0
- κόÏμε
- - 2
- ','
- - 0
- ã³ã³ããã
- - 0
- box
- - 0
- drawing
- - 0
- alignment
- - 0
- tests
- - 2
- ':'
- - 0
- â
- - 0
- â
- - 0
- ââââ¦âââ
- - 0
- ââââ¬âââ
- - 0
- ââââ¬âââ®
- - 0
- ââââ¬âââ®
- - 0
- ââââ³âââ
- - 0
- ââââ
- - 0
- â·
- - 0
- â»
- - 0
- ââ¯â
- - 0
- ââ°â
- - 0
- â
- - 0
- â±â²â±â²â³â³â³
- - 0
- ââââ¨âââ
- - 0
- ââââ§âââ
- - 0
- ââââªâââ
- - 0
- âââââââ
- - 0
- âââââââ
- - 0
- ââââ
- - 0
- â¶â¼â´âºââ¸â â¼â¨
- - 0
- âââ¥
- - 0
- â
- - 0
- â²â±â²â±â³â³â³
- - 0
- âââ²
- - 0
- â±ââ
- - 0
- ââ
- - 0
- ââ
- - 0
- ââ
- - 0
- â
- - 0
- ââ
- - 0
- ââ
- - 0
- â
- - 0
- ââ
- - 0
- ââ
- - 0
- â¿
- - 0
- ââ
- - 0
- ââ
ââ
- - 0
- âµ
- - 0
- â¹
- - 0
- ââ·â
- - 0
- ââ¸â
- - 0
- â
- - 0
- â±â²â±â²â³â³â³
- - 0
- â â¡
- - 0
- â³
- - 0
- ââ£
- - 0
- ââ¢
- - 0
- ââ¤
- - 0
- ââ¼ââ¼ââ¼â¤
- - 0
- ââ«ââââ«â¤
- - 0
- â£â¿â¾â¼â¼â¿â«
- - 0
- ââââ
- - 0
- ââââ
- - 0
- â
- - 0
- ââ
â
â
- - 0
- â
- - 0
- â
- - 0
- â²â±â²â±â³â³â³
- - 0
- âââ±
- - 0
- â²ââ
- - 0
- ââ
- - 0
- ââ
- - 0
- ââ
- - 0
- â
- - 0
- ââ
- - 0
- ââ
- - 0
- â
- - 0
- ââ
- - 0
- ââ
- - 0
- â½
- - 0
- ââ
- - 0
- ââââââââ
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- ââââ¥âââ
- - 0
- ââââ¤âââ
- - 0
- ââââªâââ
- - 0
- âââââââ
- - 0
- âââââââ
- - 0
- ââââââââ
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- â
- - 0
- ââââ©âââ
- - 0
- ââââ´âââ
- - 0
- â°âââ´âââ¯
- - 0
- â°âââ´âââ¯
- - 0
- ââââ»âââ
- - 0
- ââââââ
- - 0
- ââââ
- - 0
- â
- - 0
- ââââ
- - 0
- â
- - 0
- âââââ
âââ
- - 0
- ââââââ
|
hinrik/hailo | 851b8a65fa6f3be2878a6a350e951dad4775910b | Prefer shorter replies | diff --git a/Changes b/Changes
index 6625230..b5f48c3 100644
--- a/Changes
+++ b/Changes
@@ -1,516 +1,518 @@
Revision history for Hailo
{{$NEXT}}
+ - Scored engine: Prefer shorter replies, like MegaHAL/cobe.
+
0.68 2011-05-03 13:16:05
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Add --train-fast to speed up learning by up to an additional 45% on
large brains by using aggressive caching. This uses a lot of memory.
Almost 600MB with SQLite on a 64bit machine for a brain which
eventually takes 134MB on disk (trained from a 350k line IRC log).
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB")).
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
with fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface is now much improved. It
supports SQLite-like command syntax, and has a built-in
help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization); overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
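      A minimal illustration of the /o trick described above (the
      pattern and data are hypothetical, not Hailo's actual code): /o
      tells perl to interpolate and compile the pattern only the first
      time it is executed, instead of re-checking the interpolated
      variables on every match.

          my $suffix = qr/(?:KB|MB|GB)/;
          my @sizes;
          for my $word (@words) {
              # compiled once per process, not once per loop iteration
              push @sizes, $word if $word =~ /^\d+(?:\.\d+)?$suffix$/o;
          }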
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is (see the
sketch after this entry). This improves performance a lot on
input that contains URIs; previously Hailo would split them up
nonsensically, which would inflate the token table a lot with
little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
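      A standalone sketch of the Regexp::Common URI matching mentioned
      above (illustrative only; the tokenizer's real code path also
      transliterates each chunk to ASCII first to work around
      non-ASCII domain names):

          use Regexp::Common qw<URI>;
          my $chunk = 'http://hailo.org/ is the website';
          if ($chunk =~ / ^ (?<uri> $RE{URI} ) /xo) {
              print "URI token: $+{uri}\n";    # http://hailo.org/
          }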
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from a osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code; that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
diff --git a/lib/Hailo/Engine/Scored.pm b/lib/Hailo/Engine/Scored.pm
index 237c695..ffc5dc7 100644
--- a/lib/Hailo/Engine/Scored.pm
+++ b/lib/Hailo/Engine/Scored.pm
@@ -1,268 +1,280 @@
package Hailo::Engine::Scored;
use 5.010;
use Any::Moose;
use List::Util qw<sum>;
use List::MoreUtils qw<any>;
use Time::HiRes qw<gettimeofday tv_interval>;
extends 'Hailo::Engine::Default';
after BUILD => sub {
my ($self) = @_;
my %args = $self->arguments;
if (defined $args{iterations} && defined $args{interval}) {
die __PACKAGE__.": You can only specify one of 'iterations' and 'interval'\n";
}
return;
};
sub reply {
my $self = shift;
my $tokens = shift // [];
# see if we recognize any of the input tokens
my $token_cache = $self->_resolve_input_tokens($tokens);
my @input_token_ids = keys %$token_cache;
my @token_counts;
# let's select potential pivot tokens from the input
if (keys %$token_cache) {
# we only want the ones with normal spacing (usually normal words)
@token_counts = map {
$token_cache->{$_}[0] == 0 ? [$_, $token_cache->{$_}[2]] : ()
} keys %$token_cache;
}
my $token_probs = $self->_get_pivot_probabilites(\@token_counts);
my @started = gettimeofday();
my $iterations = 0;
my $done;
my %args = $self->arguments;
if (!defined $args{iterations} && !defined $args{interval}) {
# construct replies for half a second by default
$args{interval} = 0.5;
}
if (defined $args{iterations}) {
$done = sub {
return 1 if $iterations == $args{iterations};
};
}
else {
$done = sub {
my $elapsed = tv_interval(\@started, [gettimeofday]);
return 1 if $elapsed >= $args{interval};
};
}
my (%link_cache, %expr_cache, $best_score, $best_reply);
while (1) {
$iterations++;
my $reply = $self->_generate_reply($token_probs, \%expr_cache);
return if !defined $reply; # we don't know any expressions yet
my $score = $self->_evaluate_reply(\@input_token_ids, $reply, \%link_cache);
if (defined $best_reply && $self->_too_similar(\@input_token_ids, $reply)) {
last if $done->();
next;
}
if (!defined $best_score || $score > $best_score) {
$best_score = $score;
$best_reply = $reply;
}
last if $done->();
}
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @$best_reply;
return \@output;
}
# Calculate the probability we wish to pick each token as the pivot.
# This uses -log2(p) as a method for inverting token probability,
# ensuring that our rarer tokens are picked more often.
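# For example, with pivot candidates seen 1 and 4 times out of 5, the
# rare one gets weight -log2(1/5) ~= 2.32 vs. -log2(4/5) ~= 0.32, so
# after normalization it is picked as the pivot roughly 88% of the time.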
sub _get_pivot_probabilites {
my ($self, $token_counts) = @_;
return [] if !@$token_counts;
return [[$token_counts->[0], 1]] if @$token_counts == 1;
# calculate the (non-normalized) probability we want each to occur
my $count_sum = sum(map { $_->[1] } @$token_counts);
my $p = [];
my $p_sum = 0;
for my $token_count (map { $_->[1] } @$token_counts) {
my $token_p = -log(($token_count/$count_sum))/log(2);
push @$p, $token_p;
$p_sum += $token_p;
}
# normalize the probabilities
my @probs = map {
[$token_counts->[$_], $p->[$_] / $p_sum];
} 0..$#{ $token_counts };
return \@probs;
}
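A worked sketch of the -log2(p) inversion described in the comment above, with made-up counts (an editorial illustration, not part of the module):

    use List::Util qw(sum);
    my @token_counts = ([foo => 1], [bar => 7]);            # [token, count]
    my $count_sum = sum(map { $_->[1] } @token_counts);     # 8
    my @p = map { -log($_->[1] / $count_sum) / log(2) } @token_counts;
    my $p_sum = sum(@p);                                    # 3 + 0.19 = 3.19
    my @probs = map { $_ / $p_sum } @p;                     # ~(0.94, 0.06)
    # the token seen only once is picked as the pivot ~94% of the time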
sub _generate_reply {
my ($self, $token_probs, $expr_cache) = @_;
my ($pivot_expr_id, @token_ids);
if (@$token_probs) {
my $pivot_token_id = $self->_choose_pivot($token_probs);
($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
}
else {
($pivot_expr_id, @token_ids) = $self->_random_expr();
return if !defined $pivot_expr_id; # no expressions in the database
}
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, $expr_cache);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, $expr_cache);
return \@token_ids;
}
sub _evaluate_reply {
my ($self, $input_token_ids, $reply_token_ids, $cache) = @_;
my $order = $self->order;
my $score = 0;
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $next_token_id = $reply_token_ids->[$idx];
if (any { $_ == $next_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx .. $idx+$order-1];
my $key = join('_', @expr)."-$next_token_id";
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('next', \@expr, $next_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $prev_token_id = $reply_token_ids->[$idx];
if (any { $_ == $prev_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx+1 .. $idx+$order];
my $key = join('_', @expr)."-$prev_token_id";
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('prev', \@expr, $prev_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
+ # Prefer shorter replies. This behavior is present but not
+ # documented in recent MegaHAL.
+ my $score_divider = 1;
+ my $n_tokens = @$reply_token_ids;
+    if ($n_tokens >= 16) {
+        $score_divider = $n_tokens;
+    }
+    elsif ($n_tokens >= 8) {
+        $score_divider = sqrt($n_tokens - 1);
+    }
+
+ $score = $score / $score_divider;
return $score;
}
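Concretely, with the thresholds above: a reply of 16 or more tokens has its score divided by its token count, a reply of 8 to 15 tokens is divided by sqrt(n - 1) (for example sqrt(9) = 3 for a 10-token reply), and shorter replies keep their raw score.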
sub _expr_token_probability {
my ($self, $pos, $expr, $token_id) = @_;
my $order = $self->order;
my $expr_id = $self->_expr_id_add($expr);
$self->{"_sth_${pos}_token_count"}->execute($expr_id, $token_id);
my $expr2token = $self->{"_sth_${pos}_token_count"}->fetchrow_array();
return 0 if !$expr2token;
$self->{"_sth_${pos}_token_links"}->execute($expr_id);
my $expr2all = $self->{"_sth_${pos}_token_links"}->fetchrow_array();
return $expr2token / $expr2all;
}
sub _choose_pivot {
my ($self, $token_probs) = @_;
my $random = rand;
my $p = 0;
for my $token (@$token_probs) {
$p += $token->[1];
return $token->[0][0] if $p > $random;
}
return;
}
sub _too_similar {
my ($self, $input_token_ids, $reply_token_ids) = @_;
my %input_token_ids = map { +$_ => 1 } @$input_token_ids;
for my $reply_token_id (@$reply_token_ids) {
return if !$input_token_ids{$reply_token_id};
}
return 1;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Scored - MegaHAL-style reply scoring for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>. It inherits from
L<Hailo::Engine::Default|Hailo::Engine::Default> and only overrides its
C<reply> method.
It generates multiple replies and applies a scoring algorithm to them, then
returns the best one, similar to MegaHAL.
=head1 ATTRIBUTES
=head2 C<engine_args>
This is a hash reference which can have the following keys:
=head3 C<iterations>
The number of replies to generate before returning the best one.
=head3 C<interval>
The time (in seconds) to spend on generating replies before returning the
best one.
You can not specify both C<iterations> and C<interval> at the same time. If
neither is specified, a default C<interval> of 0.5 seconds will be used.
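A minimal construction sketch for this engine (the argument values are illustrative):

    my $hailo = Hailo->new(
        brain        => 'brain.sqlite',
        engine_class => 'Scored',
        engine_args  => { interval => 0.25 },   # or: { iterations => 50 }
    );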
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
This module was based on code from Peter Teichman's Cobe project.
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | 5142367509977dea0292b0287ee9287615c527d0 | Remove dead code | diff --git a/lib/Hailo/Engine/Scored.pm b/lib/Hailo/Engine/Scored.pm
index 43085b7..237c695 100644
--- a/lib/Hailo/Engine/Scored.pm
+++ b/lib/Hailo/Engine/Scored.pm
@@ -1,275 +1,268 @@
package Hailo::Engine::Scored;
use 5.010;
use Any::Moose;
use List::Util qw<sum>;
use List::MoreUtils qw<any>;
use Time::HiRes qw<gettimeofday tv_interval>;
extends 'Hailo::Engine::Default';
after BUILD => sub {
my ($self) = @_;
my %args = $self->arguments;
if (defined $args{iterations} && defined $args{interval}) {
die __PACKAGE__.": You can only specify one of 'iterations' and 'interval'\n";
}
return;
};
sub reply {
my $self = shift;
my $tokens = shift // [];
# see if we recognize any of the input tokens
my $token_cache = $self->_resolve_input_tokens($tokens);
my @input_token_ids = keys %$token_cache;
my @token_counts;
- # let's select potential pivot tokens
+ # let's select potential pivot tokens from the input
if (keys %$token_cache) {
- # we got some known tokens, let's prefer the ones with normal
- # spacing, i.e. words instead of things like ',' or '('.
+ # we only want the ones with normal spacing (usually normal words)
@token_counts = map {
$token_cache->{$_}[0] == 0 ? [$_, $token_cache->{$_}[2]] : ()
} keys %$token_cache;
-
- if (!@token_counts) {
- # no known words in the input, so we'll settle for the rest
- @token_counts = map { [$_, $token_cache->{$_}[2]] } keys %$token_cache;
- }
-
}
my $token_probs = $self->_get_pivot_probabilites(\@token_counts);
my @started = gettimeofday();
my $iterations = 0;
my $done;
my %args = $self->arguments;
if (!defined $args{iterations} && !defined $args{interval}) {
# construct replies for half a second by default
$args{interval} = 0.5;
}
if (defined $args{iterations}) {
$done = sub {
return 1 if $iterations == $args{iterations};
};
}
else {
$done = sub {
my $elapsed = tv_interval(\@started, [gettimeofday]);
return 1 if $elapsed >= $args{interval};
};
}
my (%link_cache, %expr_cache, $best_score, $best_reply);
while (1) {
$iterations++;
my $reply = $self->_generate_reply($token_probs, \%expr_cache);
return if !defined $reply; # we don't know any expressions yet
my $score = $self->_evaluate_reply(\@input_token_ids, $reply, \%link_cache);
if (defined $best_reply && $self->_too_similar(\@input_token_ids, $reply)) {
last if $done->();
next;
}
if (!defined $best_score || $score > $best_score) {
$best_score = $score;
$best_reply = $reply;
}
last if $done->();
}
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @$best_reply;
return \@output;
}
# Calculate the probability we wish to pick each token as the pivot.
# This uses -log2(p) as a method for inverting token probability,
# ensuring that our rarer tokens are picked more often.
sub _get_pivot_probabilites {
my ($self, $token_counts) = @_;
return [] if !@$token_counts;
return [[$token_counts->[0], 1]] if @$token_counts == 1;
# calculate the (non-normalized) probability we want each to occur
my $count_sum = sum(map { $_->[1] } @$token_counts);
my $p = [];
my $p_sum = 0;
for my $token_count (map { $_->[1] } @$token_counts) {
my $token_p = -log(($token_count/$count_sum))/log(2);
push @$p, $token_p;
$p_sum += $token_p;
}
# normalize the probabilities
my @probs = map {
[$token_counts->[$_], $p->[$_] / $p_sum];
} 0..$#{ $token_counts };
return \@probs;
}
sub _generate_reply {
my ($self, $token_probs, $expr_cache) = @_;
my ($pivot_expr_id, @token_ids);
if (@$token_probs) {
my $pivot_token_id = $self->_choose_pivot($token_probs);
($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
}
else {
($pivot_expr_id, @token_ids) = $self->_random_expr();
return if !defined $pivot_expr_id; # no expressions in the database
}
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, $expr_cache);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, $expr_cache);
return \@token_ids;
}
sub _evaluate_reply {
my ($self, $input_token_ids, $reply_token_ids, $cache) = @_;
my $order = $self->order;
my $score = 0;
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $next_token_id = $reply_token_ids->[$idx];
if (any { $_ == $next_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx .. $idx+$order-1];
my $key = join('_', @expr)."-$next_token_id";
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('next', \@expr, $next_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $prev_token_id = $reply_token_ids->[$idx];
if (any { $_ == $prev_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx+1 .. $idx+$order];
my $key = join('_', @expr)."-$prev_token_id";
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('prev', \@expr, $prev_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
return $score;
}
sub _expr_token_probability {
my ($self, $pos, $expr, $token_id) = @_;
my $order = $self->order;
my $expr_id = $self->_expr_id_add($expr);
$self->{"_sth_${pos}_token_count"}->execute($expr_id, $token_id);
my $expr2token = $self->{"_sth_${pos}_token_count"}->fetchrow_array();
return 0 if !$expr2token;
$self->{"_sth_${pos}_token_links"}->execute($expr_id);
my $expr2all = $self->{"_sth_${pos}_token_links"}->fetchrow_array();
return $expr2token / $expr2all;
}
sub _choose_pivot {
my ($self, $token_probs) = @_;
my $random = rand;
my $p = 0;
for my $token (@$token_probs) {
$p += $token->[1];
return $token->[0][0] if $p > $random;
}
return;
}
sub _too_similar {
my ($self, $input_token_ids, $reply_token_ids) = @_;
my %input_token_ids = map { +$_ => 1 } @$input_token_ids;
for my $reply_token_id (@$reply_token_ids) {
return if !$input_token_ids{$reply_token_id};
}
return 1;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Scored - MegaHAL-style reply scoring for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>. It inherits from
L<Hailo::Engine::Default|Hailo::Engine::Default> and only overrides its
C<reply> method.
It generates multiple replies and applies a scoring algorithm to them, then
returns the best one, similar to MegaHAL.
=head1 ATTRIBUTES
=head2 C<engine_args>
This is a hash reference which can have the following keys:
=head3 C<iterations>
The number of replies to generate before returning the best one.
=head3 C<interval>
The time (in seconds) to spend on generating replies before returning the
best one.
You can not specify both C<iterations> and C<interval> at the same time. If
neither is specified, a default C<interval> of 0.5 seconds will be used.
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
This module was based on code from Peter Teichman's Cobe project.
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | 8b8bc9cb698acfc72bf17cf5a75b95a101ce2eb9 | Add --train-fast for great justice | diff --git a/Changes b/Changes
index 138a782..a02e01a 100644
--- a/Changes
+++ b/Changes
@@ -1,527 +1,532 @@
Revision history for Hailo
{{$NEXT}}
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
+ - Add --train-fast to speed up learning by up to an additional 45% on
+ large brains by using aggressive caching. This uses a lot of memory.
+ Almost 600MB with SQLite on a 64bit machine for a brain which
+ eventually takes 134MB on disk (trained from a 350k line IRC log).
+
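A minimal invocation sketch for the new flag (the brain and corpus file names are illustrative):

    hailo --brain big.sqlite --train-fast irclog.trn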
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '–' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line utility now has a much better
interface. It supports SQLite-like command syntax and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization); overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is (see the sketch
after this entry).
This improves performance a lot on input that contains URIs;
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
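A minimal sketch of the kind of URI matching described above, restricted to HTTP for brevity (the sample text is made up):

    use Regexp::Common qw(URI);
    my $text = 'docs at http://hailo.org and more prose';
    my @uris;
    while ($text =~ /$RE{URI}{HTTP}{-keep}/g) {
        push @uris, $1;    # with {-keep}, $1 holds the whole URI
    }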
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
                 s/iter  System Hailo  lib Hailo
   System Hailo    74.8            --        -7%
   lib Hailo       69.4            8%         --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
                 s/iter  System Hailo  lib Hailo
   System Hailo    68.2            --       -36%
   lib Hailo       43.6           57%         --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
                 s/iter  System Hailo  lib Hailo
   System Hailo    21.5            --        -6%
   lib Hailo       20.3            6%         --
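A minimal sketch of the API difference behind the numbers above ($sth stands for an already-executed DBI statement handle, and the key column is illustrative):

    my $by_id = $sth->fetchall_hashref('id');   # one hash per row: slower in tight loops
    my $rows  = $sth->fetchall_arrayref;        # array refs indexed by column: cheaper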
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code; that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
               s/iter  0.30 Hailo  0.31 Hailo
   0.30 Hailo    20.2          --        -16%
   0.31 Hailo    16.9         19%          --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically (see
the sketch after this entry).
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
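A minimal DBI sketch of the portable approach mentioned above, assuming a connected $dbh (the token table and its id column are illustrative):

    my $sth = $dbh->prepare('INSERT INTO token (spacing, text) VALUES (?, ?)');
    $sth->execute(0, 'hailo');
    my $id = $dbh->last_insert_id(undef, undef, 'token', 'id');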
0.30 2010-03-15 15:18:01
diff --git a/bin/hailo b/bin/hailo
index 2b3f5eb..968be40 100755
--- a/bin/hailo
+++ b/bin/hailo
@@ -1,111 +1,112 @@
#!/usr/bin/env perl
package hailo;
use 5.010;
use open qw< :encoding(utf8) :std >;
use Dir::Self;
use Any::Moose;
use Encode qw(decode);
use Hailo::Command;
use namespace::clean -except => 'meta';
# Nothing to see here
__PACKAGE__->meta->make_immutable;
# use utf8 everywhere
$_ = decode('utf8', $_) for @ARGV;
# Set programname
$0 = 'hailo';
if ($] < 5.013000 and $^O eq 'linux') {
local $@;
eval {
require Sys::Prctl;
Sys::Prctl::prctl_name('hailo');
};
}
# I want my mommy!
$Hailo::Command::HERE_MOMMY = __DIR__;
# Show help if run without arguments
@ARGV = qw(--help) unless @ARGV;
# Hailing frequencies open
Hailo::Command->new_with_options->run;
=encoding utf8
=head1 NAME
hailo - Command-line interface to the L<Hailo|Hailo> Markov bot
=head1 USAGE
- usage: hailo [-abEhLlopRrSsTtuv] [long options...]
+ usage: hailo [-abEfhLlopRrSsTtuv] [long options...]
    -v --version          Print version and exit
    -u --ui               Use UI CLASS
    -t --train            Learn from all the lines in FILE, use - for STDIN
    -s --stats            Print statistics about the brain
    -r --reply            Reply to STRING
    -p --progress         Display progress during the import
    -o --order            Markov order; How deep the rabbit hole goes
    -l --learn            Learn from STRING
    -h --help             You're soaking it in
+    -f --train-fast       Train with aggressive caching (memory-hungry!)
    -b --brain            Load/save brain to/from FILE
    -a --autosave         Save the brain on exit (on by default)
    -T --tokenizer        Use tokenizer CLASS
    -S --storage          Use storage CLASS
    -R --random-reply     Like --reply but takes no STRING; Babble at random
    -L --learn-reply      Learn from STRING and reply to it
    -E --engine           Use engine CLASS
    --ui-args             Arguments for the UI class
    --tokenizer-args      Arguments for the Tokenizer class
    --storage-args        Arguments for the Storage class
    --examples            Print examples along with the help message
    --engine-args         Arguments for the Engine class
    --brain-resource      Alias for `brain' for backwards compatibility
Note: All input/output and files are assumed to be UTF-8 encoded.
=head1 SYNOPSIS
Train in-memory using L<bot-training|Bot::Training>'s F<megahal.trn>
and reply:
bot-training -f megahal | hailo --brain :memory: --train - --random-reply
# ==> Time flies like a banana.
Create an on-disk brain for later use:
bot-training -f megahal > megahal.trn
hailo --brain hailo.sqlite --train megahal.trn
hailo --brain hailo.sqlite --reply 'Reply to this, silly robot!'
# ==> No way! I don't want to think.
Interact with the brain using readline:
hailo --brain hailo.sqlite
Welcome to the Hailo interactive shell!
Enter ".help" to show the built-in commands.
Input that's not a command will be passed to Hailo to learn, and it'll
reply back.
Hailo> Hello there
# ==> Wife asked "what have you got there?" replied he, "just my cup of fur".
=head1 DESCRIPTION
See the documentation for L<Hailo|Hailo> for more information.
=head1 AUTHOR
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/lib/Hailo.pm b/lib/Hailo.pm
index 617b977..7fcb858 100644
--- a/lib/Hailo.pm
+++ b/lib/Hailo.pm
@@ -1,638 +1,648 @@
package Hailo;
use 5.010;
use autodie qw(open close);
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use File::Glob ':glob';
use Class::Load qw(try_load_class);
use Scalar::Util qw(blessed);
use List::Util qw(first);
use namespace::clean -except => 'meta';
use constant PLUGINS => [ qw[
Hailo::Engine::Default
Hailo::Engine::Scored
Hailo::Storage::MySQL
Hailo::Storage::PostgreSQL
Hailo::Storage::SQLite
Hailo::Tokenizer::Chars
Hailo::Tokenizer::Words
Hailo::UI::ReadLine
] ];
has brain => (
isa => 'Str',
is => 'rw',
);
has order => (
isa => 'Int',
is => 'rw',
default => 2,
trigger => sub {
my ($self, $order) = @_;
$self->_custom_order(1);
},
);
has _custom_order => (
isa => 'Bool',
is => 'rw',
default => 0,
init_arg => undef,
documentation => "Here so we can differentiate between the default value of order being explictly set and being set by default",
);
has _custom_tokenizer_class => (
isa => 'Bool',
is => 'rw',
default => 0,
init_arg => undef,
documentation => "Here so we can differentiate between the default value of tokenizer_class being explictly set and being set by default",
);
has save_on_exit => (
isa => 'Bool',
is => 'rw',
default => 1,
);
has brain_resource => (
documentation => "Alias for `brain' for backwards compatibility",
isa => 'Str',
is => 'rw',
trigger => sub {
my ($self, $brain) = @_;
$self->brain($brain);
},
);
sub BUILD {
my ($self) = @_;
my $brain = $self->brain;
return if !defined $brain;
$self->brain(bsd_glob($brain));
return;
}
my %has = (
engine => {
name => 'Engine',
default => 'Default',
},
storage => {
name => 'Storage',
default => 'SQLite',
},
tokenizer => {
name => 'Tokenizer',
default => 'Words',
},
ui => {
name => 'UI',
default => 'ReadLine',
},
);
for my $k (keys %has) {
my $name = $has{$k}->{name};
my $default = $has{$k}->{default};
my $method_class = "${k}_class";
my $method_args = "${k}_args";
# working classes
has "${k}_class" => (
isa => 'Str',
is => "rw",
default => $default,
($k ~~ 'tokenizer'
? (trigger => sub {
my ($self, $class) = @_;
$self->_custom_tokenizer_class(1);
})
: ())
);
# Object arguments
has "${k}_args" => (
documentation => "Arguments for the $name class",
isa => 'HashRef',
is => "ro",
default => sub { +{} },
);
# Working objects
has "_${k}" => (
does => "Hailo::Role::$name",
lazy_build => 1,
is => 'ro',
init_arg => undef,
);
# Generate the object itself
no strict 'refs';
*{"_build__${k}"} = sub {
my ($self) = @_;
my $obj = $self->_new_class(
$name,
$self->$method_class,
{
arguments => $self->$method_args,
($k ~~ [ qw< engine storage > ]
? (order => $self->order)
: ()),
($k ~~ [ qw< engine > ]
? (storage => $self->_storage)
: ()),
(($k ~~ [ qw< storage > ] and defined $self->brain)
? (
hailo => do {
require Scalar::Util;
Scalar::Util::weaken(my $s = $self);
my %callback = (
has_custom_order => sub { $s->_custom_order },
has_custom_tokenizer_class => sub { $s->_custom_tokenizer_class },
set_order => sub {
my ($db_order) = @_;
$s->order($db_order);
$s->_engine->order($db_order);
},
set_tokenizer_class => sub {
my ($db_tokenizer_class) = @_;
$s->tokenizer_class($db_tokenizer_class);
},
);
\%callback;
},
brain => $self->brain
)
: ()),
(($k ~~ [ qw< storage > ]
? (tokenizer_class => $self->tokenizer_class)
: ()))
},
);
return $obj;
};
}
sub _new_class {
my ($self, $type, $class, $args) = @_;
my $pkg;
if ($class =~ m[^\+(?<custom_plugin>.+)$]) {
$pkg = $+{custom_plugin};
} else {
my @plugins = @{ $self->PLUGINS };
# Be fuzzy about includes, e.g. DBD::SQLite or SQLite or sqlite will go
$pkg = first { / $type : .* : $class /ix }
sort { length $a <=> length $b }
@plugins;
unless ($pkg) {
local $" = ', ';
my @p = grep { /$type/ } @plugins;
die "Couldn't find a class name matching '$class' in plugins '@p'";
}
}
my ($success, $error) = try_load_class($pkg);
die $error if !$success;
return $pkg->new(%$args);
}
sub save {
my ($self, @args) = @_;
$self->_storage->save(@args);
return;
}
sub train {
- my ($self, $input) = @_;
+ my ($self, $input, $fast) = @_;
$self->_storage->start_training();
given ($input) {
# With STDIN
when (not ref and defined and $_ eq '-') {
die "You must provide STDIN when training from '-'" if $self->_is_interactive(*STDIN);
- $self->_train_fh(*STDIN);
+ $self->_train_fh(*STDIN, $fast);
}
# With a filehandle
when (ref eq 'GLOB') {
- $self->_train_fh($input);
+ $self->_train_fh($input, $fast);
}
# With a file
when (not ref) {
open my $fh, '<:encoding(utf8)', $input;
- $self->_train_fh($fh, $input);
+ $self->_train_fh($fh, $fast, $input);
}
# With an Array
when (ref eq 'ARRAY') {
- $self->_learn_one($_) for @$input;
+ for my $line (@$input) {
+ $self->_learn_one($line, $fast);
+ $self->_engine->flush_cache if !$fast;
+ }
+ $self->_engine->flush_cache if $fast;
}
# With something naughty
default {
die "Unknown input: $input";
}
}
$self->_storage->stop_training();
return;
}
sub _train_fh {
- my ($self, $fh, $filename) = @_;
+ my ($self, $fh, $fast) = @_;
while (my $line = <$fh>) {
chomp $line;
- $self->_learn_one($line);
+ $self->_learn_one($line, $fast);
+ $self->_engine->flush_cache if !$fast;
}
+ $self->_engine->flush_cache if $fast;
return;
}
sub learn {
my ($self, $input) = @_;
my $inputs;
given ($input) {
when (not defined) {
die "Cannot learn from undef input";
}
when (not ref) {
$inputs = [$input];
}
# With an Array
when (ref eq 'ARRAY') {
$inputs = $input
}
default {
die "Unknown input: $input";
}
}
my $storage = $self->_storage;
$storage->start_learning();
$self->_learn_one($_) for @$inputs;
$storage->stop_learning();
return;
}
sub _learn_one {
- my ($self, $input) = @_;
+ my ($self, $input, $fast) = @_;
my $engine = $self->_engine;
my $tokens = $self->_tokenizer->make_tokens($input);
- $engine->learn($tokens);
+ $fast ? $engine->learn_cached($tokens) : $engine->learn($tokens);
return;
}
sub learn_reply {
my ($self, $input) = @_;
$self->learn($input);
return $self->reply($input);
}
sub reply {
my ($self, $input) = @_;
my $storage = $self->_storage;
# start_training() hasn't been called so we can't guarantee that
# the storage has been engaged at this point. This must be called
# before ->_engine() is called anywhere to ensure that the
# lazy-loading in the engine works.
$storage->_engage() unless $storage->_engaged;
my $engine = $self->_engine;
my $tokenizer = $self->_tokenizer;
my $reply;
if (defined $input) {
my $tokens = $tokenizer->make_tokens($input);
$reply = $engine->reply($tokens);
}
else {
$reply = $engine->reply();
}
return unless defined $reply;
return $tokenizer->make_output($reply);
}
sub stats {
my ($self) = @_;
return $self->_storage->totals();
}
sub DEMOLISH {
my ($self) = @_;
$self->save() if blessed $self->{_storage} and $self->save_on_exit;
return;
}
sub _is_interactive {
require IO::Interactive;
return IO::Interactive::is_interactive();
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo - A pluggable Markov engine analogous to MegaHAL
=head1 SYNOPSIS
This is the synopsis for using Hailo as a module. See L<hailo> for
command-line invocation.
# Hailo requires Perl 5.10
use 5.010;
use Any::Moose;
use Hailo;
# Construct a new in-memory Hailo using the SQLite backend. See
# backend documentation for other options.
my $hailo = Hailo->new;
# Various ways to learn
my @train_this = ("I like big butts", "and I can not lie");
$hailo->learn(\@train_this);
$hailo->learn($_) for @train_this;
# Heavy-duty training interface. Backends may drop some safety
# features like journals or synchronous IO to train faster using
# this mode.
$hailo->train("megahal.trn");
$hailo->train($filehandle);
# Make the brain babble
say $hailo->reply("hello good sir.");
# Just say something at random
say $hailo->reply();
=head1 DESCRIPTION
Hailo is a fast and lightweight Markov engine intended to replace
L<AI::MegaHAL|AI::MegaHAL>. It has a L<Mouse|Mouse> (or
L<Moose|Moose>) based core with pluggable
L<storage|Hailo::Role::Storage>, L<tokenizer|Hailo::Role::Tokenizer>
and L<engine|Hailo::Role::Engine> backends.
It is similar to MegaHAL in functionality, the main differences (with the
default backends) being better scalability, drastically less memory usage,
an improved tokenizer, and tidier output.
With this distribution, you can create, modify, and query Hailo brains. To
use Hailo in event-driven POE applications, you can use the
L<POE::Component::Hailo|POE::Component::Hailo> wrapper. One example is
L<POE::Component::IRC::Plugin::Hailo|POE::Component::IRC::Plugin::Hailo>,
which implements an IRC chat bot.
=head2 Etymology
I<Hailo> is a portmanteau of I<HAL> (as in MegaHAL) and
L<failo|http://identi.ca/failo>.
=head1 Backends
Hailo supports pluggable L<storage|Hailo::Role::Storage> and
L<tokenizer|Hailo::Role::Tokenizer> backends, it also supports a
pluggable L<UI|Hailo::Role::UI> backend which is used by the L<hailo>
command-line utility.
=head2 Storage
Hailo can currently store its data in either a
L<SQLite|Hailo::Storage::SQLite>,
L<PostgreSQL|Hailo::Storage::PostgreSQL> or
L<MySQL|Hailo::Storage::MySQL> database. Some NoSQL backends were
supported in earlier versions, but they were removed as they had no
redeeming quality.
SQLite is the primary target for Hailo. It's much faster and uses less
resources than the other two. It's highly recommended that you use it.
See L<Hailo::Storage/"Comparison of backends"> for benchmarks showing
how the various backends compare under different workloads, and how
you can create your own.
=head2 Tokenizer
By default Hailo will use L<the word
tokenizer|Hailo::Tokenizer::Words> to split up input by whitespace,
taking into account things like quotes, sentence terminators and more.
There's also a L<the character
tokenizer|Hailo::Tokenizer::Chars>. It's not generally useful for a
conversation bot but can be used to e.g. generate new words given a
list of existing words.
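A minimal sketch of that word-generation use case (the training words are made up):

    use 5.010;
    use Hailo;
    my $hailo = Hailo->new(
        brain           => ':memory:',
        tokenizer_class => 'Chars',
    );
    $hailo->learn($_) for qw(marble marmot margin);
    say $hailo->reply() // 'nothing learned yet';   # babbles a new letter sequence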
=head1 UPGRADING
Hailo makes no promises about brains generated with earlier versions
being compatible with future versions, and due to the way Hailo works
there's no practical way to make that promise. Learning in Hailo is
lossy so an accurate conversion is impossible.
If you're maintaining a Hailo brain that you want to keep using you
should save the input you trained it on and re-train when you upgrade.
Hailo is always going to lose information present in the input you
give it. How input tokens get split up and saved to the storage
backend depends on the version of the tokenizer being used and how
that input gets saved to the database.
For instance if an earlier version of Hailo tokenized C<"foo+bar">
simply as C<"foo+bar"> but a later version split that up into
C<"foo", "+", "bar">, then an input of C<"foo+bar are my favorite
metasyntactic variables"> wouldn't take into account the existing
C<"foo+bar"> string in the database.
Tokenizer changes like this would cause the brains to accumulate
garbage and would leave other parts in a state they wouldn't otherwise
have gotten into.
There have been more drastic changes to the database format itself in
the past.
Having said all that, the database format and the tokenizer are
relatively stable. At the time of writing 0.33 is the latest release
and it's compatible with brains down to at least 0.17. If you're
upgrading and there isn't a big notice about the storage format being
incompatible in the F<Changes> file, your old brains will probably work
just fine.
=head1 ATTRIBUTES
=head2 C<brain>
The name of the brain (file name, database name) to use as storage.
There is no default. Whether this gets used at all depends on the
storage backend, currently only SQLite uses it.
=head2 C<save_on_exit>
A boolean value indicating whether Hailo should save its state before
its object gets destroyed. This defaults to true and will simply call
L<save|/save> at C<DEMOLISH> time.
See L<Hailo::Storage::SQLite/"in_memory"> for how the SQLite backend
uses this option.
=head2 C<order>
The Markov order (chain length) you want to use for an empty brain.
The default is 2.
=head2 C<engine_class>
=head2 C<storage_class>
=head2 C<tokenizer_class>
=head2 C<ui_class>
A short name for the class we use for the engine, storage,
tokenizer or ui backends.
By default this is B<Default> for the engine, B<SQLite> for storage,
B<Words> for the tokenizer and B<ReadLine> for the UI. The UI backend
is only used by the L<hailo> command-line interface.
You can only specify the short name of one of the packages Hailo
itself ships with. If you need another class then just prefix the
package with a plus (Catalyst style), e.g. C<+My::Foreign::Tokenizer>.
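A minimal sketch (the foreign tokenizer package is hypothetical):

    my $hailo = Hailo->new(
        brain           => 'brain.sqlite',
        tokenizer_class => '+My::Foreign::Tokenizer',
    );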
=head2 C<engine_args>
=head2 C<storage_args>
=head2 C<tokenizer_args>
=head2 C<ui_args>
A C<HashRef> of arguments for engine/storage/tokenizer/ui
backends. See the documentation for the backends for what sort of
arguments they accept.
=head1 METHODS
=head2 C<new>
This is the constructor. It accepts the attributes specified in
L</ATTRIBUTES>.
=head2 C<learn>
Takes a string or an array reference of strings and learns from them.
=head2 C<train>
Takes a filename, filehandle or array reference and learns from all its
lines. If a filename is passed, the file is assumed to be UTF-8 encoded.
Unlike L<C<learn>|/learn>, this method sacrifices some safety (disables
the database journal, fsyncs, etc) for speed while learning.
+You can provide a second parameter which, if true, will use aggressive
+caching while training. This will speed things up considerably for large
+inputs, but will take up quite a bit of memory.
+
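A minimal sketch of both modes (the file name is illustrative):

    my $hailo = Hailo->new(brain => 'big.sqlite');
    $hailo->train('irclog.trn');       # normal training
    $hailo->train('irclog.trn', 1);    # aggressive caching: faster, memory-hungry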
=head2 C<reply>
Takes an optional line of text and generates a reply that might be relevant.
=head2 C<learn_reply>
Takes a string argument, learns from it, and generates a reply that
might be relevant. This is equivalent to calling L<learn|/learn>
followed by L<reply|/reply>.
=head2 C<save>
Tells the underlying storage backend to L<save its
state|Hailo::Role::Storage/"save">, any arguments to this method will
be passed as-is to the backend.
=head2 C<stats>
Takes no arguments. Returns the number of tokens, expressions, previous
token links and next token links.
=head1 SUPPORT
You can join the IRC channel I<#hailo> on FreeNode if you have questions.
=head1 BUGS
Bugs, feature requests and other issues are tracked in L<Hailo's RT on
rt.cpan.org|https://rt.cpan.org/Dist/Display.html?Name=Hailo>
=head1 SEE ALSO
=over
=item * L<POE::Component::Hailo> - A non-blocking POE wrapper around Hailo
=item * L<POE::Component::IRC::Plugin::Hailo> - A Hailo IRC bot plugin
=item * L<http://github.com/hinrik/failo> - Failo, an IRC bot that uses Hailo
=item * L<http://github.com/bingos/gumbybrain> - GumbyBRAIN, a more famous IRC bot that uses Hailo
=item * L<Hailo::UI::Web> - A L<Catalyst> and jQuery powered web
interface to Hailo available at L<hailo.nix.is|http://hailo.nix.is>
and as L<hailo-ui-web|http://github.com/avar/hailo-ui-web> on
L<GitHub|http://github.com>
=item * L<HALBot> - Another L<Catalyst> Dojo powered web interface to
Hailo available at L<bifurcat.es|http://bifurcat.es/> and as
L<halbot-on-the-web|http://gitorious.org/halbot-on-the-web/halbot-on-the-web>
at L<gitorious|http://gitorious.org>
=item * L<http://github.com/pteichman/cobe> - cobe, a Python port of MegaHAL "inspired by the success of Hailo"
=back
=head1 LINKS
=over
=item * L<hailo.org|http://hailo.org> - Hailo's website
=item * L<http://bit.ly/hailo_rewrite_of_megahal> - Hailo: A Perl rewrite of
MegaHAL, A blog posting about the motivation behind Hailo
=item * L<http://blogs.perl.org/users/aevar_arnfjor_bjarmason/hailo/> -
More blog posts about Hailo on E<AElig>var ArnfjE<ouml>rE<eth>
Bjarmason's L<blogs.perl.org|http://blogs.perl.org> blog
=item * Hailo on L<freshmeat|http://freshmeat.net/projects/hailo> and
L<ohloh|https://www.ohloh.net/p/hailo>
=back
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/lib/Hailo/Command.pm b/lib/Hailo/Command.pm
index 1c548cb..a6dec41 100644
--- a/lib/Hailo/Command.pm
+++ b/lib/Hailo/Command.pm
@@ -1,429 +1,451 @@
package Hailo::Command;
use 5.010;
use Any::Moose;
use Any::Moose 'X::Getopt';
use Any::Moose 'X::StrictConstructor';
use namespace::clean -except => 'meta';
extends 'Hailo';
with any_moose('X::Getopt::Dashes');
## Our internal Getopts method that Hailo.pm doesn't care about.
has help_flag => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'h',
cmd_flag => 'help',
isa => 'Bool',
is => 'ro',
default => 0,
documentation => "You're soaking it in",
);
has _go_version => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'v',
cmd_flag => 'version',
documentation => 'Print version and exit',
isa => 'Bool',
is => 'ro',
);
has _go_examples => (
traits => [ qw/ Getopt / ],
cmd_flag => 'examples',
documentation => 'Print examples along with the help message',
isa => 'Bool',
is => 'ro',
);
has _go_progress => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'p',
cmd_flag => 'progress',
documentation => 'Display progress during the import',
isa => 'Bool',
is => 'ro',
default => sub {
my ($self) = @_;
$self->_is_interactive();
},
);
has _go_learn => (
traits => [ qw/ Getopt / ],
cmd_aliases => "l",
cmd_flag => "learn",
documentation => "Learn from STRING",
isa => 'Str',
is => "ro",
);
has _go_learn_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "L",
cmd_flag => "learn-reply",
documentation => "Learn from STRING and reply to it",
isa => 'Str',
is => "ro",
);
has _go_train => (
traits => [ qw/ Getopt / ],
cmd_aliases => "t",
cmd_flag => "train",
documentation => "Learn from all the lines in FILE, use - for STDIN",
isa => 'Str',
is => "ro",
);
+has _go_train_fast => (
+ traits => [ qw/ Getopt / ],
+ cmd_aliases => "f",
+ cmd_flag => "train-fast",
+ documentation => "Train with aggressive caching (memory-hungry!)",
+ isa => 'Str',
+ is => "ro",
+);
+
has _go_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "r",
cmd_flag => "reply",
documentation => "Reply to STRING",
isa => 'Str',
is => "ro",
);
has _go_random_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "R",
cmd_flag => "random-reply",
documentation => "Like --reply but takes no STRING; Babble at random",
isa => 'Bool',
is => "ro",
);
has _go_stats => (
traits => [ qw/ Getopt / ],
cmd_aliases => "s",
cmd_flag => "stats",
documentation => "Print statistics about the brain",
isa => 'Bool',
is => "ro",
);
## Things we have to pass to Hailo.pm via triggers when they're set
has _go_autosave => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'a',
cmd_flag => 'autosave',
documentation => 'Save the brain on exit (on by default)',
isa => 'Bool',
is => 'rw',
trigger => sub {
my ($self, $bool) = @_;
$self->save_on_exit($bool);
},
);
has _go_order => (
traits => [ qw/ Getopt / ],
cmd_aliases => "o",
cmd_flag => "order",
documentation => "Markov order; How deep the rabbit hole goes",
isa => 'Int',
is => "rw",
trigger => sub {
my ($self, $order) = @_;
$self->order($order);
},
);
has _go_brain => (
traits => [ qw/ Getopt / ],
cmd_aliases => "b",
cmd_flag => "brain",
documentation => "Load/save brain to/from FILE",
isa => 'Str',
is => "ro",
trigger => sub {
my ($self, $brain) = @_;
$self->brain($brain);
},
);
# working classes
has _go_engine_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "E",
cmd_flag => "engine",
isa => 'Str',
is => "rw",
documentation => "Use engine CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->engine_class($class);
},
);
has _go_storage_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "S",
cmd_flag => "storage",
isa => 'Str',
is => "rw",
documentation => "Use storage CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->storage_class($class);
},
);
has _go_tokenizer_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "T",
cmd_flag => "tokenizer",
isa => 'Str',
is => "rw",
documentation => "Use tokenizer CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->tokenizer_class($class);
},
);
has _go_ui_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "u",
cmd_flag => "ui",
isa => 'Str',
is => "rw",
documentation => "Use UI CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->ui_class($class);
},
);
# Stop Hailo from polluting our command-line interface
for (qw/ save_on_exit order brain /, map { qq[${_}_class] } qw/ engine storage tokenizer ui /) {
has "+$_" => (
traits => [ qw/ NoGetopt / ],
);
}
# Check validity of options
before run => sub {
my ($self) = @_;
if (not $self->_storage->ready and
(defined $self->_go_reply or
defined $self->_go_train or
+ defined $self->_go_train_fast or
defined $self->_go_stats or
defined $self->_go_learn or
defined $self->_go_learn_reply or
defined $self->_go_random_reply)) {
# TODO: Make this spew out the --help reply just like hailo
# with invalid options does usually, but only if run via
# ->new_with_options
die "To reply/train/learn/stat you must specify options to initialize your storage backend\n";
}
+ if (defined $self->_go_train and defined $self->_go_train_fast) {
+ die "You can only specify one of --train and --train-fast\n";
+ }
+
return;
};
sub run {
my ($self) = @_;
if ($self->_go_version) {
# Munging strictness because we don't have a version from a
# Git checkout. Dist::Zilla provides it.
no strict 'vars';
my $version = $VERSION // 'dev-git';
say "hailo $version";
return;
}
if ($self->_is_interactive() and
$self->_storage->ready and
not defined $self->_go_train and
+ not defined $self->_go_train_fast and
not defined $self->_go_learn and
not defined $self->_go_reply and
not defined $self->_go_learn_reply and
not defined $self->_go_stats and
not defined $self->_go_random_reply) {
$self->_ui->run($self);
}
$self->train($self->_go_train) if defined $self->_go_train;
+ $self->train($self->_go_train_fast, 1) if defined $self->_go_train_fast;
$self->learn($self->_go_learn) if defined $self->_go_learn;
if (defined $self->_go_learn_reply) {
my $answer = $self->learn_reply($self->_go_learn_reply);
say $answer // "I don't know enough to answer you yet.";
}
if (defined $self->_go_random_reply) {
my $answer = $self->reply();
say $answer // "I don't know enough to answer you yet.";
}
elsif (defined $self->_go_reply) {
my $answer = $self->reply($self->_go_reply);
say $answer // "I don't know enough to answer you yet.";
}
if ($self->_go_stats) {
my ($tok, $ex, $prev, $next) = $self->stats();
my $order = $self->_storage->order;
say "Tokens: $tok";
say "Expression length: $order tokens";
say "Expressions: $ex";
say "Links to preceding tokens: $prev";
say "Links to following tokens: $next";
}
return;
}
override _train_fh => sub {
- my ($self, $fh, $filename) = @_;
+ my ($self, $fh, $fast, $filename) = @_;
if ($self->_go_progress and $self->_is_interactive) {
- $self->train_progress($fh, $filename);
+ $self->train_progress($fh, $fast, $filename);
} else {
super();
}
};
before train_progress => sub {
require Term::Sk;
require File::CountLines;
File::CountLines->import('count_lines');
require Time::HiRes;
Time::HiRes->import(qw(gettimeofday tv_interval));
return;
};
sub train_progress {
- my ($self, $fh, $filename) = @_;
+ my ($self, $fh, $fast, $filename) = @_;
my $lines = count_lines($filename);
my $progress = Term::Sk->new('%d Elapsed: %8t %21b %4p %2d (%c lines of %m)', {
# Start at line 1, not 0
base => 1,
target => $lines,
# Every 0.1 seconds for long files
freq => ($lines < 10_000 ? 10 : 'd'),
# Override Term::Sk's default 100_000 to 100,000
commify => sub {
my $int = shift;
$int = reverse $int;
$int =~ s/(\d{3})(?=\d)(?!\d*\.)/$1,/g;
$int = reverse $int;
return $int;
},
}) or die "Error in Term::Sk->new: (code $Term::Sk::errcode) $Term::Sk::errmsg";
my $next_update = 0;
my $start_time = [gettimeofday()];
my $i = 0;
while (my $line = <$fh>) {
$i++;
chomp $line;
- $self->_learn_one($line);
+ $self->_learn_one($line, $fast);
+ $self->_engine->flush_cache if !$fast;
$progress->up;
}
$progress->close;
+ if ($fast) {
+ print "Flushing cache (this may take a while for large inputs)\n";
+ $self->_engine->flush_cache;
+ }
+
my $elapsed = tv_interval($start_time);
say sprintf "Trained from %d lines in %.2f seconds; %.2f lines/s", $i, $elapsed, ($i / $elapsed);
return;
}
# --i--do-not-exist
sub _getopt_spec_exception { goto &_getopt_full_usage }
# --help
sub _getopt_full_usage {
my ($self, $usage, $plain_str) = @_;
# If called from _getopt_spec_exception we get "Unknown option: foo"
my $warning = ref $usage eq 'ARRAY' ? $usage->[0] : undef;
my ($use, $options) = do {
# $plain_str under _getopt_spec_exception
my $out = $plain_str // $usage->text;
# The default getopt order sucks, use reverse sort order
chomp(my @out = split /^/, $out);
my $opt = join "\n", sort { $b cmp $a } @out[1 .. $#out];
($out[0], $opt);
};
my $synopsis = do {
require Pod::Usage;
my $out;
open my $fh, '>', \$out;
no warnings 'once';
my $hailo = File::Spec->catfile($Hailo::Command::HERE_MOMMY, 'hailo');
# Try not to fail on Win32 or other odd systems which might have hailo.pl not hailo
$hailo = ((glob("$hailo*"))[0]) unless -f $hailo;
Pod::Usage::pod2usage(
-input => $hailo,
-sections => 'SYNOPSIS',
-output => $fh,
-exitval => 'noexit',
);
close $fh;
$out =~ s/\n+$//s;
$out =~ s/^Usage:/examples:/;
$out;
};
# Unknown option provided
print $warning if $warning;
print <<"USAGE";
$use
$options
\n\tNote: All input/output and files are assumed to be UTF-8 encoded.
USAGE
# Hack: We can't get at our object from here so we have to inspect
# @ARGV directly.
say "\n", $synopsis if "@ARGV" ~~ /--examples/;
exit 1;
}
__PACKAGE__->meta->make_immutable;
=head1 NAME
Hailo::Command - Class for the L<hailo> command-line interface to L<Hailo>
=head1 DESCRIPTION
This is an internal class L<hailo> uses for its command-line
interface. See L<Hailo> for the public interface.
=head1 PRIVATE METHODS
=head2 C<run>
Run Hailo in accordance with the attributes that were passed to
it. This method is called by the L<hailo> command-line utility and the
Hailo test suite; its behavior is subject to change.
=head1 AUTHOR
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
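Editor's note: the run method above forwards --train-fast as $self->train($file, 1). A minimal sketch of driving fast training through the Hailo API directly; the brain and corpus filenames are illustrative, and the second train argument is assumed to be the fast flag wired up in this commit rather than documented public API:

    use 5.010;
    use strict;
    use warnings;
    use Hailo;

    my $hailo = Hailo->new(brain => 'example.brn');   # illustrative brain file

    # Second argument is assumed to be the fast flag seen in run() above:
    # counter updates go to an in-memory cache, flushed once at the end.
    $hailo->train('corpus.trn', 1);

    say $hailo->reply('hello') // "I don't know enough to answer you yet.";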
diff --git a/lib/Hailo/Engine/Default.pm b/lib/Hailo/Engine/Default.pm
index b1cd7b0..499e4da 100644
--- a/lib/Hailo/Engine/Default.pm
+++ b/lib/Hailo/Engine/Default.pm
@@ -1,382 +1,455 @@
package Hailo::Engine::Default;
use 5.010;
use Any::Moose;
use List::Util qw<min first shuffle>;
use List::MoreUtils qw<uniq>;
with qw[ Hailo::Role::Arguments Hailo::Role::Engine ];
has repeat_limit => (
isa => 'Int',
is => 'rw',
lazy => 1,
default => sub {
my ($self) = @_;
my $order = $self->order;
return min(($order * 10), 50);
}
);
sub BUILD {
my ($self) = @_;
# This performance hack is here because in our tight loops calling
# $self->storage->sth->{...} is actually a significant part of the
# overall program execution time since we're doing two method
# calls and hash dereferences for each call to the database.
my $sth = $self->storage->sth;
while (my ($k, $v) = each %$sth) {
$self->{"_sth_$k"} = $v;
}
return;
}
## no critic (Subroutines::ProhibitExcessComplexity)
sub reply {
my $self = shift;
my $tokens = shift // [];
# we will favor these tokens when making the reply. Shuffle them
# and discard half.
my @key_tokens = do {
my $i = 0;
grep { $i++ % 2 == 0 } shuffle(@$tokens);
};
my $token_cache = $self->_resolve_input_tokens($tokens);
my @key_ids = keys %$token_cache;
# sort the rest by rareness
@key_ids = $self->_find_rare_tokens(\@key_ids, 2);
# get the middle expression
my $pivot_token_id = shift @key_ids;
my ($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
return unless defined $pivot_expr_id; # we don't know any expressions yet
# remove key tokens we're already using
@key_ids = grep { my $used = $_; !first { $_ == $used } @token_ids } @key_ids;
my %expr_cache;
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @token_ids;
return \@output;
}
sub _resolve_input_tokens {
my ($self, $tokens) = @_;
my %token_cache;
if (@$tokens == 1) {
my ($spacing, $text) = @{ $tokens->[0] };
my $token_info = $self->_token_resolve($spacing, $text);
if (defined $token_info) {
my ($id, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
else {
# when there's just one token, it could be ';' for example,
# which will have normal spacing when it appears alone, but
# suffix spacing in a sentence like "those things; foo, bar",
# so we'll be a bit more lax here by also looking for any
# token that has the same text
$token_info = $self->_token_similar($text);
if (defined $token_info) {
my ($id, $spacing, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
}
}
else {
for my $token (@$tokens) {
my ($spacing, $text) = @$token;
my $token_info = $self->_token_resolve($spacing, $text);
next if !defined $token_info;
my ($id, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
}
return \%token_cache;
}
sub _token_resolve {
my ($self, $spacing, $text) = @_;
$self->{_sth_token_resolve}->execute($spacing, $text);
return $self->{_sth_token_resolve}->fetchrow_arrayref;
}
sub _token_info {
my ($self, $id) = @_;
$self->{_sth_token_info}->execute($id);
my @res = $self->{_sth_token_info}->fetchrow_array;
return \@res;
}
sub learn {
my ($self, $tokens) = @_;
my $order = $self->order;
# only learn from inputs which are long enough
return if @$tokens < $order;
my (%token_cache, %expr_cache);
# resolve/add tokens and update their counter
for my $token (@$tokens) {
my $key = join '', @$token; # the key is "$spacing$text"
if (!exists $token_cache{$key}) {
$token_cache{$key} = $self->_token_id_add($token);
}
- $self->{_sth_inc_token_count}->execute($token_cache{$key});
+ $self->{_sth_inc_token_count}->execute(1, $token_cache{$key});
}
# process every expression of length $order
for my $i (0 .. @$tokens - $order) {
my @expr = map { $token_cache{ join('', @{ $tokens->[$_] }) } } $i .. $i+$order-1;
my $key = join('_', @expr);
if (!defined $expr_cache{$key}) {
$expr_cache{$key} = $self->_expr_id_add(\@expr);
}
my $expr_id = $expr_cache{$key};
# add link to next token for this expression, if any
if ($i < @$tokens - $order) {
my $next_id = $token_cache{ join('', @{ $tokens->[$i+$order] }) };
- $self->_inc_link('next_token', $expr_id, $next_id);
+ $self->_inc_link('next_token', $expr_id, $next_id, 1);
}
# add link to previous token for this expression, if any
if ($i > 0) {
my $prev_id = $token_cache{ join('', @{ $tokens->[$i-1] }) };
- $self->_inc_link('prev_token', $expr_id, $prev_id);
+ $self->_inc_link('prev_token', $expr_id, $prev_id, 1);
}
# add links to boundary token if appropriate
my $b = $self->storage->_boundary_token_id;
- $self->_inc_link('prev_token', $expr_id, $b) if $i == 0;
- $self->_inc_link('next_token', $expr_id, $b) if $i == @$tokens-$order;
+ $self->_inc_link('prev_token', $expr_id, $b, 1) if $i == 0;
+ $self->_inc_link('next_token', $expr_id, $b, 1) if $i == @$tokens-$order;
}
return;
}
+sub learn_cached {
+ my ($self, $tokens) = @_;
+ my $order = $self->order;
+
+ # only learn from inputs which are long enough
+ return if @$tokens < $order;
+
+ my (%token_cache, %expr_cache);
+
+ # resolve/add tokens and update their counter
+ for my $token (@$tokens) {
+ my $key = join '', @$token; # the key is "$spacing$text"
+ if (!exists $token_cache{$key}) {
+ my $token_id = $self->_token_id_add($token);
+ $token_cache{$key} = $token_id;
+ $self->{_updates}{token_count}{$token_id}++;
+ }
+ }
+
+ # process every expression of length $order
+ for my $i (0 .. @$tokens - $order) {
+ my @expr = map { $token_cache{ join('', @{ $tokens->[$_] }) } } $i .. $i+$order-1;
+ my $key = join('_', @expr);
+
+ if (!defined $expr_cache{$key}) {
+ $expr_cache{$key} = $self->_expr_id_add(\@expr);
+ }
+ my $expr_id = $expr_cache{$key};
+
+ # add link to next token for this expression, if any
+ if ($i < @$tokens - $order) {
+ my $next_id = $token_cache{ join('', @{ $tokens->[$i+$order] }) };
+ $self->{_updates}{next_token}{$expr_id}{$next_id}++;
+ }
+
+ # add link to previous token for this expression, if any
+ if ($i > 0) {
+ my $prev_id = $token_cache{ join('', @{ $tokens->[$i-1] }) };
+ $self->{_updates}{prev_token}{$expr_id}{$prev_id}++;
+ }
+
+ # add links to boundary token if appropriate
+ my $b = $self->storage->_boundary_token_id;
+ $self->{_updates}{prev_token}{$expr_id}{$b}++ if $i == 0;
+ $self->{_updates}{next_token}{$expr_id}{$b}++ if $i == @$tokens-$order;
+ }
+
+ return;
+}
+
+sub flush_cache {
+ my ($self) = @_;
+
+ my $updates = $self->{_updates};
+ return if !$updates;
+
+ while (my ($token_id, $count) = each %{ $updates->{token_count} }) {
+ $self->{_sth_inc_token_count}->execute($count, $token_id);
+ }
+
+ while (my ($expr_id, $links) = each %{ $updates->{next_token} }) {
+ while (my ($next_token_id, $count) = each %$links) {
+ $self->_inc_link('next_token', $expr_id, $next_token_id, $count);
+ }
+ }
+
+ while (my ($expr_id, $links) = each %{ $updates->{prev_token} }) {
+ while (my ($prev_token_id, $count) = each %$links) {
+ $self->_inc_link('prev_token', $expr_id, $prev_token_id, $count);
+ }
+ }
+}
+
# sort token ids based on how rare they are
sub _find_rare_tokens {
my ($self, $token_ids, $min) = @_;
return unless @$token_ids;
my %links;
for my $id (@$token_ids) {
next if exists $links{$id};
$self->{_sth_token_count}->execute($id);
$links{$id} = $self->{_sth_token_count}->fetchrow_array;
}
# remove tokens which are too rare
my @ids = grep { $links{$_} >= $min } @$token_ids;
@ids = sort { $links{$a} <=> $links{$b} } @ids;
return @ids;
}
# increase the link weight between an expression and a token
sub _inc_link {
- my ($self, $type, $expr_id, $token_id) = @_;
+ my ($self, $type, $expr_id, $token_id, $count) = @_;
- $self->{"_sth_${type}_inc"}->execute($expr_id, $token_id);
+ $self->{"_sth_${type}_inc"}->execute($count, $expr_id, $token_id);
if (!$self->{"_sth_${type}_inc"}->rows) {
- $self->{"_sth_${type}_add"}->execute($expr_id, $token_id);
+ $self->{"_sth_${type}_add"}->execute($expr_id, $token_id, $count);
}
return;
}
# look up/add an expression id based on tokens
sub _expr_id_add {
my ($self, $token_ids) = @_;
$self->{_sth_expr_id}->execute(@$token_ids);
my $expr_id = $self->{_sth_expr_id}->fetchrow_array();
return $expr_id if defined $expr_id;
$self->{_sth_add_expr}->execute(@$token_ids);
return $self->storage->dbh->last_insert_id(undef, undef, "expr", undef);
}
# return token id if the token exists
sub _token_id {
my ($self, $token_info) = @_;
$self->{_sth_token_id}->execute(@$token_info);
my $token_id = $self->{_sth_token_id}->fetchrow_array();
return unless defined $token_id;
return $token_id;
}
# get token id (adding the token if it doesn't exist)
sub _token_id_add {
my ($self, $token_info) = @_;
my $token_id = $self->_token_id($token_info);
$token_id = $self->_add_token($token_info) unless defined $token_id;
return $token_id;
}
# return all tokens (regardless of spacing) that consist of this text
sub _token_similar {
my ($self, $token_text) = @_;
$self->{_sth_token_similar}->execute($token_text);
return $self->{_sth_token_similar}->fetchrow_arrayref;
}
# add a new token and return its id
sub _add_token {
my ($self, $token_info) = @_;
$self->{_sth_add_token}->execute(@$token_info);
return $self->storage->dbh->last_insert_id(undef, undef, "token", undef);
}
# return a random expression containing the given token
sub _random_expr {
my ($self, $token_id) = @_;
my $expr;
if (!defined $token_id) {
$self->{_sth_random_expr}->execute();
$expr = $self->{_sth_random_expr}->fetchrow_arrayref();
}
else {
# try the positions in a random order
for my $pos (shuffle 0 .. $self->order-1) {
my $column = "token${pos}_id";
# get a random expression which includes the token at this position
$self->{"_sth_expr_by_$column"}->execute($token_id);
$expr = $self->{"_sth_expr_by_$column"}->fetchrow_arrayref();
last if defined $expr;
}
}
return unless defined $expr;
return @$expr;
}
# return a new next/previous token
sub _pos_token {
my ($self, $pos, $expr_id, $key_tokens) = @_;
$self->{"_sth_${pos}_token_get"}->execute($expr_id);
my $pos_tokens = $self->{"_sth_${pos}_token_get"}->fetchall_arrayref();
if (defined $key_tokens) {
for my $i (0 .. $#{ $key_tokens }) {
my $want_id = $key_tokens->[$i];
my @ids = map { $_->[0] } @$pos_tokens;
my $has_id = grep { $_ == $want_id } @ids;
next unless $has_id;
return splice @$key_tokens, $i, 1;
}
}
my @novel_tokens;
for my $token (@$pos_tokens) {
push @novel_tokens, ($token->[0]) x $token->[1];
}
return $novel_tokens[rand @novel_tokens];
}
sub _construct_reply {
my ($self, $what, $expr_id, $token_ids, $expr_cache, $key_ids) = @_;
my $order = $self->order;
my $repeat_limit = $self->repeat_limit;
my $boundary_token = $self->storage->_boundary_token_id;
my $i = 0;
while (1) {
if (($i % $order) == 0 and
(($i >= $repeat_limit * 3) ||
($i >= $repeat_limit and uniq(@$token_ids) <= $order))) {
last;
}
my $id = $self->_pos_token($what, $expr_id, $key_ids);
last if $id == $boundary_token;
my @ids;
given ($what) {
when ('next') {
push @$token_ids, $id;
@ids = @$token_ids[-$order..-1];
}
when ('prev') {
unshift @$token_ids, $id;
@ids = @$token_ids[0..$order-1];
}
}
my $key = join '_', @ids;
if (!defined $expr_cache->{$key}) {
$expr_cache->{$key} = $self->_expr_id_add(\@ids);
}
$expr_id = $expr_cache->{$key};
} continue {
$i++;
}
return;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Default - The default engine backend for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>.
It generates the reply in one go, while favoring some of the tokens in the
input, and returns it. It is fast and the replies are decent, but you can
get better replies (at the cost of speed) with the
L<Scored|Hailo::Engine::Scored> engine.
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
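Editor's note: the learn_cached/flush_cache pair above trades memory for speed by counting link increments in a plain hash and writing each row once. A self-contained sketch of the same accumulate-then-flush pattern against a throwaway SQLite table (table name and data are illustrative):

    use strict;
    use warnings;
    use DBI;

    my $dbh = DBI->connect('dbi:SQLite:dbname=:memory:', '', '', { RaiseError => 1 });
    $dbh->do('CREATE TABLE token (id INTEGER PRIMARY KEY, count INTEGER NOT NULL)');
    $dbh->do('INSERT INTO token (id, count) VALUES (42, 0)');
    $dbh->do('INSERT INTO token (id, count) VALUES (7, 0)');

    # Phase 1: count sightings in memory instead of issuing one UPDATE each.
    my %pending;
    $pending{$_}++ for qw(42 42 42 7);   # token ids seen while learning

    # Phase 2: flush the cache with a single parameterized UPDATE per row.
    my $inc = $dbh->prepare('UPDATE token SET count = count + ? WHERE id = ?');
    while (my ($id, $count) = each %pending) {
        $inc->execute($count, $id);
    }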
diff --git a/lib/Hailo/Storage/Schema.pm b/lib/Hailo/Storage/Schema.pm
index 4c8470e..2269b2e 100644
--- a/lib/Hailo/Storage/Schema.pm
+++ b/lib/Hailo/Storage/Schema.pm
@@ -1,181 +1,181 @@
package Hailo::Storage::Schema;
use 5.010;
use strict;
## Soup to spawn the database itself / create statement handles
sub deploy {
my (undef, $dbd, $dbh, $order) = @_;
my @orders = (0 .. $order-1);
my $int_primary_key = "INTEGER PRIMARY KEY AUTOINCREMENT";
$int_primary_key = "INTEGER PRIMARY KEY AUTO_INCREMENT" if $dbd eq "mysql";
$int_primary_key = "SERIAL UNIQUE" if $dbd eq "Pg";
my $text = 'TEXT';
$text = 'VARCHAR(255)' if $dbd eq 'mysql';
my $text_primary = 'TEXT NOT NULL PRIMARY KEY';
$text_primary = 'TEXT NOT NULL' if $dbd eq 'mysql';
my @tables;
push @tables => <<"TABLE";
CREATE TABLE info (
attribute $text_primary,
text TEXT NOT NULL
);
TABLE
push @tables => <<"TABLE";
CREATE TABLE token (
id $int_primary_key,
spacing INTEGER NOT NULL,
text $text NOT NULL,
count INTEGER NOT NULL
);
TABLE
my $token_n = join ",\n ", map { "token${_}_id INTEGER NOT NULL REFERENCES token (id)" } @orders;
push @tables => <<"TABLE";
CREATE TABLE expr (
id $int_primary_key,
$token_n
);
TABLE
push @tables => <<"TABLE";
CREATE TABLE next_token (
id $int_primary_key,
expr_id INTEGER NOT NULL REFERENCES expr (id),
token_id INTEGER NOT NULL REFERENCES token (id),
count INTEGER NOT NULL
);
TABLE
push @tables => <<"TABLE";
CREATE TABLE prev_token (
id $int_primary_key,
expr_id INTEGER NOT NULL REFERENCES expr (id),
token_id INTEGER NOT NULL REFERENCES token (id),
count INTEGER NOT NULL
);
TABLE
for my $i (@orders) {
push @tables => "CREATE INDEX expr_token${i}_id on expr (token${i}_id);"
}
my $columns = join(', ', map { "token${_}_id" } @orders);
push @tables => "CREATE INDEX expr_token_ids on expr ($columns);";
push @tables => 'CREATE INDEX token_text on token (text);';
push @tables => 'CREATE INDEX next_token_expr_id ON next_token (expr_id);';
push @tables => 'CREATE INDEX prev_token_expr_id ON prev_token (expr_id);';
for (@tables) {
$dbh->do($_);
}
return;
}
# create statement handle objects
sub sth {
my (undef, $dbd, $dbh, $order) = @_;
my @orders = (0 .. $order-1);
my @columns = map { "token${_}_id" } 0 .. $order-1;
my $columns = join(', ', @columns);
my @ids = join(', ', ('?') x @columns);
my $ids = join(', ', @ids);
my $q_rand = 'RANDOM()';
$q_rand = 'RAND()' if $dbd eq 'mysql';
my $q_rand_id = "(abs($q_rand) % (SELECT max(id) FROM expr))";
$q_rand_id = "(random()*id+1)::int" if $dbd eq 'Pg';
my %state = (
set_info => qq[INSERT INTO info (attribute, text) VALUES (?, ?);],
random_expr => qq[SELECT * FROM expr WHERE id >= $q_rand_id LIMIT 1;],
token_resolve => qq[SELECT id, count FROM token WHERE spacing = ? AND text = ?;],
token_id => qq[SELECT id FROM token WHERE spacing = ? AND text = ?;],
token_info => qq[SELECT spacing, text FROM token WHERE id = ?;],
token_similar => qq[SELECT id, spacing, count FROM token WHERE text = ? ORDER BY $q_rand LIMIT 1;] ,
add_token => qq[INSERT INTO token (spacing, text, count) VALUES (?, ?, 0)],
- inc_token_count => qq[UPDATE token SET count = count + 1 WHERE id = ?],
+ inc_token_count => qq[UPDATE token SET count = count + ? WHERE id = ?],
# ->stats()
expr_total => qq[SELECT COUNT(*) FROM expr;],
token_total => qq[SELECT COUNT(*) FROM token;],
prev_total => qq[SELECT COUNT(*) FROM prev_token;],
next_total => qq[SELECT COUNT(*) FROM next_token;],
# Defaults, overriden in SQLite
last_expr_rowid => qq[SELECT id FROM expr ORDER BY id DESC LIMIT 1;],
last_token_rowid => qq[SELECT id FROM token ORDER BY id DESC LIMIT 1;],
token_count => qq[SELECT count FROM token WHERE id = ?;],
add_expr => qq[INSERT INTO expr ($columns) VALUES ($ids)],
expr_id => qq[SELECT id FROM expr WHERE ] . join(' AND ', map { "token${_}_id = ?" } @orders),
);
for my $table (qw(next_token prev_token)) {
$state{"${table}_links"} = qq[SELECT SUM(count) FROM $table WHERE expr_id = ?;],
$state{"${table}_count"} = qq[SELECT count FROM $table WHERE expr_id = ? AND token_id = ?;],
- $state{"${table}_inc"} = qq[UPDATE $table SET count = count + 1 WHERE expr_id = ? AND token_id = ?],
- $state{"${table}_add"} = qq[INSERT INTO $table (expr_id, token_id, count) VALUES (?, ?, 1);],
+ $state{"${table}_inc"} = qq[UPDATE $table SET count = count + ? WHERE expr_id = ? AND token_id = ?],
+ $state{"${table}_add"} = qq[INSERT INTO $table (expr_id, token_id, count) VALUES (?, ?, ?);],
$state{"${table}_get"} = qq[SELECT token_id, count FROM $table WHERE expr_id = ?;],
}
for (@orders) {
$state{"expr_by_token${_}_id"} = qq[SELECT * FROM expr WHERE token${_}_id = ? ORDER BY $q_rand LIMIT 1;];
}
# DBD specific queries / optimizations / munging
given ($dbd) {
when ('SQLite') {
# Optimize these for SQLite
$state{expr_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'expr';];
$state{token_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'token';];
$state{prev_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'prev_token';];
$state{next_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'next_token';];
}
}
# Sort to make error output easier to read if this fails. The
# order doesn't matter.
my @queries = sort keys %state;
my %sth = map { $_ => $dbh->prepare($state{$_}) } @queries;
return \%sth;
}
1;
=head1 NAME
Hailo::Storage::Schema - Deploy the database schema Hailo uses
=head1 DESCRIPTION
Implements functions to create the database schema and prepared
database queries L<Hailo::Storage> needs.
This class is internal to Hailo and has no public interface.
=head1 AUTHOR
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
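Editor's note: the schema changes above parameterize the increment (count = count + ?) so a flushed cache entry applies in one statement per row, and the engine's _inc_link pairs that UPDATE with an INSERT fallback when no row matched. A sketch of that update-or-insert pattern with the schema trimmed down and illustrative data:

    use strict;
    use warnings;
    use DBI;

    my $dbh = DBI->connect('dbi:SQLite:dbname=:memory:', '', '', { RaiseError => 1 });
    $dbh->do('CREATE TABLE next_token (expr_id INTEGER, token_id INTEGER, count INTEGER NOT NULL)');

    my $inc = $dbh->prepare('UPDATE next_token SET count = count + ? WHERE expr_id = ? AND token_id = ?');
    my $add = $dbh->prepare('INSERT INTO next_token (expr_id, token_id, count) VALUES (?, ?, ?)');

    sub inc_link {
        my ($expr_id, $token_id, $count) = @_;
        $inc->execute($count, $expr_id, $token_id);
        # rows() reports how many rows the UPDATE touched; zero means the
        # link does not exist yet, so insert it with the initial count.
        $add->execute($expr_id, $token_id, $count) if $inc->rows == 0;
    }

    inc_link(1, 2, 3);   # inserts the row with count 3
    inc_link(1, 2, 2);   # bumps count to 5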
diff --git a/t/command/shell.t b/t/command/shell.t
index 9b32554..b24e44c 100644
--- a/t/command/shell.t
+++ b/t/command/shell.t
@@ -1,100 +1,100 @@
use 5.010;
use strict;
use warnings;
use Test::Script::Run;
-use Test::More tests => 58;
+use Test::More tests => 60;
my $app = 'hailo';
## --examples
{
my ($return, $stdout, $stderr) = run_script( $app, [ '--help', '--examples']);
like($stdout, qr{examples:}, "examples shown with --help --examples");
}
## Basic usage
run_not_ok( $app, [], 'hailo with no options' );
## --version
run_ok( $app, [ '--version' ], 'hailo with --version' );
run_ok( $app, [ '--version' ], 'hailo with --version' );
## --no-help
run_ok( $app, [ '--no-help' ], "Don't help me" );
## --help
{
my ($return, $stdout, $stderr) = run_script( $app, [ '--help' ]);
cmp_ok($return, '==', 1, 'Exit status is correct');
like($stderr, qr/^$/s, 'no stderr');
like($stdout, qr{usage: hailo}, 'Got usage header');
like($stdout, qr{--progress\s+Display progress}, 'Got --progress');
like($stdout, qr{files are assumed to be UTF-8 encoded}, 'Got UTF-8 note');
unlike($stdout, qr{examples:}, "no examples on normal output");
}
# --help matches POD in bin/hailo
{
SKIP: {
my ($return, $stdout, $stderr) = run_script( $app, [ '--help' ]);
my $hailo = 'bin/hailo';
skip "There's no bin/hailo", 1 unless -r $hailo;
my $content = do {
local $/;
open my $fh, "<", $hailo or skip "Couldn't open $hailo: $!", 1;
<$fh>;
};
my $usage = ($content =~ m/^=head1\s+USAGE(.+?)\n^=head1/ms)[0];
$usage =~ s/^\s*//s;
my @usage = split /\n/, $usage;
my @stdout = split /\n/, $stdout;
subtest "bin/hailo POD matches --help" => sub {
TODO: {
local $TODO = 'stdout gets truncated sometimes or something';
for (my $i = 0; $i < @stdout; $i++) {
is($usage[$i], $stdout[$i], "Line #$i of POD usage matched --help");
}
done_testing();
};
}
}
}
{
my ($return, $stdout, $stderr) = run_script( $app, [ '--blah-blah-blah' ]);
cmp_ok($return, '==', 1, 'Exit status is correct');
like($stderr, qr/^$/s, 'no stderr');
like($stdout, qr/Unknown option: blah-blah-blah/, 'Unknown option');
like($stdout, qr{usage: hailo}, 'Got usage header');
like($stdout, qr{--progress\s+Display progress}, 'Got --progress');
like($stdout, qr{files are assumed to be UTF-8 encoded}, 'Got UTF-8 note');
unlike($stdout, qr{examples:}, "no examples on error");
my (@opt) = $stdout =~ /(-[A-Za-z]|--\w+)\b/g;
like($stdout, qr/$_\b/, "stdout contained $_ option") for @opt;
}
## XXX: Doesn't work!
# ## --reply
# {
# $DB::single = 1;
# my ($return, $stdout, $stderr) = run_script( $app, [ '--brain', ':memory:', '--train', '/home/avar/g/hailo/t/command/shell.t', 'my' ]);
# cmp_ok($return, '==', 0, 'Exit status is correct');
# like($stderr, qr/^$/s, 'no stderr');
# ok($stdout, "stdout: $stdout");
# }
# ## --random-reply
# {
# my ($return, $stdout, $stderr) = run_script( $app, [ '--brain', ':memory:', '--train', abs_path(__FILE__), '--random-reply' ]);
# cmp_ok($return, '==', 0, 'Exit status is correct');
# like($stderr, qr/^$/s, 'no stderr');
# ok($stdout, "stdout: $stdout");
# }
|
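Editor's note: the shell.t changes above lean on Test::Script::Run's run_script to capture the exit status and both output streams. A minimal sketch of that pattern, with the expectations grounded in the --version handling shown earlier (`say "hailo $version"`):

    use strict;
    use warnings;
    use Test::More tests => 2;
    use Test::Script::Run;

    # run_script returns the exit status plus captured stdout/stderr,
    # exactly as the shell.t blocks above destructure it.
    my ($return, $stdout, $stderr) = run_script('hailo', ['--version']);
    cmp_ok($return, '==', 0, 'exited successfully');
    like($stdout, qr/^hailo/, 'printed a version banner');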
hinrik/hailo | 245b28325fd86b1765f0afbaa300eaa35dd85a97 | Fix typo in Changes | diff --git a/Changes b/Changes
index 4c31e45..138a782 100644
--- a/Changes
+++ b/Changes
@@ -1,539 +1,539 @@
Revision history for Hailo
{{$NEXT}}
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
* Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- - Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatability
+ - Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '—' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatability but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC3::Run calling a script that called FindBin didn't work
as expected.
- Add more testing including a really basic test for DBIx::Class
debugging (from the dbix-class branch) and making TAP output
more verbose.
|
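Editor's note: the 0.33 entry in the changelog above credits much of its speedup to swapping fetchall_hashref for fetchall_arrayref in a tight loop. A small sketch of the two DBI calls side by side (table and data are illustrative):

    use strict;
    use warnings;
    use DBI;

    my $dbh = DBI->connect('dbi:SQLite:dbname=:memory:', '', '', { RaiseError => 1 });
    $dbh->do('CREATE TABLE next_token (token_id INTEGER, count INTEGER)');
    $dbh->do('INSERT INTO next_token VALUES (1, 5)');
    $dbh->do('INSERT INTO next_token VALUES (2, 3)');

    my $sth = $dbh->prepare('SELECT token_id, count FROM next_token');

    # Builds a hash per row, keyed by the named column: convenient but slow.
    $sth->execute;
    my $by_id = $sth->fetchall_hashref('token_id');

    # Plain array refs indexed by column position: much cheaper per row.
    $sth->execute;
    my $rows = $sth->fetchall_arrayref;   # [ [ 1, 5 ], [ 2, 3 ] ]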
hinrik/hailo | d5c8a547ae231a4c0a34f142cabb55531404a913 | Make the Word tokenizer work correctly when the input has newlines | diff --git a/Changes b/Changes
index 47630b6..4c31e45 100644
--- a/Changes
+++ b/Changes
@@ -1,532 +1,533 @@
Revision history for Hailo
{{$NEXT}}
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Word tokenizer:
* Preserve casing of Emacs key sequences like "C-u"
* Don't capitalize words after ellipses (e.g. "Wait... what?")
* When adding a full stop to paragraphs which end with a quoted word,
add it inside the quotes (e.g. "I heard him say 'hello there.'")
+ * Make it work correctly when the input has newlines
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatability
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
* Recognize "e.g." and other abbreviations as single tokens.
* Recognize '—' as a word-separating dash.
* Put a full stop after words separated by dots, like "sources.list".
* Fix capitalization problems caused by "..."
* Capitalize "foo!" and "foo."
* Preserve casing of words such as "POE-Component-IRC"
* Catch "Acme::POE::Tree" as one word, and preserve its casing
* Catch "rollin'" as one word when it makes sense
* Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
* Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 6190f35..e7d04f6 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,226 +1,228 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$POINT\d+(?:$POINT\d+)*|\d+(?:$POINT\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”„«»」』›‚]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$WORD_TYPES|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
-my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
+my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?:\s+|$)|$CLOSE_QUOTE|$TERMINATOR|\s+|$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
- my ($self, $line) = @_;
+ my ($self, $input) = @_;
my @tokens;
- my @chunks = split /\s+/, $line;
+ $input =~ s/$DASH\K\s*\n+\s*//;
+ $input =~ s/\s*\n+\s*/ /gm;
+ my @chunks = split /\s+/, $input;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index 93c1f98..69e48e4 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,467 +1,477 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example . com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<. com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources . list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
+ [
+ "I use\nPOE-Component-IRC",
+ [qw<I use POE-Component-IRC>],
+ "I use POE-Component-IRC.",
+ ],
+ [
+ "I use POE-Component- \n IRC",
+ [qw<I use POE-Component-IRC>],
+ "I use POE-Component-IRC.",
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
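A note on the make_tokens change in the diff above: the two new substitutions normalize newlines before the input is split into chunks, so a word wrapped across lines with a trailing dash (as in the new "POE-Component- \n IRC" test case) is rejoined, while any other newline run collapses to a single space. Below is a minimal standalone sketch of just that normalization step, assuming the same dash class the tokenizer uses; the sample strings are taken from the new tests:

use strict;
use warnings;

my $DASH = qr/[–-]/;    # en dash or hyphen, as in the tokenizer

sub normalize_newlines {
    my ($input) = @_;
    $input =~ s/$DASH\K\s*\n+\s*//;    # rejoin a word split by a trailing dash
    $input =~ s/\s*\n+\s*/ /gm;        # collapse remaining newlines to one space
    return $input;
}

print normalize_newlines("I use POE-Component- \n IRC"), "\n";    # I use POE-Component-IRC
print normalize_newlines("I use\nPOE-Component-IRC"), "\n";       # I use POE-Component-IRC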
hinrik/hailo | 9da75eac89abc1ae35bc82fa16c6f8204dfe8a54 | Separate these from the rest | diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index b546ca1..6190f35 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,224 +1,226 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$POINT\d+(?:$POINT\d+)*|\d+(?:$POINT\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
+
+# special tokens
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”„«»」』›‚]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$WORD_TYPES|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
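The commit above is purely organizational: it groups the Twitter-name, email, Perl-class and ssh-URI patterns under a "# special tokens" comment. As a rough illustration of what that group matches, here is a hedged sketch reusing simplified copies of those patterns against a few invented sample strings (the samples are not from Hailo's own tests):

use strict;
use warnings;

# simplified copies of the "special tokens" patterns grouped above
my $TWAT_NAME  = qr/ \@ [A-Za-z0-9_]+ /x;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI  = qr{ (?: \w+ \+ ) ssh:// \S+ }x;

for my $sample ('@hailo_bot', 'POE::Component::IRC', 'svn+ssh://example.org/repo') {
    if    ($sample =~ /^$TWAT_NAME$/)  { print "$sample: Twitter name\n" }
    elsif ($sample =~ /^$PERL_CLASS$/) { print "$sample: Perl class name\n" }
    elsif ($sample =~ /^$EXTRA_URI$/)  { print "$sample: ssh-style URI\n" }
}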
hinrik/hailo | 8e84b260dd03e1bad2bc733b0f32f0ea07ff2ab2 | Rename this regex for clarity | diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 365f586..b546ca1 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,224 +1,224 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
-my $DECIMAL = qr/[.,]/;
+my $POINT = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
-my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
+my $NUMBER = qr/$POINT\d+(?:$POINT\d+)*|\d+(?:$POINT\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”„«»」』›‚]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$WORD_TYPES|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
-my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
+my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$POINT(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
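The rename above ($DECIMAL to $POINT) is behavior-preserving: the class still matches a period or comma, and $NUMBER builds on it so decimal-style tokens stay single words. A small sketch of that pattern in isolation, with sample inputs borrowed from the tests earlier in this log ("3.14", ".40", "0,40", "4.1GB"):

use strict;
use warnings;

my $POINT  = qr/[.,]/;
# point-led fractions like ".40", or digits with point groups like "3.14" or "4.1GB"
my $NUMBER = qr/$POINT\d+(?:$POINT\d+)*|\d+(?:$POINT\d+)+\w*/;

for my $sample ('3.14', '.40', '0,40', '4.1GB') {
    print "$sample is a single number token\n" if $sample =~ /^(?:$NUMBER)$/;
}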
hinrik/hailo | 9f44e6b2d7118581c8f1cc0ffa189f0f57c633e0 | Make Changes prettier | diff --git a/Changes b/Changes
index 8110474..47630b6 100644
--- a/Changes
+++ b/Changes
@@ -1,564 +1,563 @@
Revision history for Hailo
{{$NEXT}}
- - Preserve casing of Emacs key sequences like "C-u"
-
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- - Don't capitalize words after ellipses (e.g. "Wait... what?")
-
- - When adding a full stop to paragraphs which end with a quoted word,
- add it inside the quotes (e.g. "I heard him say 'hello there.'")
+ - Word tokenizer:
+ * Preserve casing of Emacs key sequences like "C-u"
+ * Don't capitalize words after ellipses (e.g. "Wait... what?")
+ * When adding a full stop to paragraphs which end with a quoted word,
+ add it inside the quotes (e.g. "I heard him say 'hello there.'")
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
- Recognize "e.g." and other abbreviations as single tokens.
- Recognize 'â' as a word-separating dash.
- Put a full stop after words separated by dots, like "sources.list".
- Fix capitalization problems caused by "..."
- Capitalize "foo!" and "foo."
- Preserve casing of words such as "POE-Component-IRC"
- Catch "Acme::POE::Tree" as one word, and preserve its casing
- Catch "rollin'" as one word when it makes sense
- Catch 'foo-" as one word when it makes sense, and capitalize at
- start of a sentence
- Capitalize quoted words at the start of a line
+ * Recognize "e.g." and other abbreviations as single tokens.
+ * Recognize '–' as a word-separating dash.
+ * Put a full stop after words separated by dots, like "sources.list".
+ * Fix capitalization problems caused by "..."
+ * Capitalize "foo!" and "foo."
+ * Preserve casing of words such as "POE-Component-IRC"
+ * Catch "Acme::POE::Tree" as one word, and preserve its casing
+ * Catch "rollin'" as one word when it makes sense
+ * Catch 'foo-" as one word when it makes sense, and capitalize at
+ start of a sentence
+ * Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help; work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization); overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which
does not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC3::Run calling a script that called FindBin didn't work
as expected.
- Add more testing including a really basic test for DBIx::Class
debugging (from the dbix-class branch) and making TAP output
more verbose.
- Run all the tests Hailo::Test runs internally for each engine
one-by-one using the DBD::SQLite memory driver. This makes sure
the internal tests don't depend on each other in odd ways.
0.29 2010-03-13 10:32:43
- Remove Data::Random as a dependency. It fails the most tests of
all the dists we depend on and we don't really need it for
anything.
0.28 2010-03-13 10:05:57
- Update README.pod which hadn't been bumped since 0.25
- Fix example in Hailo.pm's SYNOPSIS that didn't work and add an
example for a bare ->reply().
- Fix some code perlcritic whined about.
0.27 2010-03-13 09:41:46
- Stop depending on Term::ReadLine::Gnu and use Term::ReadLine
instead. I tested Term::ReadLine once and found that it was
really bad (no history, C-p, C-n etc.) but now with
|
hinrik/hailo | 2b058023b8869ca3bdd40770da913db57838befa | Add full stops inside quotes, not outside them | diff --git a/Changes b/Changes
index 4a6b63c..8110474 100644
--- a/Changes
+++ b/Changes
@@ -1,531 +1,534 @@
Revision history for Hailo
{{$NEXT}}
- Preserve casing of Emacs key sequences like "C-u"
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
- Don't capitalize words after ellipses (e.g. "Wait... what?")
+ - When adding a full stop to paragraphs which end with a quoted word,
+ add it inside the quotes (e.g. "I heard him say 'hello there.'")
+
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
        - Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
          Recognize '–' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
          This bug broke the hybrid disk <-> in_memory SQLite mode; it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
          which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
        - Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
        - Rename test files in t/ to drop the DBD- prefix. Tests like
          Words-*.t were also moved into
          sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
        - The Regexp::Common pattern added in 0.44 slowed down Hailo by up
          to 45% in pathological cases (tests doing lots of
          tokenization); overall test suite slowdown was closer to 10%.
          Now the pattern is only compiled once for the lifetime of the
          process with /o (a minimal sketch follows this entry). This
          trick has also been applied to a few other patterns.
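          A minimal sketch of the /o trick described above; the function
          name is illustrative:

              use Regexp::Common qw/ URI /;
              sub has_uri {
                  my ($text) = @_;
                  # /o interpolates $RE{URI} into the pattern once per
                  # process instead of on every call
                  return $text =~ / $RE{URI} /xo;
              }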
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
        - Add URI support to the Words tokenizer. It will now use
          Regexp::Common's URI regex to tokenize URIs as-is (see the
          sketch after this entry).
          This improves performance a lot on input that contains URIs;
          previously Hailo would split them up nonsensically, which
          would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
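          A short sketch of the URI behaviour described in the 0.44 entry
          above; the input and expected token are taken from the test
          suite:

              use Hailo::Tokenizer::Words;
              my $toke   = Hailo::Tokenizer::Words->new();
              my $tokens = $toke->make_tokens('Where did I go? http://foo.bar/');
              # the URI survives as one token instead of being split up:
              # [..., 'http://foo.bar/']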
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
        - Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
        - Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
        - Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
          Furthermore replace the use of ->fetchall_hashref in a tight
          loop with ->fetchall_arrayref (a sketch follows this entry).
          This sped up mass replies by almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
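          A minimal DBI sketch of the ->fetchall_hashref to
          ->fetchall_arrayref swap described above; the SQL and column
          names are illustrative, and a connected $dbh is assumed:

              my $sth = $dbh->prepare('SELECT id, count FROM expr WHERE token_id = ?');
              # slower in a tight loop: builds a hash per row, keyed on 'id'
              $sth->execute($token_id);
              my $by_id = $sth->fetchall_hashref('id');
              # faster: a plain array of arrayrefs, no per-row hashes
              $sth->execute($token_id);
              my $rows = $sth->fetchall_arrayref;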
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
          intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
          are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
          IPC::Run3 calling a script that called FindBin didn't work
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 56b9989..365f586 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,224 +1,224 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$WORD_TYPES|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
- $reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
+ $reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)\K($CLOSE_QUOTE?)$/.$1/o;
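    # The \K($CLOSE_QUOTE?)$ form above puts the appended full stop inside
    # a trailing quote, e.g. (from the tests below) 'cpanm is a true
    # "religion"' now yields 'Cpanm is a true "religion."' rather than
    # 'Cpanm is a true "religion".'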
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
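A minimal usage sketch of the tokenizer above, round-tripping a string
through make_tokens/make_output; the expected output is taken from the
updated tests below:

    use Hailo::Tokenizer::Words;
    my $toke   = Hailo::Tokenizer::Words->new();
    # each token is a [$spacing_attribute, $text] pair
    my $tokens = $toke->make_tokens(q{cpanm is a true "religion"});
    my $output = $toke->make_output($tokens);
    print "$output\n";   # Cpanm is a true "religion."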
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index 052af7c..93c1f98 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,467 +1,467 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example . com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
            'Pretty girl like her "peak". oh and you’re touching yourself',
            [qw<pretty girl like her " peak ". oh and you’re touching yourself>],
            'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
- 'Cpanm is a true "religion".'
+ 'Cpanm is a true "religion."'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
- 'Cpanm is a true "anti-religion".'
+ 'Cpanm is a true "anti-religion."'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<. com bubble>],
'.com bubble.'
],
[
            'við vorum þar. í norður- eða vesturhlutanum',
            [qw<við vorum þar . í norður- eða vesturhlutanum>],
            'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
            'she´ll be doing it now',
            [qw<she´ll be doing it now>],
            'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
            'Foo http://æðislegt.is,>>> bar',
            [qw{foo http://æðislegt.is}, ',>>>', 'bar'],
            'Foo http://æðislegt.is,>>> bar.',
],
[
            'Foo http://æðislegt.is,$ bar',
            [qw<foo http://æðislegt.is>, ',$', 'bar'],
            'Foo http://æðislegt.is,$ bar.',
],
[
            'http://google.is/search?q="stiklað+á+stóru"',
            [qw{http://google.is/search?q= " stiklað + á + stóru "}],
            'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
            'dong–licking is a really valuable book.',
            [qw<dong–licking is a really valuable book .>],
            'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources . list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
[
"HE'S A NIGGER! HE'S A... wait",
[qw<HE'S A NIGGER ! HE'S A ... wait>],
"HE'S A NIGGER! HE'S A... wait.",
],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
hinrik/hailo | 8df023139f7791fd32b0a2d4594ea65784b59a7e | Don't capitalize words after ellipses | diff --git a/Changes b/Changes
index 07b5723..4a6b63c 100644
--- a/Changes
+++ b/Changes
@@ -1,529 +1,531 @@
Revision history for Hailo
{{$NEXT}}
- Preserve casing of Emacs key sequences like "C-u"
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
- Speed up learning quite a bit (up to 25%) by using more efficient SQL.
+ - Don't capitalize words after ellipses (e.g. "Wait... what?")
+
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
        - Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
          Recognize '–' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
          This bug broke the hybrid disk <-> in_memory SQLite mode; it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
          which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
        - Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
        - Rename test files in t/ to drop the DBD- prefix. Tests like
          Words-*.t were also moved into
          sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
          tokenization); overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
          This improves performance a lot on input that contains URIs;
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
        - Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
        - Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
        - Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
          intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
          are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
          IPC::Run3 calling a script that called FindBin didn't work
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 89fb735..56b9989 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,224 +1,224 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!â½,;.:]/;
-my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
+my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$WORD_TYPES|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
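The URI branch in make_tokens above leans on a trick worth isolating: $RE{URI} from Regexp::Common only matches ASCII, so the chunk is matched through an ASCII-mangled copy and the match-end offset ($+[0]) is used to slice the URI out of the original string. A minimal standalone sketch of the same idea:

    use utf8;
    use Regexp::Common qw/ URI /;

    my $chunk = 'http://beint.lýðræði.is';

    # Substitute non-ASCII characters so $RE{URI} has a chance to match
    (my $ascii = $chunk) =~ s/[^[:ascii:]]/a/g;

    if ($ascii =~ /^$RE{URI}/) {
        # character offsets are identical, so slice the *original* chunk
        my $uri = substr $chunk, 0, $+[0];
        print "$uri\n";   # http://beint.lýðræði.is
    }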
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
$reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
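The \x1B dance in make_output above deserves a note: a single s///g pass cannot capitalize two adjacent boundary words, because the /g scan resumes after each replacement and so skips alternating boundaries. Marking freshly-capitalized words with ESC, a byte that won't appear in normal reply text, lets a second pass treat them as boundaries too. A reduced sketch of the technique:

    my $text = 'one. two. three. four.';

    # Pass 1: capitalize and mark words following "word. "; the /g scan
    # resumes after each match, so every other boundary word is missed
    $text =~ s/\w+\.\s+\K(\w+)/\x1B\u$1\x1B/g;

    # Pass 2: words following a *marked* word are boundaries too
    $text =~ s/\x1B\w+\x1B\.\s+\K(\w+)/\u$1/g;

    # Strip the markers
    $text =~ s/\x1B//g;

    print "$text\n";   # "one. Two. Three. Four."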
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
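Before the test diff below, a condensed round trip through the tokenizer's two public methods, matching how the tests exercise them; each token returned by make_tokens is an arrayref of [spacing attribute, text]:

    use 5.010;
    use Hailo::Tokenizer::Words;

    my $toke   = Hailo::Tokenizer::Words->new();
    my $tokens = $toke->make_tokens('what?! well...');

    say join ' ', map { $_->[1] } @$tokens;   # what ?! well ...
    say $toke->make_output($tokens);          # What?! Well...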
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index f75627a..052af7c 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,462 +1,467 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example . com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion".'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion".'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<. com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources . list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
[
"foo----------",
[qw<foo ---------->],
"foo----------",
],
+ [
+ "HE'S A NIGGER! HE'S A... wait",
+ [qw<HE'S A NIGGER ! HE'S A ... wait>],
+ "HE'S A NIGGER! HE'S A... wait.",
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
my @after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in under <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
@before = gettimeofday();
my $output = $toke->make_output($tokens);
@after = gettimeofday();
cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
hinrik/hailo | bb27a0e3eb026c5048898649ebff09494b27a769 | This isn't really a separator | diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index a5ea673..89fb735 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,225 +1,224 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‘]/;
my $TERMINATOR = qr/(?:[?!â½]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!â½,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/$WORD_TYPES|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
-my $SEPARATOR = qr/\s+|$ELLIPSIS/;
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
- $reply =~ s/$SEPARATOR$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
+ $reply =~ s/(?:$ELLIPSIS|\s+)$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
- $reply =~ s/(?:$SEPARATOR|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
+ $reply =~ s/(?:$ELLIPSIS|\s+|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
- $reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
+ $reply =~ s{(?:(?:$ELLIPSIS|\s+)|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | 0f9920c7aa7a728a2dca288293d5b32b396d58fe | Speed up learning quite a bit by using more efficient SQL | diff --git a/Changes b/Changes
index 598e73a..07b5723 100644
--- a/Changes
+++ b/Changes
@@ -1,527 +1,529 @@
Revision history for Hailo
{{$NEXT}}
- Preserve casing of Emacs key sequences like "C-u"
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
- Fixed a bug which caused the tokenizer to be very slow at capitalizing
replies which contain things like "script/osm-to-tilenumbers.pl"
+ - Speed up learning quite a bit (up to 25%) by using more efficient SQL.
+
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize '–' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC::Run3 calling a script that called FindBin didn't work
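The 0.33 entry above credits most of its speedup to swapping ->fetchall_hashref for ->fetchall_arrayref in a tight loop; arrayref fetches skip building, and keying, a hash per row. A self-contained sketch of the two DBI styles against a toy in-memory SQLite table (the table and values are illustrative only):

    use DBI;

    my $dbh = DBI->connect('dbi:SQLite:dbname=:memory:', '', '',
                           { RaiseError => 1 });
    $dbh->do('CREATE TABLE link (token_id INTEGER, count INTEGER)');
    $dbh->do('INSERT INTO link VALUES (1, 5), (2, 7)');

    my $sth = $dbh->prepare('SELECT token_id, count FROM link');

    # slower in hot loops: one hash per row, lookups by column name
    $sth->execute;
    my $rows = $sth->fetchall_hashref('token_id');
    print "$_ => $rows->{$_}{count}\n" for sort keys %$rows;

    # faster: flat arrayrefs with positional access, far less allocation
    $sth->execute;
    print "$_->[0] => $_->[1]\n" for @{ $sth->fetchall_arrayref };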
diff --git a/lib/Hailo/Engine/Default.pm b/lib/Hailo/Engine/Default.pm
index caf09f9..b1cd7b0 100644
--- a/lib/Hailo/Engine/Default.pm
+++ b/lib/Hailo/Engine/Default.pm
@@ -1,387 +1,382 @@
package Hailo::Engine::Default;
use 5.010;
use Any::Moose;
use List::Util qw<min first shuffle>;
use List::MoreUtils qw<uniq>;
with qw[ Hailo::Role::Arguments Hailo::Role::Engine ];
has repeat_limit => (
isa => 'Int',
is => 'rw',
lazy => 1,
default => sub {
my ($self) = @_;
my $order = $self->order;
return min(($order * 10), 50);
}
);
sub BUILD {
my ($self) = @_;
# This performance hack is here because in our tight loops calling
# $self->storage->sth->{...} is actually a significant part of the
# overall program execution time since we're doing two method
# calls and hash dereferences for each call to the database.
my $sth = $self->storage->sth;
while (my ($k, $v) = each %$sth) {
$self->{"_sth_$k"} = $v;
}
return;
}
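The BUILD hack above flattens the $self->storage->sth->{$name} chain, two method calls plus hash dereferences per statement execution, into a single hash lookup on $self. A self-contained toy benchmark of the same hoisting idea; the class names here are made up for illustration:

    use Benchmark qw<cmpthese>;

    sub My::Engine::storage { $_[0]->{storage} }
    sub My::Storage::sth    { $_[0]->{sth} }

    my $obj = bless {
        storage => bless({ sth => { work => sub { 1 } } }, 'My::Storage'),
    }, 'My::Engine';

    # the BUILD-time hoist: resolve the chain once, keep the result
    $obj->{_sth_work} = $obj->storage->sth->{work};

    cmpthese(-1, {
        chained => sub { $obj->storage->sth->{work}->() },
        hoisted => sub { $obj->{_sth_work}->() },
    });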
## no critic (Subroutines::ProhibitExcessComplexity)
sub reply {
my $self = shift;
my $tokens = shift // [];
# we will favor these tokens when making the reply. Shuffle them
# and discard half.
my @key_tokens = do {
my $i = 0;
grep { $i++ % 2 == 0 } shuffle(@$tokens);
};
my $token_cache = $self->_resolve_input_tokens($tokens);
my @key_ids = keys %$token_cache;
# sort the rest by rareness
@key_ids = $self->_find_rare_tokens(\@key_ids, 2);
# get the middle expression
my $pivot_token_id = shift @key_ids;
my ($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
return unless defined $pivot_expr_id; # we don't know any expressions yet
# remove key tokens we're already using
@key_ids = grep { my $used = $_; !first { $_ == $used } @token_ids } @key_ids;
my %expr_cache;
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @token_ids;
return \@output;
}
sub _resolve_input_tokens {
my ($self, $tokens) = @_;
my %token_cache;
if (@$tokens == 1) {
my ($spacing, $text) = @{ $tokens->[0] };
my $token_info = $self->_token_resolve($spacing, $text);
if (defined $token_info) {
my ($id, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
else {
# when there's just one token, it could be ';' for example,
# which will have normal spacing when it appears alone, but
# suffix spacing in a sentence like "those things; foo, bar",
# so we'll be a bit more lax here by also looking for any
# token that has the same text
$token_info = $self->_token_similar($text);
if (defined $token_info) {
my ($id, $spacing, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
}
}
else {
for my $token (@$tokens) {
my ($spacing, $text) = @$token;
my $token_info = $self->_token_resolve($spacing, $text);
next if !defined $token_info;
my ($id, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
}
return \%token_cache;
}
sub _token_resolve {
my ($self, $spacing, $text) = @_;
$self->{_sth_token_resolve}->execute($spacing, $text);
return $self->{_sth_token_resolve}->fetchrow_arrayref;
}
sub _token_info {
my ($self, $id) = @_;
$self->{_sth_token_info}->execute($id);
my @res = $self->{_sth_token_info}->fetchrow_array;
return \@res;
}
sub learn {
my ($self, $tokens) = @_;
my $order = $self->order;
# only learn from inputs which are long enough
return if @$tokens < $order;
my (%token_cache, %expr_cache);
# resolve/add tokens and update their counter
for my $token (@$tokens) {
my $key = join '', @$token; # the key is "$spacing$text"
if (!exists $token_cache{$key}) {
$token_cache{$key} = $self->_token_id_add($token);
}
$self->{_sth_inc_token_count}->execute($token_cache{$key});
}
# process every expression of length $order
for my $i (0 .. @$tokens - $order) {
my @expr = map { $token_cache{ join('', @{ $tokens->[$_] }) } } $i .. $i+$order-1;
my $key = join('_', @expr);
if (!defined $expr_cache{$key}) {
$expr_cache{$key} = $self->_expr_id_add(\@expr);
}
my $expr_id = $expr_cache{$key};
# add link to next token for this expression, if any
if ($i < @$tokens - $order) {
my $next_id = $token_cache{ join('', @{ $tokens->[$i+$order] }) };
$self->_inc_link('next_token', $expr_id, $next_id);
}
# add link to previous token for this expression, if any
if ($i > 0) {
my $prev_id = $token_cache{ join('', @{ $tokens->[$i-1] }) };
$self->_inc_link('prev_token', $expr_id, $prev_id);
}
# add links to boundary token if appropriate
my $b = $self->storage->_boundary_token_id;
$self->_inc_link('prev_token', $expr_id, $b) if $i == 0;
$self->_inc_link('next_token', $expr_id, $b) if $i == @$tokens-$order;
}
return;
}
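To make the loop in learn() above concrete: for each window of $order tokens it upserts the expression, links it forward to the token after the window and backward to the token before it, and substitutes the boundary token at either end of the input. A database-free trace of that bookkeeping for four tokens at order 2 (stand-in values, no Hailo required):

    use 5.010;

    my $order    = 2;
    my @tokens   = qw<foo bar baz quux>;   # stand-ins for token ids
    my $boundary = '<s>';                  # stand-in for the boundary id

    for my $i (0 .. @tokens - $order) {
        my @expr = @tokens[$i .. $i + $order - 1];
        my $prev = $i > 0                ? $tokens[$i - 1]      : $boundary;
        my $next = $i < @tokens - $order ? $tokens[$i + $order] : $boundary;
        say "expr(@expr): prev=$prev next=$next";
    }
    # expr(foo bar):  prev=<s> next=baz
    # expr(bar baz):  prev=foo next=quux
    # expr(baz quux): prev=bar next=<s>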
# sort token ids based on how rare they are
sub _find_rare_tokens {
my ($self, $token_ids, $min) = @_;
return unless @$token_ids;
my %links;
for my $id (@$token_ids) {
next if exists $links{$id};
$self->{_sth_token_count}->execute($id);
$links{$id} = $self->{_sth_token_count}->fetchrow_array;
}
# remove tokens which are too rare
my @ids = grep { $links{$_} >= $min } @$token_ids;
@ids = sort { $links{$a} <=> $links{$b} } @ids;
return @ids;
}
# increase the link weight between an expression and a token
sub _inc_link {
my ($self, $type, $expr_id, $token_id) = @_;
- $self->{"_sth_${type}_count"}->execute($expr_id, $token_id);
- my $count = $self->{"_sth_${type}_count"}->fetchrow_array;
-
- if (defined $count) {
- $self->{"_sth_${type}_inc"}->execute($expr_id, $token_id);
- }
- else {
+ $self->{"_sth_${type}_inc"}->execute($expr_id, $token_id);
+ if (!$self->{"_sth_${type}_inc"}->rows) {
$self->{"_sth_${type}_add"}->execute($expr_id, $token_id);
}
return;
}
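The rewrite of _inc_link above is where this commit's "more efficient SQL" lives: instead of SELECTing the link count and then choosing between UPDATE and INSERT, it fires the UPDATE unconditionally and falls back to INSERT only when rows() reports that nothing matched. The actual statements are prepared in Hailo::Storage; this is a rough standalone sketch, with the next_token table layout assumed from the schema:

    use DBI;

    my $dbh = DBI->connect('dbi:SQLite:dbname=brain.sqlite', '', '',
                           { RaiseError => 1 });

    my $inc = $dbh->prepare('UPDATE next_token SET count = count + 1
                              WHERE expr_id = ? AND token_id = ?');
    my $add = $dbh->prepare('INSERT INTO next_token (expr_id, token_id, count)
                             VALUES (?, ?, 1)');

    sub inc_link {
        my ($expr_id, $token_id) = @_;
        # one round trip in the common case; INSERT only on a miss
        $inc->execute($expr_id, $token_id);
        $add->execute($expr_id, $token_id) unless $inc->rows;
    }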
# look up/add an expression id based on tokens
sub _expr_id_add {
my ($self, $token_ids) = @_;
$self->{_sth_expr_id}->execute(@$token_ids);
my $expr_id = $self->{_sth_expr_id}->fetchrow_array();
return $expr_id if defined $expr_id;
$self->{_sth_add_expr}->execute(@$token_ids);
return $self->storage->dbh->last_insert_id(undef, undef, "expr", undef);
}
# return token id if the token exists
sub _token_id {
my ($self, $token_info) = @_;
$self->{_sth_token_id}->execute(@$token_info);
my $token_id = $self->{_sth_token_id}->fetchrow_array();
return unless defined $token_id;
return $token_id;
}
# get token id (adding the token if it doesn't exist)
sub _token_id_add {
my ($self, $token_info) = @_;
my $token_id = $self->_token_id($token_info);
$token_id = $self->_add_token($token_info) unless defined $token_id;
return $token_id;
}
# return all tokens (regardless of spacing) that consist of this text
sub _token_similar {
my ($self, $token_text) = @_;
$self->{_sth_token_similar}->execute($token_text);
return $self->{_sth_token_similar}->fetchrow_arrayref;
}
# add a new token and return its id
sub _add_token {
my ($self, $token_info) = @_;
$self->{_sth_add_token}->execute(@$token_info);
return $self->storage->dbh->last_insert_id(undef, undef, "token", undef);
}
# return a random expression containing the given token
sub _random_expr {
my ($self, $token_id) = @_;
my $expr;
if (!defined $token_id) {
$self->{_sth_random_expr}->execute();
$expr = $self->{_sth_random_expr}->fetchrow_arrayref();
}
else {
# try the positions in a random order
for my $pos (shuffle 0 .. $self->order-1) {
my $column = "token${pos}_id";
# get a random expression which includes the token at this position
$self->{"_sth_expr_by_$column"}->execute($token_id);
$expr = $self->{"_sth_expr_by_$column"}->fetchrow_arrayref();
last if defined $expr;
}
}
return unless defined $expr;
return @$expr;
}
# return a new next/previous token
sub _pos_token {
my ($self, $pos, $expr_id, $key_tokens) = @_;
$self->{"_sth_${pos}_token_get"}->execute($expr_id);
my $pos_tokens = $self->{"_sth_${pos}_token_get"}->fetchall_arrayref();
if (defined $key_tokens) {
for my $i (0 .. $#{ $key_tokens }) {
my $want_id = $key_tokens->[$i];
my @ids = map { $_->[0] } @$pos_tokens;
my $has_id = grep { $_ == $want_id } @ids;
next unless $has_id;
return splice @$key_tokens, $i, 1;
}
}
my @novel_tokens;
for my $token (@$pos_tokens) {
push @novel_tokens, ($token->[0]) x $token->[1];
}
return $novel_tokens[rand @novel_tokens];
}
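_pos_token above ends with a compact weighted-random pick: each candidate token id is replicated by its link count, so a uniform rand over the replicated list selects tokens in proportion to how often they followed the expression. The trick in isolation:

    use 5.010;

    # (token => link count) pairs, as fetched for one expression
    my @pos_tokens = ([foo => 1], [bar => 3]);

    my @weighted;
    for my $token (@pos_tokens) {
        push @weighted, ($token->[0]) x $token->[1];  # bar fills 3 of 4 slots
    }

    # a uniform pick over the replicated list is a weighted pick over tokens
    say $weighted[rand @weighted];   # "bar" about 75% of the time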
sub _construct_reply {
my ($self, $what, $expr_id, $token_ids, $expr_cache, $key_ids) = @_;
my $order = $self->order;
my $repeat_limit = $self->repeat_limit;
my $boundary_token = $self->storage->_boundary_token_id;
my $i = 0;
while (1) {
if (($i % $order) == 0 and
(($i >= $repeat_limit * 3) ||
($i >= $repeat_limit and uniq(@$token_ids) <= $order))) {
last;
}
my $id = $self->_pos_token($what, $expr_id, $key_ids);
last if $id == $boundary_token;
my @ids;
given ($what) {
when ('next') {
push @$token_ids, $id;
@ids = @$token_ids[-$order..-1];
}
when ('prev') {
unshift @$token_ids, $id;
@ids = @$token_ids[0..$order-1];
}
}
my $key = join '_', @ids;
if (!defined $expr_cache->{$key}) {
$expr_cache->{$key} = $self->_expr_id_add(\@ids);
}
$expr_id = $expr_cache->{$key};
} continue {
$i++;
}
return;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Default - The default engine backend for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>.
It generates the reply in one go, while favoring some of the tokens in the
input, and returns it. It is fast and the replies are decent, but you can
get better replies (at the cost of speed) with the
L<Scored|Hailo::Engine::Scored> engine.
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
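As the DESCRIPTION above says, this engine favors speed, while the Scored engine added in this release trades speed for reply quality. Switching between them goes through Hailo's engine_class attribute; a brief sketch:

    use Hailo;

    # fast, decent replies (the default engine)
    my $quick = Hailo->new(brain => 'brain.sqlite');

    # slower, better replies: score many candidates, keep the best
    my $fancy = Hailo->new(
        brain        => 'brain.sqlite',
        engine_class => 'Scored',
    );

    print $fancy->reply('tell me about markov chains'), "\n";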
|
hinrik/hailo | c0c8a5c27d3b1d4e5d0bbfe023523e0e79ea89b7 | Combine two subs into one | diff --git a/lib/Hailo/Engine/Default.pm b/lib/Hailo/Engine/Default.pm
index 84356b1..caf09f9 100644
--- a/lib/Hailo/Engine/Default.pm
+++ b/lib/Hailo/Engine/Default.pm
@@ -1,393 +1,387 @@
package Hailo::Engine::Default;
use 5.010;
use Any::Moose;
use List::Util qw<min first shuffle>;
use List::MoreUtils qw<uniq>;
with qw[ Hailo::Role::Arguments Hailo::Role::Engine ];
has repeat_limit => (
isa => 'Int',
is => 'rw',
lazy => 1,
default => sub {
my ($self) = @_;
my $order = $self->order;
return min(($order * 10), 50);
}
);
sub BUILD {
my ($self) = @_;
# This performance hack is here because in our tight loops calling
# $self->storage->sth->{...} is actually a significant part of the
# overall program execution time since we're doing two method
# calls and hash dereferences for each call to the database.
my $sth = $self->storage->sth;
while (my ($k, $v) = each %$sth) {
$self->{"_sth_$k"} = $v;
}
return;
}
## no critic (Subroutines::ProhibitExcessComplexity)
sub reply {
my $self = shift;
my $tokens = shift // [];
# we will favor these tokens when making the reply. Shuffle them
# and discard half.
my @key_tokens = do {
my $i = 0;
grep { $i++ % 2 == 0 } shuffle(@$tokens);
};
my $token_cache = $self->_resolve_input_tokens($tokens);
my @key_ids = keys %$token_cache;
# sort the rest by rareness
@key_ids = $self->_find_rare_tokens(\@key_ids, 2);
# get the middle expression
my $pivot_token_id = shift @key_ids;
my ($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
return unless defined $pivot_expr_id; # we don't know any expressions yet
# remove key tokens we're already using
@key_ids = grep { my $used = $_; !first { $_ == $used } @token_ids } @key_ids;
my %expr_cache;
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @token_ids;
return \@output;
}
sub _resolve_input_tokens {
my ($self, $tokens) = @_;
my %token_cache;
if (@$tokens == 1) {
my ($spacing, $text) = @{ $tokens->[0] };
my $token_info = $self->_token_resolve($spacing, $text);
if (defined $token_info) {
my ($id, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
else {
# when there's just one token, it could be ';' for example,
# which will have normal spacing when it appears alone, but
# suffix spacing in a sentence like "those things; foo, bar",
# so we'll be a bit more lax here by also looking for any
# token that has the same text
$token_info = $self->_token_similar($text);
if (defined $token_info) {
my ($id, $spacing, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
}
}
else {
for my $token (@$tokens) {
my ($spacing, $text) = @$token;
my $token_info = $self->_token_resolve($spacing, $text);
next if !defined $token_info;
my ($id, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
}
return \%token_cache;
}
sub _token_resolve {
my ($self, $spacing, $text) = @_;
$self->{_sth_token_resolve}->execute($spacing, $text);
return $self->{_sth_token_resolve}->fetchrow_arrayref;
}
sub _token_info {
my ($self, $id) = @_;
$self->{_sth_token_info}->execute($id);
my @res = $self->{_sth_token_info}->fetchrow_array;
return \@res;
}
sub learn {
my ($self, $tokens) = @_;
my $order = $self->order;
# only learn from inputs which are long enough
return if @$tokens < $order;
my (%token_cache, %expr_cache);
# resolve/add tokens and update their counter
for my $token (@$tokens) {
my $key = join '', @$token; # the key is "$spacing$text"
if (!exists $token_cache{$key}) {
$token_cache{$key} = $self->_token_id_add($token);
}
$self->{_sth_inc_token_count}->execute($token_cache{$key});
}
# process every expression of length $order
for my $i (0 .. @$tokens - $order) {
my @expr = map { $token_cache{ join('', @{ $tokens->[$_] }) } } $i .. $i+$order-1;
my $key = join('_', @expr);
if (!defined $expr_cache{$key}) {
- my $expr_id = $self->_expr_id(\@expr);
- $expr_id = $self->_add_expr(\@expr) if !defined $expr_id;
- $expr_cache{$key} = $expr_id;
+ $expr_cache{$key} = $self->_expr_id_add(\@expr);
}
my $expr_id = $expr_cache{$key};
# add link to next token for this expression, if any
if ($i < @$tokens - $order) {
my $next_id = $token_cache{ join('', @{ $tokens->[$i+$order] }) };
$self->_inc_link('next_token', $expr_id, $next_id);
}
# add link to previous token for this expression, if any
if ($i > 0) {
my $prev_id = $token_cache{ join('', @{ $tokens->[$i-1] }) };
$self->_inc_link('prev_token', $expr_id, $prev_id);
}
# add links to boundary token if appropriate
my $b = $self->storage->_boundary_token_id;
$self->_inc_link('prev_token', $expr_id, $b) if $i == 0;
$self->_inc_link('next_token', $expr_id, $b) if $i == @$tokens-$order;
}
return;
}
# sort token ids based on how rare they are
sub _find_rare_tokens {
my ($self, $token_ids, $min) = @_;
return unless @$token_ids;
my %links;
for my $id (@$token_ids) {
next if exists $links{$id};
$self->{_sth_token_count}->execute($id);
$links{$id} = $self->{_sth_token_count}->fetchrow_array;
}
# remove tokens which are too rare
my @ids = grep { $links{$_} >= $min } @$token_ids;
@ids = sort { $links{$a} <=> $links{$b} } @ids;
return @ids;
}
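# Example (a sketch): given counts { 5 => 1, 7 => 4, 9 => 2 } and $min = 2,
# token 5 is dropped as too rare and the rest come back sorted by
# ascending count: (9, 7). reply() then shifts the first element off this
# list to use as its pivot token.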
# increase the link weight between an expression and a token
sub _inc_link {
my ($self, $type, $expr_id, $token_id) = @_;
$self->{"_sth_${type}_count"}->execute($expr_id, $token_id);
my $count = $self->{"_sth_${type}_count"}->fetchrow_array;
if (defined $count) {
$self->{"_sth_${type}_inc"}->execute($expr_id, $token_id);
}
else {
$self->{"_sth_${type}_add"}->execute($expr_id, $token_id);
}
return;
}
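# The SELECT-then-UPDATE/INSERT sequence above is a portable upsert. A
# hypothetical sketch of the statements assumed here (the real SQL lives
# in Hailo::Storage and may differ):
#   next_token_count: SELECT count FROM next_token
#                     WHERE expr_id = ? AND token_id = ?
#   next_token_inc:   UPDATE next_token SET count = count + 1
#                     WHERE expr_id = ? AND token_id = ?
#   next_token_add:   INSERT INTO next_token (expr_id, token_id, count)
#                     VALUES (?, ?, 1)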
-# add new expression to the database
-sub _add_expr {
+# look up/add an expression id based on tokens
+sub _expr_id_add {
my ($self, $token_ids) = @_;
- # add the expression
+ $self->{_sth_expr_id}->execute(@$token_ids);
+ my $expr_id = $self->{_sth_expr_id}->fetchrow_array();
+ return $expr_id if defined $expr_id;
+
$self->{_sth_add_expr}->execute(@$token_ids);
return $self->storage->dbh->last_insert_id(undef, undef, "expr", undef);
}
-# look up an expression id based on tokens
-sub _expr_id {
- my ($self, $tokens) = @_;
- $self->{_sth_expr_id}->execute(@$tokens);
- return $self->{_sth_expr_id}->fetchrow_array();
-}
-
# return token id if the token exists
sub _token_id {
my ($self, $token_info) = @_;
$self->{_sth_token_id}->execute(@$token_info);
my $token_id = $self->{_sth_token_id}->fetchrow_array();
return unless defined $token_id;
return $token_id;
}
# get token id (adding the token if it doesn't exist)
sub _token_id_add {
my ($self, $token_info) = @_;
my $token_id = $self->_token_id($token_info);
$token_id = $self->_add_token($token_info) unless defined $token_id;
return $token_id;
}
# return all tokens (regardless of spacing) that consist of this text
sub _token_similar {
my ($self, $token_text) = @_;
$self->{_sth_token_similar}->execute($token_text);
return $self->{_sth_token_similar}->fetchrow_arrayref;
}
# add a new token and return its id
sub _add_token {
my ($self, $token_info) = @_;
$self->{_sth_add_token}->execute(@$token_info);
return $self->storage->dbh->last_insert_id(undef, undef, "token", undef);
}
# return a random expression containing the given token
sub _random_expr {
my ($self, $token_id) = @_;
my $expr;
if (!defined $token_id) {
$self->{_sth_random_expr}->execute();
$expr = $self->{_sth_random_expr}->fetchrow_arrayref();
}
else {
# try the positions in a random order
for my $pos (shuffle 0 .. $self->order-1) {
my $column = "token${pos}_id";
# get a random expression which includes the token at this position
$self->{"_sth_expr_by_$column"}->execute($token_id);
$expr = $self->{"_sth_expr_by_$column"}->fetchrow_arrayref();
last if defined $expr;
}
}
return unless defined $expr;
return @$expr;
}
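# E.g. with order = 3 the columns token0_id .. token2_id are probed in a
# shuffled order, so the pivot token is equally likely to be found at any
# position of an expression instead of always anchoring position 0.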
# return a new next/previous token
sub _pos_token {
my ($self, $pos, $expr_id, $key_tokens) = @_;
$self->{"_sth_${pos}_token_get"}->execute($expr_id);
my $pos_tokens = $self->{"_sth_${pos}_token_get"}->fetchall_arrayref();
if (defined $key_tokens) {
for my $i (0 .. $#{ $key_tokens }) {
my $want_id = $key_tokens->[$i];
my @ids = map { $_->[0] } @$pos_tokens;
my $has_id = grep { $_ == $want_id } @ids;
next unless $has_id;
return splice @$key_tokens, $i, 1;
}
}
my @novel_tokens;
for my $token (@$pos_tokens) {
push @novel_tokens, ($token->[0]) x $token->[1];
}
return $novel_tokens[rand @novel_tokens];
}
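# The push above implements a weighted random pick: a token linked to
# this expression N times occurs N times in @novel_tokens and is thus N
# times as likely to be drawn. E.g. (a sketch) rows [42 => 3] and
# [7 => 1] yield the list (42, 42, 42, 7), picking 42 with probability 3/4.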
sub _construct_reply {
my ($self, $what, $expr_id, $token_ids, $expr_cache, $key_ids) = @_;
my $order = $self->order;
my $repeat_limit = $self->repeat_limit;
my $boundary_token = $self->storage->_boundary_token_id;
my $i = 0;
while (1) {
if (($i % $order) == 0 and
(($i >= $repeat_limit * 3) ||
($i >= $repeat_limit and uniq(@$token_ids) <= $order))) {
last;
}
my $id = $self->_pos_token($what, $expr_id, $key_ids);
last if $id == $boundary_token;
my @ids;
given ($what) {
when ('next') {
push @$token_ids, $id;
@ids = @$token_ids[-$order..-1];
}
when ('prev') {
unshift @$token_ids, $id;
@ids = @$token_ids[0..$order-1];
}
}
my $key = join '_', @ids;
if (!defined $expr_cache->{$key}) {
- $expr_cache->{$key} = $self->_expr_id(\@ids);
+ $expr_cache->{$key} = $self->_expr_id_add(\@ids);
}
$expr_id = $expr_cache->{$key};
} continue {
$i++;
}
return;
}
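# Example of the stop conditions above: with order = 2, repeat_limit is
# min(2 * 10, 50) = 20, so construction stops unconditionally once 60
# tokens have been added, or after 20 if the reply has degenerated to at
# most 2 unique tokens. It also ends naturally whenever _pos_token
# returns the boundary token.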
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Default - The default engine backend for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>.
It generates the reply in one go, while favoring some of the tokens in the
input, and returns it. It is fast and the replies are decent, but you can
get better replies (at the cost of speed) with the
L<Scored|Hailo::Engine::Scored> engine.
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/lib/Hailo/Engine/Scored.pm b/lib/Hailo/Engine/Scored.pm
index af20535..43085b7 100644
--- a/lib/Hailo/Engine/Scored.pm
+++ b/lib/Hailo/Engine/Scored.pm
@@ -1,275 +1,275 @@
package Hailo::Engine::Scored;
use 5.010;
use Any::Moose;
use List::Util qw<sum>;
use List::MoreUtils qw<any>;
use Time::HiRes qw<gettimeofday tv_interval>;
extends 'Hailo::Engine::Default';
after BUILD => sub {
my ($self) = @_;
my %args = $self->arguments;
if (defined $args{iterations} && defined $args{interval}) {
die __PACKAGE__.": You can only specify one of 'iterations' and 'interval'\n";
}
return;
};
sub reply {
my $self = shift;
my $tokens = shift // [];
# see if we recognize any of the input tokens
my $token_cache = $self->_resolve_input_tokens($tokens);
my @input_token_ids = keys %$token_cache;
my @token_counts;
# let's select potential pivot tokens
if (keys %$token_cache) {
# we got some known tokens, let's prefer the ones with normal
# spacing, i.e. words instead of things like ',' or '('.
@token_counts = map {
$token_cache->{$_}[0] == 0 ? [$_, $token_cache->{$_}[2]] : ()
} keys %$token_cache;
if (!@token_counts) {
# no known words in the input, so we'll settle for the rest
@token_counts = map { [$_, $token_cache->{$_}[2]] } keys %$token_cache;
}
}
my $token_probs = $self->_get_pivot_probabilites(\@token_counts);
my @started = gettimeofday();
my $iterations = 0;
my $done;
my %args = $self->arguments;
if (!defined $args{iterations} && !defined $args{interval}) {
# construct replies for half a second by default
$args{interval} = 0.5;
}
if (defined $args{iterations}) {
$done = sub {
return 1 if $iterations == $args{iterations};
};
}
else {
$done = sub {
my $elapsed = tv_interval(\@started, [gettimeofday]);
return 1 if $elapsed >= $args{interval};
};
}
my (%link_cache, %expr_cache, $best_score, $best_reply);
while (1) {
$iterations++;
my $reply = $self->_generate_reply($token_probs, \%expr_cache);
return if !defined $reply; # we don't know any expressions yet
my $score = $self->_evaluate_reply(\@input_token_ids, $reply, \%link_cache);
if (defined $best_reply && $self->_too_similar(\@input_token_ids, $reply)) {
last if $done->();
next;
}
if (!defined $best_score || $score > $best_score) {
$best_score = $score;
$best_reply = $reply;
}
last if $done->();
}
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @$best_reply;
return \@output;
}
# Calculate the probability we wish to pick each token as the pivot.
# This uses -log2(p) as a method for inverting token probability,
# ensuring that our rarer tokens are picked more often.
sub _get_pivot_probabilites {
my ($self, $token_counts) = @_;
return [] if !@$token_counts;
return [[$token_counts->[0], 1]] if @$token_counts == 1;
# calculate the (non-normalized) probability we want each to occur
my $count_sum = sum(map { $_->[1] } @$token_counts);
my $p = [];
my $p_sum = 0;
for my $token_count (map { $_->[1] } @$token_counts) {
my $token_p = -log(($token_count/$count_sum))/log(2);
push @$p, $token_p;
$p_sum += $token_p;
}
# normalize the probabilities
my @probs = map {
[$token_counts->[$_], $p->[$_] / $p_sum];
} 0..$#{ $token_counts };
return \@probs;
}
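# Worked example (a sketch): with counts [A => 1] and [B => 3],
# $count_sum is 4, so A scores -log2(1/4) = 2 and B scores
# -log2(3/4) ~= 0.415. Normalized by their sum (~2.415), A is picked
# about 83% of the time and B about 17% -- the rarer token dominates.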
sub _generate_reply {
my ($self, $token_probs, $expr_cache) = @_;
my ($pivot_expr_id, @token_ids) = @_;
if (@$token_probs) {
my $pivot_token_id = $self->_choose_pivot($token_probs);
($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
}
else {
($pivot_expr_id, @token_ids) = $self->_random_expr();
return if !defined $pivot_expr_id; # no expressions in the database
}
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, $expr_cache);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, $expr_cache);
return \@token_ids;
}
sub _evaluate_reply {
my ($self, $input_token_ids, $reply_token_ids, $cache) = @_;
my $order = $self->order;
my $score = 0;
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $next_token_id = $reply_token_ids->[$idx];
if (any { $_ == $next_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx .. $idx+$order-1];
my $key = join('_', @expr)."-$next_token_id";
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('next', \@expr, $next_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
for my $idx (0 .. $#{ $reply_token_ids } - $order) {
my $prev_token_id = $reply_token_ids->[$idx];
if (any { $_ == $prev_token_id } @$input_token_ids) {
my @expr = @$reply_token_ids[$idx+1 .. $idx+$order];
my $key = join('_', @expr)."-$prev_token_id";
if (!defined $cache->{$key}) {
$cache->{$key} = $self->_expr_token_probability('prev', \@expr, $prev_token_id);
}
if ($cache->{$key} > 0) {
$score -= log($cache->{$key})/log(2);
}
}
}
return $score;
}
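# The score is accumulated in bits of surprisal: each input token the
# reply reuses contributes -log2(p), where p is the link probability
# computed below in _expr_token_probability. E.g. a link with p = 0.25
# adds 2 to the score, so less probable (more novel) placements of the
# input tokens are rewarded.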
sub _expr_token_probability {
my ($self, $pos, $expr, $token_id) = @_;
my $order = $self->order;
- my $expr_id = $self->_expr_id($expr);
+ my $expr_id = $self->_expr_id_add($expr);
$self->{"_sth_${pos}_token_count"}->execute($expr_id, $token_id);
my $expr2token = $self->{"_sth_${pos}_token_count"}->fetchrow_array();
return 0 if !$expr2token;
$self->{"_sth_${pos}_token_links"}->execute($expr_id);
my $expr2all = $self->{"_sth_${pos}_token_links"}->fetchrow_array();
return $expr2token / $expr2all;
}
sub _choose_pivot {
my ($self, $token_probs) = @_;
my $random = rand;
my $p = 0;
for my $token (@$token_probs) {
$p += $token->[1];
return $token->[0][0] if $p > $random;
}
return;
}
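# _choose_pivot is roulette-wheel selection over the normalized
# probabilities from _get_pivot_probabilites: walk the cumulative sum
# until it passes a uniform random draw and return that token's id.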
sub _too_similar {
my ($self, $input_token_ids, $reply_token_ids) = @_;
my %input_token_ids = map { +$_ => 1 } @$input_token_ids;
for my $reply_token_id (@$reply_token_ids) {
return if !$input_token_ids{$reply_token_id};
}
return 1;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Scored - MegaHAL-style reply scoring for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>. It inherits from
L<Hailo::Engine::Default|Hailo::Engine::Default> and only overrides its
C<reply> method.
It generates multiple replies and applies a scoring algorithm to them, then
returns the best one, similar to MegaHAL.
=head1 ATTRIBUTES
=head2 C<engine_args>
This is a hash reference which can have the following keys:
=head3 C<iterations>
The number of replies to generate before returning the best one.
=head3 C<interval>
The time (in seconds) to spend on generating replies before returning the
best one.
You cannot specify both C<iterations> and C<interval> at the same time. If
neither is specified, a default C<interval> of 0.5 seconds will be used.
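For example (a sketch, assuming the standard L<Hailo|Hailo> constructor
arguments), you could select this engine and cap it at 50 candidate
replies like so:

    my $hailo = Hailo->new(
        brain        => 'brain.sqlite',
        engine_class => 'Scored',
        engine_args  => { iterations => 50 },
    );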
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
This module was based on code from Peter Teichman's Cobe project.
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | 2969454f791054c383e2a17c127df96ca2c91e63 | Don't increment the token count so much | diff --git a/lib/Hailo/Engine/Default.pm b/lib/Hailo/Engine/Default.pm
index 671324f..84356b1 100644
--- a/lib/Hailo/Engine/Default.pm
+++ b/lib/Hailo/Engine/Default.pm
@@ -1,393 +1,393 @@
package Hailo::Engine::Default;
use 5.010;
use Any::Moose;
use List::Util qw<min first shuffle>;
use List::MoreUtils qw<uniq>;
with qw[ Hailo::Role::Arguments Hailo::Role::Engine ];
has repeat_limit => (
isa => 'Int',
is => 'rw',
lazy => 1,
default => sub {
my ($self) = @_;
my $order = $self->order;
return min(($order * 10), 50);
}
);
sub BUILD {
my ($self) = @_;
# This performance hack is here because in our tight loops calling
# $self->storage->sth->{...} is actually a significant part of the
# overall program execution time since we're doing two method
# calls and hash dereferences for each call to the database.
my $sth = $self->storage->sth;
while (my ($k, $v) = each %$sth) {
$self->{"_sth_$k"} = $v;
}
return;
}
## no critic (Subroutines::ProhibitExcessComplexity)
sub reply {
my $self = shift;
my $tokens = shift // [];
# we will favor these tokens when making the reply. Shuffle them
# and discard half.
my @key_tokens = do {
my $i = 0;
grep { $i++ % 2 == 0 } shuffle(@$tokens);
};
my $token_cache = $self->_resolve_input_tokens($tokens);
my @key_ids = keys %$token_cache;
# sort the rest by rareness
@key_ids = $self->_find_rare_tokens(\@key_ids, 2);
# get the middle expression
my $pivot_token_id = shift @key_ids;
my ($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
return unless defined $pivot_expr_id; # we don't know any expressions yet
# remove key tokens we're already using
@key_ids = grep { my $used = $_; !first { $_ == $used } @token_ids } @key_ids;
my %expr_cache;
# construct the end of the reply
$self->_construct_reply('next', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# construct the beginning of the reply
$self->_construct_reply('prev', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# translate token ids to token spacing/text
my @output = map {
$token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @token_ids;
return \@output;
}
sub _resolve_input_tokens {
my ($self, $tokens) = @_;
my %token_cache;
if (@$tokens == 1) {
my ($spacing, $text) = @{ $tokens->[0] };
my $token_info = $self->_token_resolve($spacing, $text);
if (defined $token_info) {
my ($id, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
else {
# when there's just one token, it could be ';' for example,
# which will have normal spacing when it appears alone, but
# suffix spacing in a sentence like "those things; foo, bar",
# so we'll be a bit more lax here by also looking for any
# token that has the same text
$token_info = $self->_token_similar($text);
if (defined $token_info) {
my ($id, $spacing, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
}
}
else {
for my $token (@$tokens) {
my ($spacing, $text) = @$token;
my $token_info = $self->_token_resolve($spacing, $text);
next if !defined $token_info;
my ($id, $count) = @$token_info;
$token_cache{$id} = [$spacing, $text, $count];
}
}
return \%token_cache;
}
sub _token_resolve {
my ($self, $spacing, $text) = @_;
$self->{_sth_token_resolve}->execute($spacing, $text);
return $self->{_sth_token_resolve}->fetchrow_arrayref;
}
sub _token_info {
my ($self, $id) = @_;
$self->{_sth_token_info}->execute($id);
my @res = $self->{_sth_token_info}->fetchrow_array;
return \@res;
}
sub learn {
my ($self, $tokens) = @_;
my $order = $self->order;
# only learn from inputs which are long enough
return if @$tokens < $order;
my (%token_cache, %expr_cache);
+ # resolve/add tokens and update their counter
for my $token (@$tokens) {
my $key = join '', @$token; # the key is "$spacing$text"
- next if exists $token_cache{$key};
- $token_cache{$key} = $self->_token_id_add($token);
+ if (!exists $token_cache{$key}) {
+ $token_cache{$key} = $self->_token_id_add($token);
+ }
+ $self->{_sth_inc_token_count}->execute($token_cache{$key});
}
# process every expression of length $order
for my $i (0 .. @$tokens - $order) {
my @expr = map { $token_cache{ join('', @{ $tokens->[$_] }) } } $i .. $i+$order-1;
my $key = join('_', @expr);
if (!defined $expr_cache{$key}) {
my $expr_id = $self->_expr_id(\@expr);
- if (!defined $expr_id) {
- $expr_id = $self->_add_expr(\@expr);
- $self->{_sth_inc_token_count}->execute($_) for uniq(@expr);
- }
+ $expr_id = $self->_add_expr(\@expr) if !defined $expr_id;
$expr_cache{$key} = $expr_id;
}
my $expr_id = $expr_cache{$key};
# add link to next token for this expression, if any
if ($i < @$tokens - $order) {
my $next_id = $token_cache{ join('', @{ $tokens->[$i+$order] }) };
$self->_inc_link('next_token', $expr_id, $next_id);
}
# add link to previous token for this expression, if any
if ($i > 0) {
my $prev_id = $token_cache{ join('', @{ $tokens->[$i-1] }) };
$self->_inc_link('prev_token', $expr_id, $prev_id);
}
# add links to boundary token if appropriate
my $b = $self->storage->_boundary_token_id;
$self->_inc_link('prev_token', $expr_id, $b) if $i == 0;
$self->_inc_link('next_token', $expr_id, $b) if $i == @$tokens-$order;
}
return;
}
# sort token ids based on how rare they are
sub _find_rare_tokens {
my ($self, $token_ids, $min) = @_;
return unless @$token_ids;
my %links;
for my $id (@$token_ids) {
next if exists $links{$id};
$self->{_sth_token_count}->execute($id);
$links{$id} = $self->{_sth_token_count}->fetchrow_array;
}
# remove tokens which are too rare
my @ids = grep { $links{$_} >= $min } @$token_ids;
@ids = sort { $links{$a} <=> $links{$b} } @ids;
return @ids;
}
# increase the link weight between an expression and a token
sub _inc_link {
my ($self, $type, $expr_id, $token_id) = @_;
$self->{"_sth_${type}_count"}->execute($expr_id, $token_id);
my $count = $self->{"_sth_${type}_count"}->fetchrow_array;
if (defined $count) {
$self->{"_sth_${type}_inc"}->execute($expr_id, $token_id);
}
else {
$self->{"_sth_${type}_add"}->execute($expr_id, $token_id);
}
return;
}
# add new expression to the database
sub _add_expr {
my ($self, $token_ids) = @_;
# add the expression
$self->{_sth_add_expr}->execute(@$token_ids);
return $self->storage->dbh->last_insert_id(undef, undef, "expr", undef);
}
# look up an expression id based on tokens
sub _expr_id {
my ($self, $tokens) = @_;
$self->{_sth_expr_id}->execute(@$tokens);
return $self->{_sth_expr_id}->fetchrow_array();
}
# return token id if the token exists
sub _token_id {
my ($self, $token_info) = @_;
$self->{_sth_token_id}->execute(@$token_info);
my $token_id = $self->{_sth_token_id}->fetchrow_array();
return unless defined $token_id;
return $token_id;
}
# get token id (adding the token if it doesn't exist)
sub _token_id_add {
my ($self, $token_info) = @_;
my $token_id = $self->_token_id($token_info);
$token_id = $self->_add_token($token_info) unless defined $token_id;
return $token_id;
}
# return all tokens (regardless of spacing) that consist of this text
sub _token_similar {
my ($self, $token_text) = @_;
$self->{_sth_token_similar}->execute($token_text);
return $self->{_sth_token_similar}->fetchrow_arrayref;
}
# add a new token and return its id
sub _add_token {
my ($self, $token_info) = @_;
$self->{_sth_add_token}->execute(@$token_info);
return $self->storage->dbh->last_insert_id(undef, undef, "token", undef);
}
# return a random expression containing the given token
sub _random_expr {
my ($self, $token_id) = @_;
my $expr;
if (!defined $token_id) {
$self->{_sth_random_expr}->execute();
$expr = $self->{_sth_random_expr}->fetchrow_arrayref();
}
else {
# try the positions in a random order
for my $pos (shuffle 0 .. $self->order-1) {
my $column = "token${pos}_id";
# get a random expression which includes the token at this position
$self->{"_sth_expr_by_$column"}->execute($token_id);
$expr = $self->{"_sth_expr_by_$column"}->fetchrow_arrayref();
last if defined $expr;
}
}
return unless defined $expr;
return @$expr;
}
# return a new next/previous token
sub _pos_token {
my ($self, $pos, $expr_id, $key_tokens) = @_;
$self->{"_sth_${pos}_token_get"}->execute($expr_id);
my $pos_tokens = $self->{"_sth_${pos}_token_get"}->fetchall_arrayref();
if (defined $key_tokens) {
for my $i (0 .. $#{ $key_tokens }) {
my $want_id = $key_tokens->[$i];
my @ids = map { $_->[0] } @$pos_tokens;
my $has_id = grep { $_ == $want_id } @ids;
next unless $has_id;
return splice @$key_tokens, $i, 1;
}
}
my @novel_tokens;
for my $token (@$pos_tokens) {
push @novel_tokens, ($token->[0]) x $token->[1];
}
return $novel_tokens[rand @novel_tokens];
}
sub _construct_reply {
my ($self, $what, $expr_id, $token_ids, $expr_cache, $key_ids) = @_;
my $order = $self->order;
my $repeat_limit = $self->repeat_limit;
my $boundary_token = $self->storage->_boundary_token_id;
my $i = 0;
while (1) {
if (($i % $order) == 0 and
(($i >= $repeat_limit * 3) ||
($i >= $repeat_limit and uniq(@$token_ids) <= $order))) {
last;
}
my $id = $self->_pos_token($what, $expr_id, $key_ids);
last if $id == $boundary_token;
my @ids;
given ($what) {
when ('next') {
push @$token_ids, $id;
@ids = @$token_ids[-$order..-1];
}
when ('prev') {
unshift @$token_ids, $id;
@ids = @$token_ids[0..$order-1];
}
}
my $key = join '_', @ids;
if (!defined $expr_cache->{$key}) {
$expr_cache->{$key} = $self->_expr_id(\@ids);
}
$expr_id = $expr_cache->{$key};
} continue {
$i++;
}
return;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Default - The default engine backend for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>.
It generates the reply in one go, while favoring some of the tokens in the
input, and returns it. It is fast and the replies are decent, but you can
get better replies (at the cost of speed) with the
L<Scored|Hailo::Engine::Scored> engine.
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | eae214f3be6e4f4f8ab94170b941c28a706daed7 | Fix capitalization slowdown | diff --git a/Changes b/Changes
index 8754353..598e73a 100644
--- a/Changes
+++ b/Changes
@@ -1,524 +1,527 @@
Revision history for Hailo
{{$NEXT}}
- Preserve casing of Emacs key sequences like "C-u"
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
+ - Fixed a bug which caused the tokenizer to be very slow at capitalizing
+ replies which contain things like "script/osm-to-tilenumbers.pl"
+
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize '–' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
with fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization); overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from a osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility, but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC::Run3 calling a script that called FindBin didn't work
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index c99c0a2..a5ea673 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,225 +1,225 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”“«»」』›‚]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
-my $LOOSE_WORD = qr/(?:$WORD_TYPES)|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)*||$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE))*/;
+my $LOOSE_WORD = qr/$WORD_TYPES|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)|$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE)|$DASH(?!$DASH{2}))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
my $SEPARATOR = qr/\s+|$ELLIPSIS/;
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
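# Examples drawn from the test suite: $WORD matches compounds such as
# "anti-scientology" and "moo-5-moo" as single tokens, and $WORD_APOST
# lets a trailing-apostrophe word like "ridin'" survive as one token
# when it appears inside quotes.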
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
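# Example (a sketch; the spacing constants come via Hailo::Role::Tokenizer):
# make_tokens('Hello, world!') yields
#   [ [$normal, 'hello'], [$postfix, ','], [$normal, 'world'], [$postfix, '!'] ]
# "hello" is lower-cased because it is neither mixed-case nor an acronym,
# and each comma/bang is tagged postfix so it reattaches without a
# preceding space.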
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/$SEPARATOR$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$SEPARATOR|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
$reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
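# Feeding the example tokens above back through make_output() joins them
# as "hello, world!" (no space before postfix tokens), after which the
# first capitalization rule produces "Hello, world!".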
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index df29974..f75627a 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,449 +1,462 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
+use Time::HiRes qw<gettimeofday tv_interval>;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example . com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion".'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion".'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<. com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources . list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
[
"It's as simple as C-u C-c C-t C-t t",
[qw<it's as simple as C-u C-c C-t C-t t>],
"It's as simple as C-u C-c C-t C-t t.",
],
+ [
+ "foo----------",
+ [qw<foo ---------->],
+ "foo----------",
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
+ my @before = gettimeofday();
my $tokens = $toke->make_tokens($test->[0]);
+ my @after = gettimeofday();
+ cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Tokenizing in <1 second');
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
+
+ @before = gettimeofday();
my $output = $toke->make_output($tokens);
+ @after = gettimeofday();
+ cmp_ok(tv_interval(\@before, \@after), '<', 1, 'Making output in <1 second');
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
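The timing assertions added above come from Time::HiRes, whose gettimeofday() returns the current time as a (seconds, microseconds) pair and whose tv_interval() turns two such pairs into elapsed floating-point seconds. A minimal, self-contained sketch of the same pattern; the operation under test and the one-second budget are illustrative, not from the original file:

    use strict;
    use warnings;
    use Test::More;
    use Time::HiRes qw(gettimeofday tv_interval);

    # Record wall-clock time around the operation under test.
    my @before = gettimeofday();
    my $result = do_something();    # hypothetical stand-in for make_tokens()
    my @after  = gettimeofday();

    # tv_interval() returns elapsed seconds as a float; assert a time budget.
    cmp_ok(tv_interval(\@before, \@after), '<', 1, 'finished in <1 second');

    done_testing();

    sub do_something { return 42 }  # placeholder so the sketch runs standalone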
|
hinrik/hailo | f07521bb1a175d5a58054fa32e54bebb4d0fc51d | Shorten this | diff --git a/lib/Hailo/Storage/Schema.pm b/lib/Hailo/Storage/Schema.pm
index 6f606e8..4c8470e 100644
--- a/lib/Hailo/Storage/Schema.pm
+++ b/lib/Hailo/Storage/Schema.pm
@@ -1,184 +1,181 @@
package Hailo::Storage::Schema;
use 5.010;
use strict;
## Soup to spawn the database itself / create statement handles
sub deploy {
my (undef, $dbd, $dbh, $order) = @_;
my @orders = (0 .. $order-1);
my $int_primary_key = "INTEGER PRIMARY KEY AUTOINCREMENT";
$int_primary_key = "INTEGER PRIMARY KEY AUTO_INCREMENT" if $dbd eq "mysql";
$int_primary_key = "SERIAL UNIQUE" if $dbd eq "Pg";
my $text = 'TEXT';
$text = 'VARCHAR(255)' if $dbd eq 'mysql';
my $text_primary = 'TEXT NOT NULL PRIMARY KEY';
$text_primary = 'TEXT NOT NULL' if $dbd eq 'mysql';
my @tables;
push @tables => <<"TABLE";
CREATE TABLE info (
attribute $text_primary,
text TEXT NOT NULL
);
TABLE
push @tables => <<"TABLE";
CREATE TABLE token (
id $int_primary_key,
spacing INTEGER NOT NULL,
text $text NOT NULL,
count INTEGER NOT NULL
);
TABLE
my $token_n = join ",\n ", map { "token${_}_id INTEGER NOT NULL REFERENCES token (id)" } @orders;
push @tables => <<"TABLE";
CREATE TABLE expr (
id $int_primary_key,
$token_n
);
TABLE
push @tables => <<"TABLE";
CREATE TABLE next_token (
id $int_primary_key,
expr_id INTEGER NOT NULL REFERENCES expr (id),
token_id INTEGER NOT NULL REFERENCES token (id),
count INTEGER NOT NULL
);
TABLE
push @tables => <<"TABLE";
CREATE TABLE prev_token (
id $int_primary_key,
expr_id INTEGER NOT NULL REFERENCES expr (id),
token_id INTEGER NOT NULL REFERENCES token (id),
count INTEGER NOT NULL
);
TABLE
for my $i (@orders) {
push @tables => "CREATE INDEX expr_token${i}_id on expr (token${i}_id);"
}
my $columns = join(', ', map { "token${_}_id" } @orders);
push @tables => "CREATE INDEX expr_token_ids on expr ($columns);";
push @tables => 'CREATE INDEX token_text on token (text);';
push @tables => 'CREATE INDEX next_token_expr_id ON next_token (expr_id);';
push @tables => 'CREATE INDEX prev_token_expr_id ON prev_token (expr_id);';
for (@tables) {
$dbh->do($_);
}
return;
}
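# (Editor's sketch, not part of the original file: for the default order
# of 2 on SQLite, the code above ends up issuing DDL along these lines,
# reconstructed from the interpolations for illustration.
#
#   CREATE TABLE expr (
#       id INTEGER PRIMARY KEY AUTOINCREMENT,
#       token0_id INTEGER NOT NULL REFERENCES token (id),
#       token1_id INTEGER NOT NULL REFERENCES token (id)
#   );
#   CREATE INDEX expr_token0_id on expr (token0_id);
#   CREATE INDEX expr_token1_id on expr (token1_id);
#   CREATE INDEX expr_token_ids on expr (token0_id, token1_id);
# )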
# create statement handle objects
sub sth {
my (undef, $dbd, $dbh, $order) = @_;
my @orders = (0 .. $order-1);
my @columns = map { "token${_}_id" } 0 .. $order-1;
my $columns = join(', ', @columns);
my @ids = join(', ', ('?') x @columns);
my $ids = join(', ', @ids);
my $q_rand = 'RANDOM()';
$q_rand = 'RAND()' if $dbd eq 'mysql';
my $q_rand_id = "(abs($q_rand) % (SELECT max(id) FROM expr))";
$q_rand_id = "(random()*id+1)::int" if $dbd eq 'Pg';
my %state = (
set_info => qq[INSERT INTO info (attribute, text) VALUES (?, ?);],
random_expr => qq[SELECT * FROM expr WHERE id >= $q_rand_id LIMIT 1;],
token_resolve => qq[SELECT id, count FROM token WHERE spacing = ? AND text = ?;],
token_id => qq[SELECT id FROM token WHERE spacing = ? AND text = ?;],
token_info => qq[SELECT spacing, text FROM token WHERE id = ?;],
token_similar => qq[SELECT id, spacing, count FROM token WHERE text = ? ORDER BY $q_rand LIMIT 1;] ,
add_token => qq[INSERT INTO token (spacing, text, count) VALUES (?, ?, 0)],
inc_token_count => qq[UPDATE token SET count = count + 1 WHERE id = ?],
# ->stats()
expr_total => qq[SELECT COUNT(*) FROM expr;],
token_total => qq[SELECT COUNT(*) FROM token;],
prev_total => qq[SELECT COUNT(*) FROM prev_token;],
next_total => qq[SELECT COUNT(*) FROM next_token;],
# Defaults, overriden in SQLite
last_expr_rowid => qq[SELECT id FROM expr ORDER BY id DESC LIMIT 1;],
last_token_rowid => qq[SELECT id FROM token ORDER BY id DESC LIMIT 1;],
- next_token_links => qq[SELECT SUM(count) FROM next_token WHERE expr_id = ?;],
- prev_token_links => qq[SELECT SUM(count) FROM prev_token WHERE expr_id = ?;],
- next_token_count => qq[SELECT count FROM next_token WHERE expr_id = ? AND token_id = ?;],
- prev_token_count => qq[SELECT count FROM prev_token WHERE expr_id = ? AND token_id = ?;],
- next_token_inc => qq[UPDATE next_token SET count = count + 1 WHERE expr_id = ? AND token_id = ?],
- prev_token_inc => qq[UPDATE prev_token SET count = count + 1 WHERE expr_id = ? AND token_id = ?],
- next_token_add => qq[INSERT INTO next_token (expr_id, token_id, count) VALUES (?, ?, 1);],
- prev_token_add => qq[INSERT INTO prev_token (expr_id, token_id, count) VALUES (?, ?, 1);],
- next_token_get => qq[SELECT token_id, count FROM next_token WHERE expr_id = ?;],
- prev_token_get => qq[SELECT token_id, count FROM prev_token WHERE expr_id = ?;],
-
token_count => qq[SELECT count FROM token WHERE id = ?;],
add_expr => qq[INSERT INTO expr ($columns) VALUES ($ids)],
expr_id => qq[SELECT id FROM expr WHERE ] . join(' AND ', map { "token${_}_id = ?" } @orders),
);
+ for my $table (qw(next_token prev_token)) {
+ $state{"${table}_links"} = qq[SELECT SUM(count) FROM $table WHERE expr_id = ?;],
+ $state{"${table}_count"} = qq[SELECT count FROM $table WHERE expr_id = ? AND token_id = ?;],
+ $state{"${table}_inc"} = qq[UPDATE $table SET count = count + 1 WHERE expr_id = ? AND token_id = ?],
+ $state{"${table}_add"} = qq[INSERT INTO $table (expr_id, token_id, count) VALUES (?, ?, 1);],
+ $state{"${table}_get"} = qq[SELECT token_id, count FROM $table WHERE expr_id = ?;],
+ }
+
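# (Editor's note, not part of the original commit: the loop above
# generates the same ten prepared statements the removed literals
# spelled out, keyed next_token_links, next_token_count, next_token_inc,
# next_token_add, next_token_get and the prev_token_* equivalents.
# They are ordinary DBI handles, so callers run them as e.g.:
#
#   $sth->{next_token_add}->execute($expr_id, $token_id);
# )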
for (@orders) {
$state{"expr_by_token${_}_id"} = qq[SELECT * FROM expr WHERE token${_}_id = ? ORDER BY $q_rand LIMIT 1;];
}
# DBD specific queries / optimizations / munging
given ($dbd) {
when ('SQLite') {
# Optimize these for SQLite
$state{expr_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'expr';];
$state{token_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'token';];
$state{prev_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'prev_token';];
$state{next_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'next_token';];
}
}
# Sort to make error output easier to read if this fails. The
# order doesn't matter.
my @queries = sort keys %state;
my %sth = map { $_ => $dbh->prepare($state{$_}) } @queries;
return \%sth;
}
1;
=head1 NAME
Hailo::Storage::Schema - Deploy the database schema Hailo uses
=head1 DESCRIPTION
Implements functions to create the database schema and prepared
database queries L<Hailo::Storage> needs.
This class is internal to Hailo and has no public interface.
=head1 AUTHOR
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
hinrik/hailo | 7d786df4722931a41db515e1f54c762898fae2c1 | Fix Changes formatting | diff --git a/Changes b/Changes
index 4acc64e..8754353 100644
--- a/Changes
+++ b/Changes
@@ -1,527 +1,528 @@
Revision history for Hailo
{{$NEXT}}
- Preserve casing of Emacs key sequences like "C-u"
- Speed up the learning of repetitive sentences by caching more
- Added Hailo::Engine::Scored, which generates multiple replies (limited
by time or number of iterations) and returns the best one. Based on
code from Peter Teichman's Cobe project.
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
+
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize 'â' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and neither did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode; it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC3::Run calling a script that called FindBin didn't work
as expected.
- Add more testing including a really basic test for DBIx::Class
debugging (from the dbix-class branch) and making TAP output
|
hinrik/hailo | 93aa4bc37614591e14bf85e1aa2e59a837a00f14 | Add an engine which creates replies like MegaHAL | diff --git a/Changes b/Changes
index d8f37dc..4acc64e 100644
--- a/Changes
+++ b/Changes
@@ -1,520 +1,524 @@
Revision history for Hailo
{{$NEXT}}
- Preserve casing of Emacs key sequences like "C-u"
- Speed up the learning of repetitive sentences by caching more
+ - Added Hailo::Engine::Scored, which generates multiple replies (limited
+ by time or number of iterations) and returns the best one. Based on
+ code from Peter Teichman's Cobe project.
+
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize 'â' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and neither did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode; it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC3::Run calling a script that called FindBin didn't work
as expected.
diff --git a/lib/Hailo.pm b/lib/Hailo.pm
index e006d46..617b977 100644
--- a/lib/Hailo.pm
+++ b/lib/Hailo.pm
@@ -1,526 +1,527 @@
package Hailo;
use 5.010;
use autodie qw(open close);
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use File::Glob ':glob';
use Class::Load qw(try_load_class);
use Scalar::Util qw(blessed);
use List::Util qw(first);
use namespace::clean -except => 'meta';
use constant PLUGINS => [ qw[
Hailo::Engine::Default
+ Hailo::Engine::Scored
Hailo::Storage::MySQL
Hailo::Storage::PostgreSQL
Hailo::Storage::SQLite
Hailo::Tokenizer::Chars
Hailo::Tokenizer::Words
Hailo::UI::ReadLine
] ];
has brain => (
isa => 'Str',
is => 'rw',
);
has order => (
isa => 'Int',
is => 'rw',
default => 2,
trigger => sub {
my ($self, $order) = @_;
$self->_custom_order(1);
},
);
has _custom_order => (
isa => 'Bool',
is => 'rw',
default => 0,
init_arg => undef,
documentation => "Here so we can differentiate between the default value of order being explictly set and being set by default",
);
has _custom_tokenizer_class => (
isa => 'Bool',
is => 'rw',
default => 0,
init_arg => undef,
documentation => "Here so we can differentiate between the default value of tokenizer_class being explictly set and being set by default",
);
has save_on_exit => (
isa => 'Bool',
is => 'rw',
default => 1,
);
has brain_resource => (
documentation => "Alias for `brain' for backwards compatibility",
isa => 'Str',
is => 'rw',
trigger => sub {
my ($self, $brain) = @_;
$self->brain($brain);
},
);
sub BUILD {
my ($self) = @_;
my $brain = $self->brain;
return if !defined $brain;
$self->brain(bsd_glob($brain));
return;
}
my %has = (
engine => {
name => 'Engine',
default => 'Default',
},
storage => {
name => 'Storage',
default => 'SQLite',
},
tokenizer => {
name => 'Tokenizer',
default => 'Words',
},
ui => {
name => 'UI',
default => 'ReadLine',
},
);
for my $k (keys %has) {
my $name = $has{$k}->{name};
my $default = $has{$k}->{default};
my $method_class = "${k}_class";
my $method_args = "${k}_args";
# working classes
has "${k}_class" => (
isa => 'Str',
is => "rw",
default => $default,
($k ~~ 'tokenizer'
? (trigger => sub {
my ($self, $class) = @_;
$self->_custom_tokenizer_class(1);
})
: ())
);
# Object arguments
has "${k}_args" => (
documentation => "Arguments for the $name class",
isa => 'HashRef',
is => "ro",
default => sub { +{} },
);
# Working objects
has "_${k}" => (
does => "Hailo::Role::$name",
lazy_build => 1,
is => 'ro',
init_arg => undef,
);
# Generate the object itself
no strict 'refs';
*{"_build__${k}"} = sub {
my ($self) = @_;
my $obj = $self->_new_class(
$name,
$self->$method_class,
{
arguments => $self->$method_args,
($k ~~ [ qw< engine storage > ]
? (order => $self->order)
: ()),
($k ~~ [ qw< engine > ]
? (storage => $self->_storage)
: ()),
(($k ~~ [ qw< storage > ] and defined $self->brain)
? (
hailo => do {
require Scalar::Util;
Scalar::Util::weaken(my $s = $self);
my %callback = (
has_custom_order => sub { $s->_custom_order },
has_custom_tokenizer_class => sub { $s->_custom_tokenizer_class },
set_order => sub {
my ($db_order) = @_;
$s->order($db_order);
$s->_engine->order($db_order);
},
set_tokenizer_class => sub {
my ($db_tokenizer_class) = @_;
$s->tokenizer_class($db_tokenizer_class);
},
);
\%callback;
},
brain => $self->brain
)
: ()),
(($k ~~ [ qw< storage > ]
? (tokenizer_class => $self->tokenizer_class)
: ()))
},
);
return $obj;
};
}
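# (Editor's sketch, not part of the original file: for each backend type
# the loop above spells out three attributes plus a builder. For
# $k = 'storage' it is roughly equivalent to writing:
#
#   has storage_class => (isa => 'Str',     is => 'rw', default => 'SQLite');
#   has storage_args  => (isa => 'HashRef', is => 'ro', default => sub { +{} });
#   has _storage      => (does => 'Hailo::Role::Storage', is => 'ro', lazy_build => 1);
#   sub _build__storage { $_[0]->_new_class('Storage', $_[0]->storage_class, {...}) }
# )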
sub _new_class {
my ($self, $type, $class, $args) = @_;
my $pkg;
if ($class =~ m[^\+(?<custom_plugin>.+)$]) {
$pkg = $+{custom_plugin};
} else {
my @plugins = @{ $self->PLUGINS };
# Be fuzzy about includes, e.g. DBD::SQLite or SQLite or sqlite will go
$pkg = first { / $type : .* : $class /ix }
sort { length $a <=> length $b }
@plugins;
unless ($pkg) {
local $" = ', ';
my @p = grep { /$type/ } @plugins;
die "Couldn't find a class name matching '$class' in plugins '@p'";
}
}
my ($success, $error) = try_load_class($pkg);
die $error if !$success;
return $pkg->new(%$args);
}
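# (Editor's note, not part of the original file: the fuzzy match above
# resolves short names against PLUGINS case-insensitively, so both of
# these load Hailo::Storage::SQLite:
#
#   $self->_new_class('Storage', 'SQLite', \%args);
#   $self->_new_class('Storage', 'sqlite', \%args);
#
# while a plus prefix skips the list entirely and loads the named
# package as-is, e.g. '+My::Custom::Backend' (a made-up example).)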
sub save {
my ($self, @args) = @_;
$self->_storage->save(@args);
return;
}
sub train {
my ($self, $input) = @_;
$self->_storage->start_training();
given ($input) {
# With STDIN
when (not ref and defined and $_ eq '-') {
die "You must provide STDIN when training from '-'" if $self->_is_interactive(*STDIN);
$self->_train_fh(*STDIN);
}
# With a filehandle
when (ref eq 'GLOB') {
$self->_train_fh($input);
}
# With a file
when (not ref) {
open my $fh, '<:encoding(utf8)', $input;
$self->_train_fh($fh, $input);
}
# With an Array
when (ref eq 'ARRAY') {
$self->_learn_one($_) for @$input;
}
# With something naughty
default {
die "Unknown input: $input";
}
}
$self->_storage->stop_training();
return;
}
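# (Editor's note, not part of the original file: the given/when dispatch
# above accepts several input shapes, so all of these are valid:
#
#   $hailo->train('megahal.trn');   # file name, opened as UTF-8
#   $hailo->train(\*STDIN);         # any glob/filehandle
#   $hailo->train(\@lines);         # array reference of lines
#   $hailo->train('-');             # read from standard input
# )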
sub _train_fh {
my ($self, $fh, $filename) = @_;
while (my $line = <$fh>) {
chomp $line;
$self->_learn_one($line);
}
return;
}
sub learn {
my ($self, $input) = @_;
my $inputs;
given ($input) {
when (not defined) {
die "Cannot learn from undef input";
}
when (not ref) {
$inputs = [$input];
}
# With an Array
when (ref eq 'ARRAY') {
$inputs = $input
}
default {
die "Unknown input: $input";
}
}
my $storage = $self->_storage;
$storage->start_learning();
$self->_learn_one($_) for @$inputs;
$storage->stop_learning();
return;
}
sub _learn_one {
my ($self, $input) = @_;
my $engine = $self->_engine;
my $tokens = $self->_tokenizer->make_tokens($input);
$engine->learn($tokens);
return;
}
sub learn_reply {
my ($self, $input) = @_;
$self->learn($input);
return $self->reply($input);
}
sub reply {
my ($self, $input) = @_;
my $storage = $self->_storage;
# start_training() hasn't been called so we can't guarantee that
# the storage has been engaged at this point. This must be called
# before ->_engine() is called anywhere to ensure that the
# lazy-loading in the engine works.
$storage->_engage() unless $storage->_engaged;
my $engine = $self->_engine;
my $tokenizer = $self->_tokenizer;
my $reply;
if (defined $input) {
my $tokens = $tokenizer->make_tokens($input);
$reply = $engine->reply($tokens);
}
else {
$reply = $engine->reply();
}
return unless defined $reply;
return $tokenizer->make_output($reply);
}
sub stats {
my ($self) = @_;
return $self->_storage->totals();
}
sub DEMOLISH {
my ($self) = @_;
$self->save() if blessed $self->{_storage} and $self->save_on_exit;
return;
}
sub _is_interactive {
require IO::Interactive;
return IO::Interactive::is_interactive();
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo - A pluggable Markov engine analogous to MegaHAL
=head1 SYNOPSIS
This is the synopsis for using Hailo as a module. See L<hailo> for
command-line invocation.
# Hailo requires Perl 5.10
use 5.010;
use Any::Moose;
use Hailo;
# Construct a new in-memory Hailo using the SQLite backend. See
# backend documentation for other options.
my $hailo = Hailo->new;
# Various ways to learn
my @train_this = ("I like big butts", "and I can not lie");
$hailo->learn(\@train_this);
$hailo->learn($_) for @train_this;
# Heavy-duty training interface. Backends may drop some safety
# features like journals or synchronous IO to train faster using
# this mode.
$hailo->train("megahal.trn");
$hailo->train($filehandle);
# Make the brain babble
say $hailo->reply("hello good sir.");
# Just say something at random
say $hailo->reply();
=head1 DESCRIPTION
Hailo is a fast and lightweight Markov engine intended to replace
L<AI::MegaHAL|AI::MegaHAL>. It has a L<Mouse|Mouse> (or
L<Moose|Moose>) based core with pluggable
L<storage|Hailo::Role::Storage>, L<tokenizer|Hailo::Role::Tokenizer>
and L<engine|Hailo::Role::Engine> backends.
It is similar to MegaHAL in functionality, the main differences (with the
default backends) being better scalability, drastically less memory usage,
an improved tokenizer, and tidier output.
With this distribution, you can create, modify, and query Hailo brains. To
use Hailo in event-driven POE applications, you can use the
L<POE::Component::Hailo|POE::Component::Hailo> wrapper. One example is
L<POE::Component::IRC::Plugin::Hailo|POE::Component::IRC::Plugin::Hailo>,
which implements an IRC chat bot.
=head2 Etymology
I<Hailo> is a portmanteau of I<HAL> (as in MegaHAL) and
L<failo|http://identi.ca/failo>.
=head1 Backends
Hailo supports pluggable L<storage|Hailo::Role::Storage> and
L<tokenizer|Hailo::Role::Tokenizer> backends; it also supports a
pluggable L<UI|Hailo::Role::UI> backend which is used by the L<hailo>
command-line utility.
=head2 Storage
Hailo can currently store its data in either a
L<SQLite|Hailo::Storage::SQLite>,
L<PostgreSQL|Hailo::Storage::PostgreSQL> or
L<MySQL|Hailo::Storage::MySQL> database. Some NoSQL backends were
supported in earlier versions, but they were removed as they had no
redeeming quality.
SQLite is the primary target for Hailo. It's much faster and uses fewer
resources than the other two. It's highly recommended that you use it.
See L<Hailo::Storage/"Comparison of backends"> for benchmarks showing
how the various backends compare under different workloads, and how
you can create your own.
=head2 Tokenizer
By default Hailo will use L<the word
tokenizer|Hailo::Tokenizer::Words> to split up input by whitespace,
taking into account things like quotes, sentence terminators and more.
There's also L<the character
tokenizer|Hailo::Tokenizer::Chars>. It's not generally useful for a
conversation bot but can be used to e.g. generate new words given a
list of existing words.
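For example (an editor's illustration, using only the attributes
documented under L</ATTRIBUTES>; the brain file name is made up):

    my $hailo = Hailo->new(
        brain           => 'words.sqlite',
        tokenizer_class => 'Chars',
    );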
=head1 UPGRADING
Hailo makes no promises about brains generated with earlier versions
being compatible with future versions, and due to the way Hailo works
there's no practical way to make that promise. Learning in Hailo is
lossy so an accurate conversion is impossible.
If you're maintaining a Hailo brain that you want to keep using you
should save the input you trained it on and re-train when you upgrade.
Hailo is always going to lose information present in the input you
give it. How input tokens get split up and saved to the storage
backend depends on the version of the tokenizer being used and how
that input gets saved to the database.
For instance if an earlier version of Hailo tokenized C<"foo+bar">
simply as C<"foo+bar"> but a later version split that up into
C<"foo", "+", "bar">, then an input of C<"foo+bar are my favorite
metasyntactic variables"> wouldn't take into account the existing
C<"foo+bar"> string in the database.
Tokenizer changes like this would cause the brains to accumulate
garbage and would leave other parts in a state they wouldn't otherwise
have gotten into.
There have been more drastic changes to the database format itself in
the past.
Having said all that, the database format and the tokenizer are
relatively stable. At the time of writing 0.33 is the latest release
and it's compatible with brains down to at least 0.17. If you're
upgrading and there isn't a big notice about the storage format being
incompatible in the F<Changes> file, your old brains will probably work
just fine.
=head1 ATTRIBUTES
=head2 C<brain>
The name of the brain (file name, database name) to use as storage.
There is no default. Whether this gets used at all depends on the
storage backend; currently only SQLite uses it.
=head2 C<save_on_exit>
A boolean value indicating whether Hailo should save its state before
its object gets destroyed. This defaults to true and will simply call
L<save|/save> at C<DEMOLISH> time.
See L<Hailo::Storage::SQLite/"in_memory"> for how the SQLite backend
uses this option.
=head2 C<order>
The Markov order (chain length) you want to use for an empty brain.
The default is 2.
=head2 C<engine_class>
=head2 C<storage_class>
=head2 C<tokenizer_class>
=head2 C<ui_class>
A short name of the class we use for the engine, storage,
tokenizer or UI backends.
By default this is B<Default> for the engine, B<SQLite> for storage,
B<Words> for the tokenizer and B<ReadLine> for the UI. The UI backend
is only used by the L<hailo> command-line interface.
You can only specify the short name of one of the packages Hailo
itself ships with. If you need another class then just prefix the
package with a plus (Catalyst style), e.g. C<+My::Foreign::Tokenizer>.
=head2 C<engine_args>
=head2 C<storage_args>
=head2 C<tokenizer_args>
=head2 C<ui_args>
diff --git a/lib/Hailo/Engine/Default.pm b/lib/Hailo/Engine/Default.pm
index cd7d730..671324f 100644
--- a/lib/Hailo/Engine/Default.pm
+++ b/lib/Hailo/Engine/Default.pm
@@ -1,345 +1,393 @@
package Hailo::Engine::Default;
use 5.010;
use Any::Moose;
use List::Util qw<min first shuffle>;
use List::MoreUtils qw<uniq>;
with qw[ Hailo::Role::Arguments Hailo::Role::Engine ];
has repeat_limit => (
isa => 'Int',
is => 'rw',
lazy => 1,
default => sub {
my ($self) = @_;
my $order = $self->order;
return min(($order * 10), 50);
}
);
sub BUILD {
my ($self) = @_;
# This performance hack is here because in our tight loops calling
# $self->storage->sth->{...} is actually a significant part of the
# overall program execution time since we're doing two method
# calls and hash dereferences for each call to the database.
my $sth = $self->storage->sth;
while (my ($k, $v) = each %$sth) {
$self->{"_sth_$k"} = $v;
}
return;
}
## no critic (Subroutines::ProhibitExcessComplexity)
sub reply {
my $self = shift;
my $tokens = shift // [];
# we will favor these tokens when making the reply. Shuffle them
# and discard half.
my @key_tokens = do {
my $i = 0;
grep { $i++ % 2 == 0 } shuffle(@$tokens);
};
- my (@key_ids, %token_cache);
- for my $token_info (@key_tokens) {
- my $text = $token_info->[1];
- my $info = $self->_token_similar($text);
- next unless defined $info;
- my ($id, $spacing) = @$info;
- next unless defined $id;
- push @key_ids, $id;
- next if exists $token_cache{$id};
- $token_cache{$id} = [$spacing, $text];
- }
+ my $token_cache = $self->_resolve_input_tokens($tokens);
+ my @key_ids = keys %$token_cache;
# sort the rest by rareness
@key_ids = $self->_find_rare_tokens(\@key_ids, 2);
# get the middle expression
- my $seed_token_id = shift @key_ids;
- my ($orig_expr_id, @token_ids) = $self->_random_expr($seed_token_id);
- return unless defined $orig_expr_id; # we don't know any expressions yet
+ my $pivot_token_id = shift @key_ids;
+ my ($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
+ return unless defined $pivot_expr_id; # we don't know any expressions yet
# remove key tokens we're already using
@key_ids = grep { my $used = $_; !first { $_ == $used } @token_ids } @key_ids;
- my $expr_id = $orig_expr_id;
+ my %expr_cache;
# construct the end of the reply
- $self->_construct_reply('next', $expr_id, \@token_ids, \@key_ids);
+ $self->_construct_reply('next', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# construct the beginning of the reply
- $self->_construct_reply('prev', $expr_id, \@token_ids, \@key_ids);
+ $self->_construct_reply('prev', $pivot_expr_id, \@token_ids, \%expr_cache, \@key_ids);
# translate token ids to token spacing/text
- my @reply = map {
- $token_cache{$_} // ($token_cache{$_} = $self->_token_info($_))
+ my @output = map {
+ $token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
} @token_ids;
- return \@reply;
+ return \@output;
+}
+
+sub _resolve_input_tokens {
+ my ($self, $tokens) = @_;
+ my %token_cache;
+
+ if (@$tokens == 1) {
+ my ($spacing, $text) = @{ $tokens->[0] };
+ my $token_info = $self->_token_resolve($spacing, $text);
+
+ if (defined $token_info) {
+ my ($id, $count) = @$token_info;
+ $token_cache{$id} = [$spacing, $text, $count];
+ }
+ else {
+ # when there's just one token, it could be ';' for example,
+ # which will have normal spacing when it appears alone, but
+ # suffix spacing in a sentence like "those things; foo, bar",
+ # so we'll be a bit more lax here by also looking for any
+ # token that has the same text
+ $token_info = $self->_token_similar($text);
+ if (defined $token_info) {
+ my ($id, $spacing, $count) = @$token_info;
+ $token_cache{$id} = [$spacing, $text, $count];
+ }
+ }
+ }
+ else {
+ for my $token (@$tokens) {
+ my ($spacing, $text) = @$token;
+ my $token_info = $self->_token_resolve($spacing, $text);
+ next if !defined $token_info;
+ my ($id, $count) = @$token_info;
+ $token_cache{$id} = [$spacing, $text, $count];
+ }
+ }
+
+ return \%token_cache;
+}
+
+sub _token_resolve {
+ my ($self, $spacing, $text) = @_;
+
+ $self->{_sth_token_resolve}->execute($spacing, $text);
+ return $self->{_sth_token_resolve}->fetchrow_arrayref;
}
sub _token_info {
my ($self, $id) = @_;
$self->{_sth_token_info}->execute($id);
my @res = $self->{_sth_token_info}->fetchrow_array;
return \@res;
}
sub learn {
my ($self, $tokens) = @_;
my $order = $self->order;
# only learn from inputs which are long enough
return if @$tokens < $order;
my (%token_cache, %expr_cache);
for my $token (@$tokens) {
my $key = join '', @$token; # the key is "$spacing$text"
next if exists $token_cache{$key};
$token_cache{$key} = $self->_token_id_add($token);
}
# process every expression of length $order
for my $i (0 .. @$tokens - $order) {
my @expr = map { $token_cache{ join('', @{ $tokens->[$_] }) } } $i .. $i+$order-1;
my $key = join('_', @expr);
if (!defined $expr_cache{$key}) {
my $expr_id = $self->_expr_id(\@expr);
if (!defined $expr_id) {
$expr_id = $self->_add_expr(\@expr);
$self->{_sth_inc_token_count}->execute($_) for uniq(@expr);
}
$expr_cache{$key} = $expr_id;
}
my $expr_id = $expr_cache{$key};
# add link to next token for this expression, if any
if ($i < @$tokens - $order) {
my $next_id = $token_cache{ join('', @{ $tokens->[$i+$order] }) };
$self->_inc_link('next_token', $expr_id, $next_id);
}
# add link to previous token for this expression, if any
if ($i > 0) {
my $prev_id = $token_cache{ join('', @{ $tokens->[$i-1] }) };
$self->_inc_link('prev_token', $expr_id, $prev_id);
}
# add links to boundary token if appropriate
my $b = $self->storage->_boundary_token_id;
$self->_inc_link('prev_token', $expr_id, $b) if $i == 0;
$self->_inc_link('next_token', $expr_id, $b) if $i == @$tokens-$order;
}
return;
}
# sort token ids based on how rare they are
sub _find_rare_tokens {
my ($self, $token_ids, $min) = @_;
return unless @$token_ids;
my %links;
for my $id (@$token_ids) {
next if exists $links{$id};
$self->{_sth_token_count}->execute($id);
$links{$id} = $self->{_sth_token_count}->fetchrow_array;
}
# remove tokens which are too rare
my @ids = grep { $links{$_} >= $min } @$token_ids;
@ids = sort { $links{$a} <=> $links{$b} } @ids;
return @ids;
}
# increase the link weight between an expression and a token
sub _inc_link {
my ($self, $type, $expr_id, $token_id) = @_;
$self->{"_sth_${type}_count"}->execute($expr_id, $token_id);
my $count = $self->{"_sth_${type}_count"}->fetchrow_array;
if (defined $count) {
$self->{"_sth_${type}_inc"}->execute($expr_id, $token_id);
}
else {
$self->{"_sth_${type}_add"}->execute($expr_id, $token_id);
}
return;
}
# add new expression to the database
sub _add_expr {
my ($self, $token_ids) = @_;
# add the expression
$self->{_sth_add_expr}->execute(@$token_ids);
return $self->storage->dbh->last_insert_id(undef, undef, "expr", undef);
}
# look up an expression id based on tokens
sub _expr_id {
my ($self, $tokens) = @_;
$self->{_sth_expr_id}->execute(@$tokens);
return $self->{_sth_expr_id}->fetchrow_array();
}
# return token id if the token exists
sub _token_id {
my ($self, $token_info) = @_;
$self->{_sth_token_id}->execute(@$token_info);
my $token_id = $self->{_sth_token_id}->fetchrow_array();
return unless defined $token_id;
return $token_id;
}
# get token id (adding the token if it doesn't exist)
sub _token_id_add {
my ($self, $token_info) = @_;
my $token_id = $self->_token_id($token_info);
$token_id = $self->_add_token($token_info) unless defined $token_id;
return $token_id;
}
# return all tokens (regardless of spacing) that consist of this text
sub _token_similar {
my ($self, $token_text) = @_;
$self->{_sth_token_similar}->execute($token_text);
return $self->{_sth_token_similar}->fetchrow_arrayref;
}
# add a new token and return its id
sub _add_token {
my ($self, $token_info) = @_;
$self->{_sth_add_token}->execute(@$token_info);
return $self->storage->dbh->last_insert_id(undef, undef, "token", undef);
}
# return a random expression containing the given token
sub _random_expr {
my ($self, $token_id) = @_;
my $expr;
if (!defined $token_id) {
$self->{_sth_random_expr}->execute();
$expr = $self->{_sth_random_expr}->fetchrow_arrayref();
}
else {
# try the positions in a random order
for my $pos (shuffle 0 .. $self->order-1) {
my $column = "token${pos}_id";
# get a random expression which includes the token at this position
$self->{"_sth_expr_by_$column"}->execute($token_id);
$expr = $self->{"_sth_expr_by_$column"}->fetchrow_arrayref();
last if defined $expr;
}
}
return unless defined $expr;
return @$expr;
}
# return a new next/previous token
sub _pos_token {
my ($self, $pos, $expr_id, $key_tokens) = @_;
$self->{"_sth_${pos}_token_get"}->execute($expr_id);
my $pos_tokens = $self->{"_sth_${pos}_token_get"}->fetchall_arrayref();
if (defined $key_tokens) {
for my $i (0 .. $#{ $key_tokens }) {
my $want_id = $key_tokens->[$i];
my @ids = map { $_->[0] } @$pos_tokens;
my $has_id = grep { $_ == $want_id } @ids;
next unless $has_id;
return splice @$key_tokens, $i, 1;
}
}
my @novel_tokens;
for my $token (@$pos_tokens) {
push @novel_tokens, ($token->[0]) x $token->[1];
}
return $novel_tokens[rand @novel_tokens];
}
sub _construct_reply {
- my ($self, $what, $expr_id, $token_ids, $key_ids) = @_;
+ my ($self, $what, $expr_id, $token_ids, $expr_cache, $key_ids) = @_;
my $order = $self->order;
my $repeat_limit = $self->repeat_limit;
my $boundary_token = $self->storage->_boundary_token_id;
my $i = 0;
while (1) {
if (($i % $order) == 0 and
(($i >= $repeat_limit * 3) ||
($i >= $repeat_limit and uniq(@$token_ids) <= $order))) {
last;
}
my $id = $self->_pos_token($what, $expr_id, $key_ids);
- last if $id eq $boundary_token;
+ last if $id == $boundary_token;
+ my @ids;
given ($what) {
when ('next') {
push @$token_ids, $id;
- $expr_id = $self->_expr_id([@$token_ids[-$order..-1]]);
+ @ids = @$token_ids[-$order..-1];
}
when ('prev') {
unshift @$token_ids, $id;
- $expr_id = $self->_expr_id([@$token_ids[0..$order-1]]);
+ @ids = @$token_ids[0..$order-1];
}
}
+
+ my $key = join '_', @ids;
+ if (!defined $expr_cache->{$key}) {
+ $expr_cache->{$key} = $self->_expr_id(\@ids);
+ }
+ $expr_id = $expr_cache->{$key};
} continue {
$i++;
}
return;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Default - The default engine backend for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>.
+It generates the reply in one go, while favoring some of the tokens in the
+input, and returns it. It is fast and the replies are decent, but you can
+get better replies (at the cost of speed) with the
+L<Scored|Hailo::Engine::Scored> engine.
+
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/lib/Hailo/Engine/Scored.pm b/lib/Hailo/Engine/Scored.pm
new file mode 100644
index 0000000..af20535
--- /dev/null
+++ b/lib/Hailo/Engine/Scored.pm
@@ -0,0 +1,275 @@
+package Hailo::Engine::Scored;
+
+use 5.010;
+use Any::Moose;
+use List::Util qw<sum>;
+use List::MoreUtils qw<any>;
+use Time::HiRes qw<gettimeofday tv_interval>;
+
+extends 'Hailo::Engine::Default';
+
+after BUILD => sub {
+ my ($self) = @_;
+ my %args = $self->arguments;
+
+ if (defined $args{iterations} && defined $args{interval}) {
+ die __PACKAGE__.": You can only specify one of 'iterations' and 'interval'\n";
+ }
+ return;
+};
+
+sub reply {
+ my $self = shift;
+ my $tokens = shift // [];
+
+ # see if we recognize any of the input tokens
+ my $token_cache = $self->_resolve_input_tokens($tokens);
+ my @input_token_ids = keys %$token_cache;
+ my @token_counts;
+
+ # let's select potential pivot tokens
+ if (keys %$token_cache) {
+ # we got some known tokens, let's prefer the ones with normal
+ # spacing, i.e. words instead of things like ',' or '('.
+ @token_counts = map {
+ $token_cache->{$_}[0] == 0 ? [$_, $token_cache->{$_}[2]] : ()
+ } keys %$token_cache;
+
+ if (!@token_counts) {
+ # no known words in the input, so we'll settle for the rest
+ @token_counts = map { [$_, $token_cache->{$_}[2]] } keys %$token_cache;
+ }
+
+ }
+
+ my $token_probs = $self->_get_pivot_probabilites(\@token_counts);
+ my @started = gettimeofday();
+ my $iterations = 0;
+
+ my $done;
+ my %args = $self->arguments;
+ if (!defined $args{iterations} && !defined $args{interval}) {
+ # construct replies for half a second by default
+ $args{interval} = 0.5;
+ }
+
+ if (defined $args{iterations}) {
+ $done = sub {
+ return 1 if $iterations == $args{iterations};
+ };
+ }
+ else {
+ $done = sub {
+ my $elapsed = tv_interval(\@started, [gettimeofday]);
+ return 1 if $elapsed >= $args{interval};
+ };
+ }
+
+ my (%link_cache, %expr_cache, $best_score, $best_reply);
+ while (1) {
+ $iterations++;
+ my $reply = $self->_generate_reply($token_probs, \%expr_cache);
+ return if !defined $reply; # we don't know any expressions yet
+
+ my $score = $self->_evaluate_reply(\@input_token_ids, $reply, \%link_cache);
+
+ if (defined $best_reply && $self->_too_similar(\@input_token_ids, $reply)) {
+ last if $done->();
+ next;
+ }
+
+ if (!defined $best_score || $score > $best_score) {
+ $best_score = $score;
+ $best_reply = $reply;
+ }
+
+ last if $done->();
+ }
+
+ # translate token ids to token spacing/text
+ my @output = map {
+ $token_cache->{$_} // ($token_cache->{$_} = $self->_token_info($_))
+ } @$best_reply;
+ return \@output;
+}
+
+# Calculate the probability we wish to pick each token as the pivot.
+# This uses -log2(p) as a method for inverting token probability,
+# ensuring that our rarer tokens are picked more often.
+sub _get_pivot_probabilites {
+ my ($self, $token_counts) = @_;
+
+ return [] if !@$token_counts;
+ return [[$token_counts->[0], 1]] if @$token_counts == 1;
+
+ # calculate the (non-normalized) probability we want each to occur
+ my $count_sum = sum(map { $_->[1] } @$token_counts);
+ my $p = [];
+ my $p_sum = 0;
+ for my $token_count (map { $_->[1] } @$token_counts) {
+ my $token_p = -log(($token_count/$count_sum))/log(2);
+ push @$p, $token_p;
+ $p_sum += $token_p;
+ }
+
+ # normalize the probabilities
+ my @probs = map {
+ [$token_counts->[$_], $p->[$_] / $p_sum];
+ } 0..$#{ $token_counts };
+
+ return \@probs;
+}
+
+sub _generate_reply {
+ my ($self, $token_probs, $expr_cache) = @_;
+
+    my ($pivot_expr_id, @token_ids);
+ if (@$token_probs) {
+ my $pivot_token_id = $self->_choose_pivot($token_probs);
+ ($pivot_expr_id, @token_ids) = $self->_random_expr($pivot_token_id);
+ }
+ else {
+ ($pivot_expr_id, @token_ids) = $self->_random_expr();
+ return if !defined $pivot_expr_id; # no expressions in the database
+ }
+
+ # construct the end of the reply
+ $self->_construct_reply('next', $pivot_expr_id, \@token_ids, $expr_cache);
+
+ # construct the beginning of the reply
+ $self->_construct_reply('prev', $pivot_expr_id, \@token_ids, $expr_cache);
+
+ return \@token_ids;
+}
+
+sub _evaluate_reply {
+ my ($self, $input_token_ids, $reply_token_ids, $cache) = @_;
+ my $order = $self->order;
+ my $score = 0;
+
+ for my $idx (0 .. $#{ $reply_token_ids } - $order) {
+ my $next_token_id = $reply_token_ids->[$idx];
+
+ if (any { $_ == $next_token_id } @$input_token_ids) {
+ my @expr = @$reply_token_ids[$idx .. $idx+$order-1];
+ my $key = join('_', @expr)."-$next_token_id";
+
+ if (!defined $cache->{$key}) {
+ $cache->{$key} = $self->_expr_token_probability('next', \@expr, $next_token_id);
+ }
+ if ($cache->{$key} > 0) {
+ $score -= log($cache->{$key})/log(2);
+ }
+ }
+ }
+
+ for my $idx (0 .. $#{ $reply_token_ids } - $order) {
+ my $prev_token_id = $reply_token_ids->[$idx];
+
+ if (any { $_ == $prev_token_id } @$input_token_ids) {
+ my @expr = @$reply_token_ids[$idx+1 .. $idx+$order];
+ my $key = join('_', @expr)."-$prev_token_id";
+
+ if (!defined $cache->{$key}) {
+ $cache->{$key} = $self->_expr_token_probability('prev', \@expr, $prev_token_id);
+ }
+ if ($cache->{$key} > 0) {
+ $score -= log($cache->{$key})/log(2);
+ }
+ }
+ }
+
+ return $score;
+}
+
+sub _expr_token_probability {
+ my ($self, $pos, $expr, $token_id) = @_;
+ my $order = $self->order;
+
+ my $expr_id = $self->_expr_id($expr);
+
+ $self->{"_sth_${pos}_token_count"}->execute($expr_id, $token_id);
+ my $expr2token = $self->{"_sth_${pos}_token_count"}->fetchrow_array();
+ return 0 if !$expr2token;
+
+ $self->{"_sth_${pos}_token_links"}->execute($expr_id);
+ my $expr2all = $self->{"_sth_${pos}_token_links"}->fetchrow_array();
+ return $expr2token / $expr2all;
+}
+
+sub _choose_pivot {
+ my ($self, $token_probs) = @_;
+
+ my $random = rand;
+ my $p = 0;
+ for my $token (@$token_probs) {
+ $p += $token->[1];
+ return $token->[0][0] if $p > $random;
+ }
+
+ return;
+}
+
+sub _too_similar {
+ my ($self, $input_token_ids, $reply_token_ids) = @_;
+
+ my %input_token_ids = map { +$_ => 1 } @$input_token_ids;
+
+ for my $reply_token_id (@$reply_token_ids) {
+ return if !$input_token_ids{$reply_token_id};
+ }
+ return 1;
+}
+
+__PACKAGE__->meta->make_immutable;
+
+=encoding utf8
+
+=head1 NAME
+
+Hailo::Engine::Scored - MegaHAL-style reply scoring for L<Hailo|Hailo>
+
+=head1 DESCRIPTION
+
+This backend implements the logic of replying to and learning from
+input using the resources given to the L<engine
+roles|Hailo::Role::Engine>. It inherits from
+L<Hailo::Engine::Default|Hailo::Engine::Default> and only overrides its
+C<reply> method.
+
+It generates multiple replies and applies a scoring algorithm to them, then
+returns the best one, similar to MegaHAL.
+
+=head1 ATTRIBUTES
+
+=head2 C<engine_args>
+
+This is a hash reference which can have the following keys:
+
+=head3 C<iterations>
+
+The number of replies to generate before returning the best one.
+
+=head3 C<interval>
+
+The time (in seconds) to spend on generating replies before returning the
+best one.
+
+You cannot specify both C<iterations> and C<interval> at the same time. If
+neither is specified, a default C<interval> of 0.5 seconds will be used.
+
+=head1 AUTHORS
+
+Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
+
+This module was based on code from Peter Teichman's Cobe project.
+
+=head1 LICENSE AND COPYRIGHT
+
+Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
+E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
+
+This program is free software, you can redistribute it and/or modify
+it under the same terms as Perl itself.
+
+=cut
diff --git a/lib/Hailo/Storage/Schema.pm b/lib/Hailo/Storage/Schema.pm
index afdea2d..6f606e8 100644
--- a/lib/Hailo/Storage/Schema.pm
+++ b/lib/Hailo/Storage/Schema.pm
@@ -1,181 +1,184 @@
package Hailo::Storage::Schema;
use 5.010;
use strict;
## Soup to spawn the database itself / create statement handles
sub deploy {
my (undef, $dbd, $dbh, $order) = @_;
my @orders = (0 .. $order-1);
my $int_primary_key = "INTEGER PRIMARY KEY AUTOINCREMENT";
$int_primary_key = "INTEGER PRIMARY KEY AUTO_INCREMENT" if $dbd eq "mysql";
$int_primary_key = "SERIAL UNIQUE" if $dbd eq "Pg";
my $text = 'TEXT';
$text = 'VARCHAR(255)' if $dbd eq 'mysql';
my $text_primary = 'TEXT NOT NULL PRIMARY KEY';
$text_primary = 'TEXT NOT NULL' if $dbd eq 'mysql';
my @tables;
push @tables => <<"TABLE";
CREATE TABLE info (
attribute $text_primary,
text TEXT NOT NULL
);
TABLE
push @tables => <<"TABLE";
CREATE TABLE token (
id $int_primary_key,
spacing INTEGER NOT NULL,
text $text NOT NULL,
count INTEGER NOT NULL
);
TABLE
my $token_n = join ",\n ", map { "token${_}_id INTEGER NOT NULL REFERENCES token (id)" } @orders;
push @tables => <<"TABLE";
CREATE TABLE expr (
id $int_primary_key,
$token_n
);
TABLE
push @tables => <<"TABLE";
CREATE TABLE next_token (
id $int_primary_key,
expr_id INTEGER NOT NULL REFERENCES expr (id),
token_id INTEGER NOT NULL REFERENCES token (id),
count INTEGER NOT NULL
);
TABLE
push @tables => <<"TABLE";
CREATE TABLE prev_token (
id $int_primary_key,
expr_id INTEGER NOT NULL REFERENCES expr (id),
token_id INTEGER NOT NULL REFERENCES token (id),
count INTEGER NOT NULL
);
TABLE
for my $i (@orders) {
push @tables => "CREATE INDEX expr_token${i}_id on expr (token${i}_id);"
}
my $columns = join(', ', map { "token${_}_id" } @orders);
push @tables => "CREATE INDEX expr_token_ids on expr ($columns);";
push @tables => 'CREATE INDEX token_text on token (text);';
push @tables => 'CREATE INDEX next_token_expr_id ON next_token (expr_id);';
push @tables => 'CREATE INDEX prev_token_expr_id ON prev_token (expr_id);';
for (@tables) {
$dbh->do($_);
}
return;
}
# create statement handle objects
sub sth {
my (undef, $dbd, $dbh, $order) = @_;
my @orders = (0 .. $order-1);
my @columns = map { "token${_}_id" } 0 .. $order-1;
my $columns = join(', ', @columns);
my @ids = join(', ', ('?') x @columns);
my $ids = join(', ', @ids);
my $q_rand = 'RANDOM()';
$q_rand = 'RAND()' if $dbd eq 'mysql';
my $q_rand_id = "(abs($q_rand) % (SELECT max(id) FROM expr))";
- $q_rand_id = "(random()*id+1)::int" if $dbd eq 'Pg';
+ $q_rand_id = "(random()*id+1)::int" if $dbd eq 'Pg';
my %state = (
set_info => qq[INSERT INTO info (attribute, text) VALUES (?, ?);],
random_expr => qq[SELECT * FROM expr WHERE id >= $q_rand_id LIMIT 1;],
+ token_resolve => qq[SELECT id, count FROM token WHERE spacing = ? AND text = ?;],
token_id => qq[SELECT id FROM token WHERE spacing = ? AND text = ?;],
token_info => qq[SELECT spacing, text FROM token WHERE id = ?;],
- token_similar => qq[SELECT id, spacing FROM token WHERE text = ? ORDER BY $q_rand LIMIT 1;] ,
+ token_similar => qq[SELECT id, spacing, count FROM token WHERE text = ? ORDER BY $q_rand LIMIT 1;] ,
add_token => qq[INSERT INTO token (spacing, text, count) VALUES (?, ?, 0)],
inc_token_count => qq[UPDATE token SET count = count + 1 WHERE id = ?],
# ->stats()
expr_total => qq[SELECT COUNT(*) FROM expr;],
token_total => qq[SELECT COUNT(*) FROM token;],
prev_total => qq[SELECT COUNT(*) FROM prev_token;],
next_total => qq[SELECT COUNT(*) FROM next_token;],
# Defaults, overridden in SQLite
last_expr_rowid => qq[SELECT id FROM expr ORDER BY id DESC LIMIT 1;],
last_token_rowid => qq[SELECT id FROM token ORDER BY id DESC LIMIT 1;],
+ next_token_links => qq[SELECT SUM(count) FROM next_token WHERE expr_id = ?;],
+ prev_token_links => qq[SELECT SUM(count) FROM prev_token WHERE expr_id = ?;],
next_token_count => qq[SELECT count FROM next_token WHERE expr_id = ? AND token_id = ?;],
prev_token_count => qq[SELECT count FROM prev_token WHERE expr_id = ? AND token_id = ?;],
next_token_inc => qq[UPDATE next_token SET count = count + 1 WHERE expr_id = ? AND token_id = ?],
prev_token_inc => qq[UPDATE prev_token SET count = count + 1 WHERE expr_id = ? AND token_id = ?],
next_token_add => qq[INSERT INTO next_token (expr_id, token_id, count) VALUES (?, ?, 1);],
prev_token_add => qq[INSERT INTO prev_token (expr_id, token_id, count) VALUES (?, ?, 1);],
next_token_get => qq[SELECT token_id, count FROM next_token WHERE expr_id = ?;],
prev_token_get => qq[SELECT token_id, count FROM prev_token WHERE expr_id = ?;],
token_count => qq[SELECT count FROM token WHERE id = ?;],
add_expr => qq[INSERT INTO expr ($columns) VALUES ($ids)],
expr_id => qq[SELECT id FROM expr WHERE ] . join(' AND ', map { "token${_}_id = ?" } @orders),
);
for (@orders) {
$state{"expr_by_token${_}_id"} = qq[SELECT * FROM expr WHERE token${_}_id = ? ORDER BY $q_rand LIMIT 1;];
}
# DBD specific queries / optimizations / munging
given ($dbd) {
when ('SQLite') {
# Optimize these for SQLite
$state{expr_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'expr';];
$state{token_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'token';];
$state{prev_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'prev_token';];
$state{next_total} = qq[SELECT seq FROM sqlite_sequence WHERE name = 'next_token';];
}
}
# Sort to make error output easier to read if this fails. The
# order doesn't matter.
my @queries = sort keys %state;
my %sth = map { $_ => $dbh->prepare($state{$_}) } @queries;
return \%sth;
}
1;
=head1 NAME
Hailo::Storage::Schema - Deploy the database schema Hailo uses
=head1 DESCRIPTION
Implements functions to create the database schema and prepared
database queries L<Hailo::Storage> needs.
This class is internal to Hailo and has no public interface.
=head1 AUTHOR
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/01_compile.t b/t/01_compile.t
index b9c4634..68f723f 100644
--- a/t/01_compile.t
+++ b/t/01_compile.t
@@ -1,61 +1,62 @@
use 5.010;
use strict;
use warnings;
use Class::Load 'try_load_class';
use File::Spec::Functions 'catfile';
use Test::More;
use Test::Script;
# find lib -type f | perl -pe 's[^lib/][ ]; s[.pm$][]; s[/][::]g'
my @classes = qw(
Hailo
Hailo::Storage::MySQL
Hailo::Storage::SQLite
Hailo::Storage::Schema
Hailo::Storage::PostgreSQL
Hailo::Command
Hailo::Role::Engine
Hailo::Role::UI
Hailo::Role::Storage
Hailo::Role::Arguments
Hailo::Role::Tokenizer
Hailo::Storage
Hailo::UI::ReadLine
Hailo::Engine::Default
+ Hailo::Engine::Scored
Hailo::Tokenizer::Words
Hailo::Tokenizer::Chars
);
plan tests => scalar(@classes) * 3 + 1;
my $i = 1; for (@classes) {
SKIP: {
skip "Couldn't compile optional dependency $_", 1 if !try_load_class($_);
pass("Loaded class $_");
}
}
SKIP: {
no strict 'refs';
unless (defined ${"Hailo::VERSION"}) {
skip "Can't test \$VERSION from a Git checkout", 2 * scalar(@classes);
}
my $j = 1; for (@classes) {
like(${"${_}::VERSION"}, qr/^[0-9.]+$/, "$_ has a \$VERSION that makes sense");
cmp_ok(
${"${_}::VERSION"},
'eq',
$Hailo::VERSION,
qq[$_\::VERSION matches \$Hailo::VERSION. If not use perl-reversion --current ${"${_}::VERSION"} -bump]
);
}
}
SKIP: {
skip "There's no blib", 1 unless -d "blib" and -f catfile qw(blib script hailo);
script_compiles(catfile('bin', 'hailo'));
};
|
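The Scored engine added above is selected through Hailo's normal
attributes; here's a minimal usage sketch (the pre-trained brain file
name is hypothetical):
    use Hailo;
    # bound the scored search by iteration count; 'interval'
    # (in seconds) may be given instead, but never both at once
    my $hailo = Hailo->new(
        brain        => 'brain.sqlite',
        engine_class => 'Scored',
        engine_args  => { iterations => 10 },
    );
    print $hailo->reply('megahal'), "\n";
As the comments in _get_pivot_probabilites note, the engine inverts
pivot token probabilities with -log2(p), so rarer input tokens are
favored as starting points for candidate replies.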
hinrik/hailo | ecccfc9bd6daa66fc97832635f0838cd8241526e | Use an expression cache here | diff --git a/Changes b/Changes
index 9905bdf..d8f37dc 100644
--- a/Changes
+++ b/Changes
@@ -1,518 +1,520 @@
Revision history for Hailo
{{$NEXT}}
- Preserve casing of Emacs key sequences like "C-u"
+ - Speed up the learning of repetitive sentences by caching more
+
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize 'â' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08,
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC3::Run calling a script that called FindBin didn't work
as expected.
diff --git a/lib/Hailo/Engine/Default.pm b/lib/Hailo/Engine/Default.pm
index 143f7c7..cd7d730 100644
--- a/lib/Hailo/Engine/Default.pm
+++ b/lib/Hailo/Engine/Default.pm
@@ -1,340 +1,345 @@
package Hailo::Engine::Default;
use 5.010;
use Any::Moose;
use List::Util qw<min first shuffle>;
use List::MoreUtils qw<uniq>;
with qw[ Hailo::Role::Arguments Hailo::Role::Engine ];
has repeat_limit => (
isa => 'Int',
is => 'rw',
lazy => 1,
default => sub {
my ($self) = @_;
my $order = $self->order;
return min(($order * 10), 50);
}
);
sub BUILD {
my ($self) = @_;
# This performance hack is here because in our tight loops calling
# $self->storage->sth->{...} is actually a significant part of the
# overall program execution time since we're doing two method
# calls and hash dereferences for each call to the database.
my $sth = $self->storage->sth;
while (my ($k, $v) = each %$sth) {
$self->{"_sth_$k"} = $v;
}
return;
}
## no critic (Subroutines::ProhibitExcessComplexity)
sub reply {
my $self = shift;
my $tokens = shift // [];
# we will favor these tokens when making the reply. Shuffle them
# and discard half.
my @key_tokens = do {
my $i = 0;
grep { $i++ % 2 == 0 } shuffle(@$tokens);
};
my (@key_ids, %token_cache);
for my $token_info (@key_tokens) {
my $text = $token_info->[1];
my $info = $self->_token_similar($text);
next unless defined $info;
my ($id, $spacing) = @$info;
next unless defined $id;
push @key_ids, $id;
next if exists $token_cache{$id};
$token_cache{$id} = [$spacing, $text];
}
# sort the rest by rareness
@key_ids = $self->_find_rare_tokens(\@key_ids, 2);
# get the middle expression
my $seed_token_id = shift @key_ids;
my ($orig_expr_id, @token_ids) = $self->_random_expr($seed_token_id);
return unless defined $orig_expr_id; # we don't know any expressions yet
# remove key tokens we're already using
@key_ids = grep { my $used = $_; !first { $_ == $used } @token_ids } @key_ids;
my $expr_id = $orig_expr_id;
# construct the end of the reply
$self->_construct_reply('next', $expr_id, \@token_ids, \@key_ids);
# construct the beginning of the reply
$self->_construct_reply('prev', $expr_id, \@token_ids, \@key_ids);
# translate token ids to token spacing/text
my @reply = map {
$token_cache{$_} // ($token_cache{$_} = $self->_token_info($_))
} @token_ids;
return \@reply;
}
sub _token_info {
my ($self, $id) = @_;
$self->{_sth_token_info}->execute($id);
my @res = $self->{_sth_token_info}->fetchrow_array;
return \@res;
}
sub learn {
my ($self, $tokens) = @_;
my $order = $self->order;
# only learn from inputs which are long enough
return if @$tokens < $order;
- my %token_cache;
+ my (%token_cache, %expr_cache);
for my $token (@$tokens) {
- my $key = join '', @$token;
+ my $key = join '', @$token; # the key is "$spacing$text"
next if exists $token_cache{$key};
$token_cache{$key} = $self->_token_id_add($token);
}
# process every expression of length $order
for my $i (0 .. @$tokens - $order) {
my @expr = map { $token_cache{ join('', @{ $tokens->[$_] }) } } $i .. $i+$order-1;
- my $expr_id = $self->_expr_id(\@expr);
+ my $key = join('_', @expr);
- if (!defined $expr_id) {
- $expr_id = $self->_add_expr(\@expr);
- $self->{_sth_inc_token_count}->execute($_) for uniq(@expr);
+ if (!defined $expr_cache{$key}) {
+ my $expr_id = $self->_expr_id(\@expr);
+ if (!defined $expr_id) {
+ $expr_id = $self->_add_expr(\@expr);
+ $self->{_sth_inc_token_count}->execute($_) for uniq(@expr);
+ }
+ $expr_cache{$key} = $expr_id;
}
+ my $expr_id = $expr_cache{$key};
# add link to next token for this expression, if any
if ($i < @$tokens - $order) {
my $next_id = $token_cache{ join('', @{ $tokens->[$i+$order] }) };
$self->_inc_link('next_token', $expr_id, $next_id);
}
# add link to previous token for this expression, if any
if ($i > 0) {
my $prev_id = $token_cache{ join('', @{ $tokens->[$i-1] }) };
$self->_inc_link('prev_token', $expr_id, $prev_id);
}
# add links to boundary token if appropriate
my $b = $self->storage->_boundary_token_id;
$self->_inc_link('prev_token', $expr_id, $b) if $i == 0;
$self->_inc_link('next_token', $expr_id, $b) if $i == @$tokens-$order;
}
return;
}
# sort token ids based on how rare they are
sub _find_rare_tokens {
my ($self, $token_ids, $min) = @_;
return unless @$token_ids;
my %links;
for my $id (@$token_ids) {
next if exists $links{$id};
$self->{_sth_token_count}->execute($id);
$links{$id} = $self->{_sth_token_count}->fetchrow_array;
}
# remove tokens which are too rare
my @ids = grep { $links{$_} >= $min } @$token_ids;
@ids = sort { $links{$a} <=> $links{$b} } @ids;
return @ids;
}
# increase the link weight between an expression and a token
sub _inc_link {
my ($self, $type, $expr_id, $token_id) = @_;
$self->{"_sth_${type}_count"}->execute($expr_id, $token_id);
my $count = $self->{"_sth_${type}_count"}->fetchrow_array;
if (defined $count) {
$self->{"_sth_${type}_inc"}->execute($expr_id, $token_id);
}
else {
$self->{"_sth_${type}_add"}->execute($expr_id, $token_id);
}
return;
}
# add new expression to the database
sub _add_expr {
my ($self, $token_ids) = @_;
# add the expression
$self->{_sth_add_expr}->execute(@$token_ids);
return $self->storage->dbh->last_insert_id(undef, undef, "expr", undef);
}
# look up an expression id based on tokens
sub _expr_id {
my ($self, $tokens) = @_;
$self->{_sth_expr_id}->execute(@$tokens);
return $self->{_sth_expr_id}->fetchrow_array();
}
# return token id if the token exists
sub _token_id {
my ($self, $token_info) = @_;
$self->{_sth_token_id}->execute(@$token_info);
my $token_id = $self->{_sth_token_id}->fetchrow_array();
return unless defined $token_id;
return $token_id;
}
# get token id (adding the token if it doesn't exist)
sub _token_id_add {
my ($self, $token_info) = @_;
my $token_id = $self->_token_id($token_info);
$token_id = $self->_add_token($token_info) unless defined $token_id;
return $token_id;
}
# return all tokens (regardless of spacing) that consist of this text
sub _token_similar {
my ($self, $token_text) = @_;
$self->{_sth_token_similar}->execute($token_text);
return $self->{_sth_token_similar}->fetchrow_arrayref;
}
# add a new token and return its id
sub _add_token {
my ($self, $token_info) = @_;
$self->{_sth_add_token}->execute(@$token_info);
return $self->storage->dbh->last_insert_id(undef, undef, "token", undef);
}
# return a random expression containing the given token
sub _random_expr {
my ($self, $token_id) = @_;
my $expr;
if (!defined $token_id) {
$self->{_sth_random_expr}->execute();
$expr = $self->{_sth_random_expr}->fetchrow_arrayref();
}
else {
# try the positions in a random order
for my $pos (shuffle 0 .. $self->order-1) {
my $column = "token${pos}_id";
# get a random expression which includes the token at this position
$self->{"_sth_expr_by_$column"}->execute($token_id);
$expr = $self->{"_sth_expr_by_$column"}->fetchrow_arrayref();
last if defined $expr;
}
}
return unless defined $expr;
return @$expr;
}
# return a new next/previous token
sub _pos_token {
my ($self, $pos, $expr_id, $key_tokens) = @_;
$self->{"_sth_${pos}_token_get"}->execute($expr_id);
my $pos_tokens = $self->{"_sth_${pos}_token_get"}->fetchall_arrayref();
if (defined $key_tokens) {
for my $i (0 .. $#{ $key_tokens }) {
my $want_id = $key_tokens->[$i];
my @ids = map { $_->[0] } @$pos_tokens;
my $has_id = grep { $_ == $want_id } @ids;
next unless $has_id;
return splice @$key_tokens, $i, 1;
}
}
my @novel_tokens;
for my $token (@$pos_tokens) {
push @novel_tokens, ($token->[0]) x $token->[1];
}
return $novel_tokens[rand @novel_tokens];
}
sub _construct_reply {
my ($self, $what, $expr_id, $token_ids, $key_ids) = @_;
my $order = $self->order;
my $repeat_limit = $self->repeat_limit;
my $boundary_token = $self->storage->_boundary_token_id;
my $i = 0;
while (1) {
if (($i % $order) == 0 and
(($i >= $repeat_limit * 3) ||
($i >= $repeat_limit and uniq(@$token_ids) <= $order))) {
last;
}
my $id = $self->_pos_token($what, $expr_id, $key_ids);
last if $id eq $boundary_token;
given ($what) {
when ('next') {
push @$token_ids, $id;
$expr_id = $self->_expr_id([@$token_ids[-$order..-1]]);
}
when ('prev') {
unshift @$token_ids, $id;
$expr_id = $self->_expr_id([@$token_ids[0..$order-1]]);
}
}
} continue {
$i++;
}
return;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Engine::Default - The default engine backend for L<Hailo|Hailo>
=head1 DESCRIPTION
This backend implements the logic of replying to and learning from
input using the resources given to the L<engine
roles|Hailo::Role::Engine>.
=head1 AUTHORS
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson and
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
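The pattern this commit applies in learn(), reduced to an
illustrative standalone helper (the wrapper itself is not in the
diff, only the caching idiom is):
    # memoize _expr_id() lookups under a key of underscore-joined
    # token ids so repeated n-grams hit the database only once
    sub _cached_expr_id {
        my ($self, $expr_cache, $token_ids) = @_;
        my $key = join '_', @$token_ids;
        if (!defined $expr_cache->{$key}) {
            $expr_cache->{$key} = $self->_expr_id($token_ids);
        }
        return $expr_cache->{$key};
    }
Repetitive training input produces the same expressions over and
over, which is why the Changes entry above calls this "caching more".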
hinrik/hailo | 145cffe2f530e58ca5f4db01e4da58f8e7ac6686 | Preserve casing of Emacs key sequences like "C-u" | diff --git a/Changes b/Changes
index 9787f0e..9905bdf 100644
--- a/Changes
+++ b/Changes
@@ -1,516 +1,518 @@
Revision history for Hailo
{{$NEXT}}
+ - Preserve casing of Emacs key sequences like "C-u"
+
0.67 2011-04-27 23:33:08
- Word tokenizer: Improve matching of Perl class names
- Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize 'â' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08,
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
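A minimal sketch of the /o trick described in this entry; the loop and input handling are invented for illustration, while $RE{URI} comes from Regexp::Common:

use strict;
use warnings;
use Regexp::Common qw(URI);

while (my $chunk = <STDIN>) {
    # Without /o the $RE{URI} variable would be re-interpolated and the
    # pattern recompiled on every iteration; with /o it is compiled once
    # for the lifetime of the process.
    print $chunk if $chunk =~ /^$RE{URI}/o;
}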
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to an invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
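A rough sketch of the ->fetchall_hashref vs. ->fetchall_arrayref swap described in this entry; the DBI calls are real, but the database file, table, and column names are invented for illustration:

use strict;
use warnings;
use DBI;

my $dbh = DBI->connect('dbi:SQLite:dbname=example.sqlite', '', '',
                       { RaiseError => 1 });
my $sth = $dbh->prepare('SELECT id, text FROM token');

# Slower in a tight loop: builds one hash per row, keyed by column name
$sth->execute;
my $by_id = $sth->fetchall_hashref('id');

# Faster: plain array refs, no per-row hash construction
$sth->execute;
for my $row (@{ $sth->fetchall_arrayref }) {
    my ($id, $text) = @$row;
    # use $id and $text here
}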
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility, but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC3::Run calling a script that called FindBin didn't work
as expected.
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 0eb45cd..c99c0a2 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,226 +1,225 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
-my $UPPER_NONW = qr/^ \p{Upper}{2,} \W+ (?: \p{Upper}* \p{Lower} ) /x;
+my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”„«»」』›‚]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/(?:$WORD_TYPES)|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)*||$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
my $SEPARATOR = qr/\s+|$ELLIPSIS/;
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
- # {2,} so it doesn't match I'm
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/$SEPARATOR$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$SEPARATOR|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
$reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
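To see what the $UPPER_NONW change in the diff above does, here is a small stand-alone check; the pattern is copied from the new source, and the sample words come from the test suite. The old ^ \p{Upper}{2,} form required two leading capitals, while the new form accepts a single one but uses the (?<!I') lookbehind to keep contractions of "I" out of the acronym-preserving path:

use strict;
use warnings;

my $UPPER_NONW = qr/^ (?:\p{Upper}+ \W+)(?<!I') (?: \p{Upper}* \p{Lower} ) /x;

for my $word ("KIA'd", "FYIQ'ed", "I'm") {
    printf "%-8s => %s\n", $word,
        $word =~ $UPPER_NONW ? 'casing preserved' : 'eligible for lower-casing';
}
# KIA'd    => casing preserved
# FYIQ'ed  => casing preserved
# I'm      => eligible for lower-casing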
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index f2c1a61..df29974 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,444 +1,449 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example . com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion".'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion".'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<. com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources . list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
+ [
+ "It's as simple as C-u C-c C-t C-t t",
+ [qw<it's as simple as C-u C-c C-t C-t t>],
+ "It's as simple as C-u C-c C-t C-t t.",
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my $tokens = $toke->make_tokens($test->[0]);
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
my $output = $toke->make_output($tokens);
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
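For reference, the round-trip these tests exercise, reduced to a stand-alone snippet; the input and expected output are taken from one of the test cases above:

use 5.010;
use utf8;
use strict;
use warnings;
use Hailo::Tokenizer::Words;

my $toke   = Hailo::Tokenizer::Words->new();
my $tokens = $toke->make_tokens('on example.com? yes');

# each token is a [spacing_attribute, text] pair
say join ' ', map { $_->[1] } @$tokens;   # on example . com ? yes
say $toke->make_output($tokens);          # On example.com? Yes.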
|
hinrik/hailo | 83f7a650f3aee472551ca553db4282c687bb5d86 | Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility | diff --git a/Changes b/Changes
index a44ac67..63d2b37 100644
--- a/Changes
+++ b/Changes
@@ -1,517 +1,518 @@
Revision history for Hailo
{{$NEXT}}
- Word tokenizer: Improve matching of Perl class names
+ - Require MooseX::StrictConstructor 0.16 for Moose 2.0 compatibility
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize '–' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB")).
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode; it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08,
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use fewer subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to an invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty, which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an OS X
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use fewer method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write an XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility, but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC3::Run calling a script that called FindBin didn't work
as expected.
- Add more testing including a really basic test for DBIx::Class
debugging (from the dbix-class branch) and making TAP output
more verbose.
diff --git a/dist.ini b/dist.ini
index d8274dd..95bb363 100644
--- a/dist.ini
+++ b/dist.ini
@@ -1,84 +1,84 @@
name = Hailo
author = Hinrik Örn Sigurðsson <[email protected]>
author = Ævar Arnfjörð Bjarmason <[email protected]>
copyright_holder = Hinrik Örn Sigurðsson and Ævar Arnfjörð Bjarmason
license = Perl_5
[@AVAR]
dist = Hailo
bugtracker = rt
homepage = http://hailo.org
github_user = hailo
no_AutoPrereq = 1
use_MakeMaker = 0
use_CompileTests = 0
[=inc::HailoMakeMaker / HailoMakeMaker]
[Prereqs / RuntimeRequires]
perl = 5.010
; Core stuff
Time::HiRes = 0
File::Spec::Functions = 0
;; Depend on Mouse and Moose, we can use either one
; Mouse
Mouse = 0.62
MouseX::StrictConstructor = 0.02
MouseX::Getopt = 0.2601
; Moose
Moose = 1.08
-MooseX::StrictConstructor = 0.10
+MooseX::StrictConstructor = 0.16
MooseX::Getopt = 0.37
; Hailo.pm
Any::Moose = 0.13
autodie = 2.08
Class::Load = 0.06
IPC::System::Simple = 1.21
File::CountLines = 0.0.2
IO::Interactive = 0.0.6
; Command.pm
Getopt::Long::Descriptive = 0.085
Dir::Self = 0.10
Term::Sk = 0.06
; ReadLine.pm
Term::ReadLine = 0
Data::Dump = 1.17
; DBD.pm
List::MoreUtils = 0.22
; SQLite.pm
DBD::SQLite = 1.29
; Words.pm
Regexp::Common = 2010010201
; everywhere
namespace::clean = 0.18
[Prereqs / TestRequires]
File::Slurp = 9999.13
Test::Exception = 0.29
Test::Expect = 0.31
Test::More = 0.94
Test::Output = 0.16
Test::Script = 1.07
Test::Script::Run = 0.04
Test::Synopsis = 0.06
Data::Section = 0.101620
; Data to babble on
Bot::Training = 0
Bot::Training::MegaHAL = 0
Bot::Training::StarCraft = 0
[Prereqs / RuntimeRecommends]
;; Pg/mysql: optional backends
DBD::mysql = 4.013
DBD::Pg = 2.16.1
|
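The MooseX::StrictConstructor bump in the commit above concerns constructor strictness: with it loaded, unknown arguments to new() become fatal instead of being silently discarded. A minimal sketch, with an invented class and attribute:

package My::Example;
use Moose;
use MooseX::StrictConstructor;

has brain => (is => 'ro', isa => 'Str');

__PACKAGE__->meta->make_immutable;

package main;

my $ok = My::Example->new(brain => 'db.sqlite');   # fine

# dies with something like "Found unknown attribute(s) ... brian"
my $bad = eval { My::Example->new(brian => 'db.sqlite') };
print $@ unless defined $bad;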
hinrik/hailo | e82d2e56e97096cd48592a9e37d082d36613ea35 | This was causing a warning | diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index 27f478c..f2c1a61 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,444 +1,444 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example . com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion".'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion".'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<. com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources . list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
- [qw<you know , 4-3 equals 1>],
+ [qw<you know> ,',', qw<4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
[
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
[qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
"::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my $tokens = $toke->make_tokens($test->[0]);
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
my $output = $toke->make_output($tokens);
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
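The test entries above all exercise the same round trip: make_tokens turns a string into [spacing, text] pairs, and make_output rebuilds a capitalized, punctuated sentence from those pairs. A minimal usage sketch, assuming Hailo and its Words tokenizer are installed:

use 5.010;
use utf8;
use Hailo::Tokenizer::Words;

my $toke   = Hailo::Tokenizer::Words->new();
my $tokens = $toke->make_tokens("you mean i've got 3,14? yes");

# each token is an arrayref of [spacing_attribute, text]
say join ' ', map { $_->[1] } @$tokens;   # you mean i've got 3,14 ? yes
say $toke->make_output($tokens);          # You mean I've got 3,14? Yes.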
hinrik/hailo | 81fffcdaec86807239b5b3afcf93a222891a050a | This should be elsif | diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 5c97630..0eb45cd 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,226 +1,226 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ \p{Upper}{2,} \W+ (?: \p{Upper}* \p{Lower} ) /x;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"âââ«»ããâ¹â]/;
my $CLOSE_QUOTE = qr/['"âââ«»ããâºâ]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/(?:$WORD_TYPES)|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)*||$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
my $SEPARATOR = qr/\s+|$ELLIPSIS/;
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
- if (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
+ elsif (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
# {2,} so it doesn't match I'm
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/$SEPARATOR$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$SEPARATOR|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
$reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
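The one-line fix above turns the Perl-class branch into part of the elsif chain, so each pass over a chunk is classified against exactly one branch, matching the !$got_word guard pattern of the neighbouring branches. A self-contained sketch of the hazard that elsif chains guard against in general (hypothetical labels, not Hailo's actual data flow): with a bare if, two branches of a classifier can both fire on the same input.

use 5.010;

for my $chunk ('http://example.org/Foo::Bar') {
    my @hits;
    if ($chunk =~ m{^https?://}) {
        push @hits, 'uri';
    }
    if ($chunk =~ /::/) {    # bare "if": also fires after the URI branch
        push @hits, 'perl-class';
    }
    say "@hits";             # uri perl-class -- one chunk, two labels
}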
hinrik/hailo | 758e98dc3f5ccb24fa3b620e258230f467c10fe2 | This was put in by accident. Unused. | diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index d0539dc..5c97630 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,227 +1,226 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ \p{Upper}{2,} \W+ (?: \p{Upper}* \p{Lower} ) /x;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"âââ«»ããâ¹â]/;
my $CLOSE_QUOTE = qr/['"âââ«»ããâºâ]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/(?:$WORD_TYPES)|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)*||$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
my $SEPARATOR = qr/\s+|$ELLIPSIS/;
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
- my $prev_chunk;
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
if (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
# {2,} so it doesn't match I'm
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/$SEPARATOR$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$SEPARATOR|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
$reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
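Both versions of the word branch above end with the same case-preservation heuristic: a word is lowercased before storage unless it is entirely upper case, changes case mid-word (WoW, GumbyBRAIN), or opens with two or more capitals followed by a non-word character (KIA'd). A standalone sketch with the two patterns copied from the module:

use 5.010;

my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ \p{Upper}{2,} \W+ (?: \p{Upper}* \p{Lower} ) /x;

for my $word (qw( WoW GumbyBRAIN KIA'd Foo WOAH )) {
    my $stored = $word;
    $stored = lc $stored
        if  $word ne uc $word      # not an all-caps word like WOAH
        and $word !~ $MIXED_CASE
        and $word !~ $UPPER_NONW;
    say "$word -> $stored";        # only "Foo" comes out lowercased
}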
hinrik/hailo | 2b64247d368ab4d3c18fc960d736c4b59773e970 | Word tokenizer: Improve matching of Perl class names | diff --git a/Changes b/Changes
index 7ade648..a44ac67 100644
--- a/Changes
+++ b/Changes
@@ -1,516 +1,518 @@
Revision history for Hailo
{{$NEXT}}
+ - Word tokenizer: Improve matching of Perl class names
+
0.66 2011-04-27 07:37:45
- Die when the user tries --stats without a brain
- Word tokenizer: stopped matching numbers separately in many cases.
"95GB", "3D", "800x600" are all single tokens now. This should reduce
the volume of nonsensical word-number pairs in the output.
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize '–' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename tests file in t/ to drop the DBD- prefix. Tests like
Words-*.t are were also moved into
sub-directories. e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC::Run3 calling a script that called FindBin didn't work
as expected.
- Add more testing including a really basic test for DBIx::Class
debugging (from the dbix-class branch) and making TAP output
more verbose.
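The old $PERL_CLASS pattern required word characters on both sides of at least one "::", so names like ::Class, Class::, and Foo::Bar:: fell through to the generic word and non-word branches and were split up. A quick check of the new alternation (pattern copied from the Words.pm hunk below) against the forms added to the test suite:

use 5.010;

my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;

for my $name (qw( ::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar )) {
    say $name =~ /^$PERL_CLASS$/ ? "match: $name" : "no match: $name";
}
# all five names match as single tokens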
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 1408677..d0539dc 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,227 +1,227 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
my $APOSTROPHE = qr/['’´]/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $NON_WORD = qr/\W+/;
my $BARE_WORD = qr/\w+/;
my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ \p{Upper}{2,} \W+ (?: \p{Upper}* \p{Lower} ) /x;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
-my $PERL_CLASS = qr/ \w+ (?: :: \w+ )+ (?: :: )? /x;
+my $PERL_CLASS = qr/ (?: :: \w+ (?: :: \w+ )* | \w+ (?: :: \w+ )+ ) (?: :: )? | \w+ :: /x;
my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"âââ«»ããâ¹â]/;
my $CLOSE_QUOTE = qr/['"âââ«»ããâºâ]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/(?:$WORD_TYPES)|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)*||$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
my $SEPARATOR = qr/\s+|$ELLIPSIS/;
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
my $prev_chunk;
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
if (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
# {2,} so it doesn't match I'm
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/$SEPARATOR$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$SEPARATOR|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
$reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
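The "two passes" comment in make_output above deserves a note: a single s///g that matches a boundary plus the word after it consumes both, so with back-to-back sentences every other word would be missed. The first pass therefore wraps each capitalized word in \x1B sentinels that the second pass can re-anchor on. A simplified illustration, with toy patterns standing in for Hailo's real ones:

use 5.010;

my $reply = 'one. two. three. four. five.';

# one pass: "three" and "five" are skipped, because the word that
# would anchor their match was consumed by the previous match
(my $once = $reply) =~ s/\w+\. \K(\w+)/\u$1/g;
say $once;   # one. Two. three. Four. five.

# two passes, marking already-capitalized words with \x1B sentinels
(my $twice = $reply) =~ s/\w+\. \K(\w+)/\x1B\u$1\x1B/g;
$twice =~ s/\x1B\w+\x1B\. \K(\w+)/\u$1/g;
$twice =~ s/\x1B//g;
say $twice;  # one. Two. Three. Four. Five.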
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index 09f9636..27f478c 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,439 +1,444 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example . com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion".'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion".'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<. com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
[qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
[qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
[qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
[qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGÎÌTE',
[qw<this is STARGÎÌTE>],
'This is STARGÎÌTE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources . list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
[
"You know, 4-3 equals 1",
[qw<you know , 4-3 equals 1>],
"You know, 4-3 equals 1.",
],
[
"moo-5 moo-5-moo moo_5",
[qw<moo-5 moo-5-moo moo_5>],
"Moo-5 moo-5-moo moo_5.",
],
+ [
+ "::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
+ [qw<::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar>],
+ "::Class Class:: ::Foo::Bar Foo::Bar:: Foo::Bar",
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my $tokens = $toke->make_tokens($test->[0]);
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
my $output = $toke->make_output($tokens);
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
|
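Both Words.pm revisions above share the same output-joining rule: every token carries a spacing attribute (normal, prefix, postfix, infix), and a space is appended only when the current token is not prefix/infix and the next token is not postfix/infix. A standalone re-implementation of just that loop; the numeric constants are assumptions for illustration (the real values come from Hailo::Role::Tokenizer), only the comparisons mirror the loop in the diffs above:

use 5.010;

my %S = (normal => 0, prefix => 1, postfix => 2, infix => 3);

sub join_tokens {
    my @tokens = @_;    # each token: [spacing, text]
    my $out = '';
    for my $pos (0 .. $#tokens) {
        my ($spacing, $text) = @{ $tokens[$pos] };
        $out .= $text;
        $out .= ' '
            if $pos != $#tokens
            && $spacing != $S{prefix} && $spacing != $S{infix}
            && $tokens[$pos+1][0] != $S{postfix}
            && $tokens[$pos+1][0] != $S{infix};
    }
    return $out;
}

# the comma in "4.1GB downloaded, 95GB uploaded" is a postfix token,
# so it takes no space before it and a normal space after it
say join_tokens([$S{normal},  '4.1GB'], [$S{normal}, 'downloaded'],
                [$S{postfix}, ','],     [$S{normal}, '95GB'],
                [$S{normal},  'uploaded']);
# 4.1GB downloaded, 95GB uploaded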
hinrik/hailo | fa15d8e46406ecc6d81042317c55d702e39af9a2 | Reorganization, more descriptive variable names, etc | diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 2964641..1408677 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,225 +1,227 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
-my $ALPHA = qr/(?![_\d])\w/;
+my $ALPHABET = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
-my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOSTROPHE = qr/['’´]/;
-my $APOST_WORD = qr/$ALPHA+(?:$APOSTROPHE$ALPHA+)+/;
my $ELLIPSIS = qr/\.{2,}|…/;
-my $EMAIL = qr/[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}/i;
-my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $NON_WORD = qr/\W+/;
-my $PLAIN_WORD = qr/\w+/;
-my $ALPHA_WORD = qr/$APOST_WORD|$PLAIN_WORD/;
-my $WORD_TYPES = qr/$NUMBER|$PLAIN_WORD\.(?:$PLAIN_WORD\.)+|$ALPHA_WORD/;
-my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHA|$NUMBER)/;
+my $BARE_WORD = qr/\w+/;
+my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
+my $APOST_WORD = qr/$ALPHABET+(?:$APOSTROPHE$ALPHABET+)+/;
+my $NORM_WORD = qr/$APOST_WORD|$BARE_WORD/;
+my $WORD_TYPES = qr/$NUMBER|$BARE_WORD\.(?:$BARE_WORD\.)+|$NORM_WORD/;
+my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHABET|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ \p{Upper}{2,} \W+ (?: \p{Upper}* \p{Lower} ) /x;
+my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
+my $EMAIL = qr/ [A-Z0-9._%+-]+ @ [A-Z0-9.-]+ \. [A-Z]{2,4} /xi;
+my $PERL_CLASS = qr/ \w+ (?: :: \w+ )+ (?: :: )? /x;
+my $EXTRA_URI = qr{ (?: \w+ \+ ) ssh:// \S+ }x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"âââ«»ããâ¹â]/;
my $CLOSE_QUOTE = qr/['"âââ«»ããâºâ]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
-my $LOOSE_WORD = qr/(?:$WORD_TYPES)|$PLAIN_WORD(?:$DASH(?:$WORD_TYPES|$PLAIN_WORD)*||$APOSTROPHE(?!$ALPHA|$NUMBER|$APOSTROPHE))*/;
+my $LOOSE_WORD = qr/(?:$WORD_TYPES)|$BARE_WORD(?:$DASH(?:$WORD_TYPES|$BARE_WORD)*||$APOSTROPHE(?!$ALPHABET|$NUMBER|$APOSTROPHE))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
my $SEPARATOR = qr/\s+|$ELLIPSIS/;
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
my $prev_chunk;
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
- if (!$got_word && $chunk =~ s/ ^ (?<class> \w+ (?:::\w+)+ (?:::)? )//xo) {
+ if (!$got_word && $chunk =~ s/ ^ (?<class> $PERL_CLASS )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
- elsif (!$got_word && $chunk =~ s{ ^ (?<uri> (?:\w+\+) ssh:// \S+ ) }{}xo) {
+ elsif (!$got_word && $chunk =~ s{ ^ (?<uri> $EXTRA_URI ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
$word = $+{word};
}
else {
$chunk =~ s/^($WORD)//o and $word = $1;
}
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
# {2,} so it doesn't match I'm
and $word !~ $UPPER_NONW;
push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/$SEPARATOR$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$SEPARATOR|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
- $reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHA)}{I}go;
+ $reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHABET)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
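Aside from the renames, the commit above lifts the inline Perl-class and foo+ssh URI matches into the named $PERL_CLASS and $EXTRA_URI patterns. A minimal standalone sketch of what those two regexes accept; the patterns are copied from the diff, while the probe strings and print labels are made up for illustration:

use strict;
use warnings;

# Patterns copied from the diff above.
my $PERL_CLASS = qr/ \w+ (?: :: \w+ )+ (?: :: )? /x;
my $EXTRA_URI  = qr{ (?: \w+ \+ ) ssh:// \S+ }x;

# Hypothetical probe strings, for illustration only.
for my $chunk ('Acme::POE::Tree', 'svn+ssh://svn.example.org/repo', 'plain') {
    if    ($chunk =~ /^$PERL_CLASS$/) { print "$chunk => Perl class\n" }
    elsif ($chunk =~ /^$EXTRA_URI$/)  { print "$chunk => ssh-style URI\n" }
    else                              { print "$chunk => neither\n" }
}

Note that $EXTRA_URI, as written in the diff, requires the "\w+\+" prefix, so a bare ssh:// URI would not match this particular pattern.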
hinrik/hailo | 56478eb29bb793b247b9714720cca83872a45ced | Remove dead code from Word tokenizer | diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 4b7451c..2964641 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,254 +1,225 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHA = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOSTROPHE = qr/['’´]/;
my $APOST_WORD = qr/$ALPHA+(?:$APOSTROPHE$ALPHA+)+/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $EMAIL = qr/[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}/i;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $NON_WORD = qr/\W+/;
my $PLAIN_WORD = qr/\w+/;
my $ALPHA_WORD = qr/$APOST_WORD|$PLAIN_WORD/;
my $WORD_TYPES = qr/$NUMBER|$PLAIN_WORD\.(?:$PLAIN_WORD\.)+|$ALPHA_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHA|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ \p{Upper}{2,} \W+ (?: \p{Upper}* \p{Lower} ) /x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”„«»」』›‚]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
my $LOOSE_WORD = qr/(?:$WORD_TYPES)|$PLAIN_WORD(?:$DASH(?:$WORD_TYPES|$PLAIN_WORD)*||$APOSTROPHE(?!$ALPHA|$NUMBER|$APOSTROPHE))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
my $SEPARATOR = qr/\s+|$ELLIPSIS/;
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
my $prev_chunk;
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
if (!$got_word && $chunk =~ s/ ^ (?<class> \w+ (?:::\w+)+ (?:::)? )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> (?:\w+\+) ssh:// \S+ ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
- my @words;
+ my $word;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
- push @words, $+{word};
- }
-
- while (length $chunk) {
- last if $chunk !~ s/^($WORD)//o;
- push @words, $1;
- }
-
- for my $word (@words) {
- # Maybe preserve the casing of this word
- $word = lc $word
- if $word ne uc $word
- # Mixed-case words like "WoW"
- and $word !~ $MIXED_CASE
- # Words that are upper case followed by a non-word character.
- # {2,} so it doesn't match I'm
- and $word !~ $UPPER_NONW;
- }
-
- if (@words == 1) {
- push @tokens, [$self->{_spacing_normal}, $words[0]];
- }
- elsif (@words == 2) {
- # When there are two words joined together, we need to
- # decide if it's normal+postfix (e.g. "4.1GB") or
- # prefix+normal (e.g. "v2.3")
-
- if ($words[0] =~ /$NUMBER/ && $words[1] =~ /$ALPHA_WORD/) {
- push @tokens, [$self->{_spacing_normal}, $words[0]];
- push @tokens, [$self->{_spacing_postfix}, $words[1]];
- }
- elsif ($words[0] =~ /$ALPHA_WORD/ && $words[1] =~ /$NUMBER/) {
- push @tokens, [$self->{_spacing_prefix}, $words[0]];
- push @tokens, [$self->{_spacing_normal}, $words[1]];
- }
+ $word = $+{word};
}
else {
- # When 3 or more words are together, (e.g. "800x600"),
- # we treat them as two normal tokens surrounding one or
- # more infix tokens
- push @tokens, [$self->{_spacing_normal}, $_] for $words[0];
- push @tokens, [$self->{_spacing_infix}, $_] for @words[1..$#words-1];
- push @tokens, [$self->{_spacing_normal}, $_] for $words[-1];
+ $chunk =~ s/^($WORD)//o and $word = $1;
}
+ # Maybe preserve the casing of this word
+ $word = lc $word
+ if $word ne uc $word
+ # Mixed-case words like "WoW"
+ and $word !~ $MIXED_CASE
+ # Words that are upper case followed by a non-word character.
+ # {2,} so it doesn't match I'm
+ and $word !~ $UPPER_NONW;
+
+ push @tokens, [$self->{_spacing_normal}, $word];
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/$SEPARATOR$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$SEPARATOR|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
$reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHA)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
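The commit above collapses the old one-chunk-many-words branch into a single $WORD match per loop pass, so every word token now goes through the same [spacing, text] bookkeeping. A rough sketch of how those pairs drive output joining; the numeric spacing constants are hypothetical stand-ins (in Hailo the real values come from Hailo::Role::Tokenizer), and the token list is invented for illustration:

use strict;
use warnings;

# Hypothetical spacing constants; the real values live in Hailo::Role::Tokenizer.
my %SPACING = (normal => 0, prefix => 1, postfix => 2, infix => 3);

# Tokens as [spacing, text] pairs, mirroring the structure used in the diff.
my @tokens = (
    [$SPACING{normal},  'what'],
    [$SPACING{postfix}, '?'],      # attaches to the preceding word
    [$SPACING{normal},  'well'],
    [$SPACING{postfix}, '...'],
);

# Simplified version of make_output's joining rule: no space after
# prefix/infix tokens, and no space before postfix/infix tokens.
my $out = '';
for my $i (0 .. $#tokens) {
    my ($space, $text) = @{ $tokens[$i] };
    $out .= $text;
    next if $i == $#tokens;
    next if $space == $SPACING{prefix} || $space == $SPACING{infix};
    next if $tokens[$i+1][0] == $SPACING{postfix}
         || $tokens[$i+1][0] == $SPACING{infix};
    $out .= ' ';
}
print "$out\n";    # prints "what? well..."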
hinrik/hailo | 8d2a5272ad91e3e0b5fbcf8b758ef83263bf70d4 | Stop matching numbers separately in many cases | diff --git a/Changes b/Changes
index 2c13ace..0266eb5 100644
--- a/Changes
+++ b/Changes
@@ -1,518 +1,522 @@
Revision history for Hailo
{{$NEXT}}
- Die when the user tries --stats without a brain
+ - Word tokenizer: stopped matching numbers separately in many cases.
+ "95GB", "3D", "800x600" are all single tokens now. This should reduce
+ the volume of nonsensical word-number pairs in the output.
+
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize '–' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from a osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC::Run3 calling a script that called FindBin didn't work
as expected.
- Add more testing including a really basic test for DBIx::Class
debugging (from the dbix-class branch) and making TAP output
more verbose.
- Run all the tests Hailo::Test runs internally for each engine
one-by-one using the DBD::SQLite memory driver. This makes sure
the internal tests don't depend on each other in odd ways.
0.29 2010-03-13 10:32:43
- Remove Data::Random as a dependency. It fails the most tests of
all the dists we depend on and we don't really need it for
diff --git a/lib/Hailo/Tokenizer/Words.pm b/lib/Hailo/Tokenizer/Words.pm
index 974d1b3..4b7451c 100644
--- a/lib/Hailo/Tokenizer/Words.pm
+++ b/lib/Hailo/Tokenizer/Words.pm
@@ -1,254 +1,254 @@
package Hailo::Tokenizer::Words;
use 5.010;
use utf8;
use Any::Moose;
use Any::Moose 'X::StrictConstructor';
use Regexp::Common qw/ URI /;
use namespace::clean -except => 'meta';
with qw(Hailo::Role::Arguments
Hailo::Role::Tokenizer);
# [[:alpha:]] doesn't match combining characters on Perl >=5.12
my $ALPHA = qr/(?![_\d])\w/;
# tokenization
my $DASH = qr/[–-]/;
my $DECIMAL = qr/[.,]/;
-my $NUMBER = qr/$DECIMAL?\d+(?:$DECIMAL\d+)*/;
+my $NUMBER = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $APOSTROPHE = qr/['’´]/;
my $APOST_WORD = qr/$ALPHA+(?:$APOSTROPHE$ALPHA+)+/;
my $ELLIPSIS = qr/\.{2,}|…/;
my $EMAIL = qr/[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}/i;
my $TWAT_NAME = qr/ \@ [A-Za-z0-9_]+ /x;
my $NON_WORD = qr/\W+/;
-my $PLAIN_WORD = qr/(?:\w(?<!\d))+/;
+my $PLAIN_WORD = qr/\w+/;
my $ALPHA_WORD = qr/$APOST_WORD|$PLAIN_WORD/;
my $WORD_TYPES = qr/$NUMBER|$PLAIN_WORD\.(?:$PLAIN_WORD\.)+|$ALPHA_WORD/;
my $WORD_APOST = qr/$WORD_TYPES(?:$DASH$WORD_TYPES)*$APOSTROPHE(?!$ALPHA|$NUMBER)/;
my $WORD = qr/$WORD_TYPES(?:(?:$DASH$WORD_TYPES)+|$DASH(?!$DASH))?/;
my $MIXED_CASE = qr/ \p{Lower}+ \p{Upper} /x;
my $UPPER_NONW = qr/^ \p{Upper}{2,} \W+ (?: \p{Upper}* \p{Lower} ) /x;
# capitalization
# The rest of the regexes are pretty hairy. The goal here is to catch the
# most common cases where a word should be capitalized. We try hard to
# guard against capitalizing things which don't look like proper words.
# Examples include URLs and code snippets.
my $OPEN_QUOTE = qr/['"‘“„«»「『‹‚]/;
my $CLOSE_QUOTE = qr/['"’”„«»」』›‚]/;
my $TERMINATOR = qr/(?:[?!‽]+|(?<!\.)\.)/;
my $ADDRESS = qr/:/;
my $PUNCTUATION = qr/[?!‽,;.:]/;
my $BOUNDARY = qr/$CLOSE_QUOTE?(?:\s*$TERMINATOR|$ADDRESS|$ELLIPSIS)\s+$OPEN_QUOTE?\s*/;
-my $LOOSE_WORD = qr/(?:$WORD_TYPES)|\w+(?:$DASH(?:$WORD_TYPES|\w+)*||$APOSTROPHE(?!$ALPHA|$NUMBER|$APOSTROPHE))*/;
+my $LOOSE_WORD = qr/(?:$WORD_TYPES)|$PLAIN_WORD(?:$DASH(?:$WORD_TYPES|$PLAIN_WORD)*||$APOSTROPHE(?!$ALPHA|$NUMBER|$APOSTROPHE))*/;
my $SPLIT_WORD = qr{$LOOSE_WORD(?:/$LOOSE_WORD)?(?=$PUNCTUATION(?: |$)|$CLOSE_QUOTE|$TERMINATOR| |$)};
my $SEPARATOR = qr/\s+|$ELLIPSIS/;
# we want to capitalize words that come after "On example.com?"
# or "You mean 3.2?", but not "Yes, e.g."
my $DOTTED_STRICT = qr/$LOOSE_WORD(?:$DECIMAL(?:\d+|\w{2,}))?/;
my $WORD_STRICT = qr/$DOTTED_STRICT(?:$APOSTROPHE$DOTTED_STRICT)*/;
# input -> tokens
sub make_tokens {
my ($self, $line) = @_;
my @tokens;
my @chunks = split /\s+/, $line;
# process all whitespace-delimited chunks
my $prev_chunk;
for my $chunk (@chunks) {
my $got_word;
while (length $chunk) {
# We convert it to ASCII and then look for a URI because $RE{URI}
# from Regexp::Common doesn't support non-ASCII domain names
my $ascii = $chunk;
$ascii =~ s/[^[:ascii:]]/a/g;
# URIs
if (!$got_word && $ascii =~ / ^ $RE{URI} /xo) {
my $uri_end = $+[0];
my $uri = substr $chunk, 0, $uri_end;
$chunk =~ s/^\Q$uri//;
push @tokens, [$self->{_spacing_normal}, $uri];
$got_word = 1;
}
# Perl class names
if (!$got_word && $chunk =~ s/ ^ (?<class> \w+ (?:::\w+)+ (?:::)? )//xo) {
push @tokens, [$self->{_spacing_normal}, $+{class}];
$got_word = 1;
}
# ssh:// (and foo+ssh://) URIs
elsif (!$got_word && $chunk =~ s{ ^ (?<uri> (?:\w+\+) ssh:// \S+ ) }{}xo) {
push @tokens, [$self->{_spacing_normal}, $+{uri}];
$got_word = 1;
}
# email addresses
elsif (!$got_word && $chunk =~ s/ ^ (?<email> $EMAIL ) //xo) {
push @tokens, [$self->{_spacing_normal}, $+{email}];
$got_word = 1;
}
# Twitter names
elsif (!$got_word && $chunk =~ s/ ^ (?<twat> $TWAT_NAME ) //xo) {
# Names on Twitter/Identi.ca can only match
# @[A-Za-z0-9_]+. I tested this on ~800k Twatterhose
# names.
push @tokens, [$self->{_spacing_normal}, $+{twat}];
$got_word = 1;
}
# normal words
elsif ($chunk =~ / ^ $WORD /xo) {
my @words;
# special case to allow matching q{ridin'} as one word, even when
# it appears as q{"ridin'"}, but not as q{'ridin'}
my $last_char = @tokens ? substr $tokens[-1][1], -1, 1 : '';
if (!@tokens && $chunk =~ s/ ^ (?<word>$WORD_APOST) //xo
|| $last_char =~ / ^ $APOSTROPHE $ /xo
&& $chunk =~ s/ ^ (?<word>$WORD_APOST) (?<! $last_char ) //xo) {
push @words, $+{word};
}
while (length $chunk) {
last if $chunk !~ s/^($WORD)//o;
push @words, $1;
}
for my $word (@words) {
# Maybe preserve the casing of this word
$word = lc $word
if $word ne uc $word
# Mixed-case words like "WoW"
and $word !~ $MIXED_CASE
# Words that are upper case followed by a non-word character.
# {2,} so it doesn't match I'm
and $word !~ $UPPER_NONW;
}
if (@words == 1) {
push @tokens, [$self->{_spacing_normal}, $words[0]];
}
elsif (@words == 2) {
# When there are two words joined together, we need to
# decide if it's normal+postfix (e.g. "4.1GB") or
# prefix+normal (e.g. "v2.3")
if ($words[0] =~ /$NUMBER/ && $words[1] =~ /$ALPHA_WORD/) {
push @tokens, [$self->{_spacing_normal}, $words[0]];
push @tokens, [$self->{_spacing_postfix}, $words[1]];
}
elsif ($words[0] =~ /$ALPHA_WORD/ && $words[1] =~ /$NUMBER/) {
push @tokens, [$self->{_spacing_prefix}, $words[0]];
push @tokens, [$self->{_spacing_normal}, $words[1]];
}
}
else {
# When 3 or more words are together, (e.g. "800x600"),
# we treat them as two normal tokens surrounding one or
# more infix tokens
push @tokens, [$self->{_spacing_normal}, $_] for $words[0];
push @tokens, [$self->{_spacing_infix}, $_] for @words[1..$#words-1];
push @tokens, [$self->{_spacing_normal}, $_] for $words[-1];
}
$got_word = 1;
}
# everything else
elsif ($chunk =~ s/ ^ (?<non_word> $NON_WORD ) //xo) {
my $non_word = $+{non_word};
my $spacing = $self->{_spacing_normal};
# was the previous token a word?
if ($got_word) {
$spacing = length $chunk
? $self->{_spacing_infix}
: $self->{_spacing_postfix};
}
# do we still have more tokens in this chunk?
elsif (length $chunk) {
$spacing = $self->{_spacing_prefix};
}
push @tokens, [$spacing, $non_word];
}
}
}
return \@tokens;
}
# tokens -> output
sub make_output {
my ($self, $tokens) = @_;
my $reply = '';
for my $pos (0 .. $#{ $tokens }) {
my ($spacing, $text) = @{ $tokens->[$pos] };
$reply .= $text;
# append whitespace if this is not a prefix token or infix token,
# and this is not the last token, and the next token is not
# a postfix/infix token
if ($pos != $#{ $tokens }
&& $spacing != $self->{_spacing_prefix}
&& $spacing != $self->{_spacing_infix}
&& !($pos < $#{ $tokens }
&& ($tokens->[$pos+1][0] == $self->{_spacing_postfix}
|| $tokens->[$pos+1][0] == $self->{_spacing_infix})
)
) {
$reply .= ' ';
}
}
# capitalize the first word
$reply =~ s/^\s*$OPEN_QUOTE?\s*\K($SPLIT_WORD)(?=$ELLIPSIS|(?:(?:$CLOSE_QUOTE|$TERMINATOR|$ADDRESS|$PUNCTUATION+)?(?:\s|$)))/\u$1/o;
# capitalize the second word
$reply =~ s/^\s*$OPEN_QUOTE?\s*$SPLIT_WORD(?:(?:\s*$TERMINATOR|$ADDRESS)\s+)\K($SPLIT_WORD)/\u$1/o;
# capitalize all other words after word boundaries
# we do it in two passes because we need to match two words at a time
$reply =~ s/$SEPARATOR$OPEN_QUOTE?\s*$WORD_STRICT$BOUNDARY\K($SPLIT_WORD)/\x1B\u$1\x1B/go;
$reply =~ s/\x1B$WORD_STRICT\x1B$BOUNDARY\K($SPLIT_WORD)/\u$1/go;
$reply =~ s/\x1B//go;
# end paragraphs with a period when it makes sense
$reply =~ s/(?:$SEPARATOR|^)$OPEN_QUOTE?(?:$SPLIT_WORD(?:\.$SPLIT_WORD)*)$CLOSE_QUOTE?\K$/./o;
# capitalize I'm, I've...
$reply =~ s{(?:$SEPARATOR|$OPEN_QUOTE)\Ki(?=$APOSTROPHE$ALPHA)}{I}go;
return $reply;
}
__PACKAGE__->meta->make_immutable;
=encoding utf8
=head1 NAME
Hailo::Tokenizer::Words - A tokenizer for L<Hailo|Hailo> which splits
on whitespace and word boundaries, mostly.
=head1 DESCRIPTION
This tokenizer does its best to handle various languages. It knows about most
apostrophes, quotes, and sentence terminators.
=head1 AUTHOR
Hinrik E<Ouml>rn SigurE<eth>sson, [email protected]
=head1 LICENSE AND COPYRIGHT
Copyright 2010 Hinrik E<Ouml>rn SigurE<eth>sson
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
diff --git a/t/tokenizer/Words.t b/t/tokenizer/Words.t
index 584fd12..09f9636 100644
--- a/t/tokenizer/Words.t
+++ b/t/tokenizer/Words.t
@@ -1,429 +1,439 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Test::More tests => 2;
use Hailo::Tokenizer::Words;
binmode $_, ':encoding(utf8)' for (*STDIN, *STDOUT, *STDERR);
subtest make_tokens => sub {
my $t = sub {
my ($str, $tokens) = @_;
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($str);
my $tok;
push @$tok, $_->[1] for @$parsed;
is_deeply(
$tok,
$tokens,
"make_tokens: <<$str>> ==> " . (join ' ', map { qq[<<$_>>] } @$tokens) . ""
);
};
for my $chr (map { chr } 11 .. 200) {
next if $chr =~ /^\s$/;
$t->($chr, [ $chr ]);
}
$t->("foo bar", [ qw<foo bar> ]);
$t->("Ã", [ 'Ã' ]);
# Words like WoW and other odd things that change capitalization
# mid-way should retain their capitalization.
$t->("I hate WoW.", [ qw< I hate WoW . > ]);
# Preserve mixed capitalization
$t->("GumbyBRAIN", [ qw< GumbyBRAIN > ]);
$t->("\"GumbyBRAIN\"", [ qw< " GumbyBRAIN " > ]);
$t->("HoRRiBlE", [ qw< HoRRiBlE > ]);
$t->("HoRRiBle", [ qw< HoRRiBle > ]);
$t->("hoRRiBle", [ qw< hoRRiBle > ]);
{
my $warn = '';
local $SIG{__WARN__} = sub { $warn .= $_[0] };
$t->($_, [ $_ ]) for "n" . "o" x 500;
is($warn, '', "Didn't get Complex regular subexpression recursion limit (32766) exceeded");
}
my @want = ( qw[
WoW 1
foo 0
Foo 0
FoO 1
fOO 1
foO 1
foO 1
GumbyBRAIN 1
gumbyBRAIN 1
HoRRiBlE 1
HoRRiBle 1
hoRRiBle 1
] );
while (my ($word, $should) = splice @want, 0, 2) {
$t->($word, [ $should ? $word : lc $word ]);
}
# Similarly we should preserve capitalization on words split by '
# and other \W characters
$t->("I FYIQ'ed that job.", [ qw< I FYIQ'ed that job . > ]);
$t->("That guy was KIA'd.", [ qw< that guy was KIA'd . > ]);
done_testing();
};
subtest make_output => sub {
my @tokens = (
[
' " why hello there. «yes». "foo is a bar", e.g. bla ... yes',
[qw<" why hello there . « yes ». " foo is a bar>, '",', qw<e.g. bla ... yes>],
'" Why hello there. «Yes». "Foo is a bar", e.g. bla ... yes.',
],
[
"someone: how're you?",
[qw<someone : how're you ?>],
"Someone: How're you?",
],
[
'what?! well...',
[qw<what ?! well ...>],
'What?! Well...',
],
[
'hello. you: what are you doing?',
[qw<hello . you : what are you doing ?>],
'Hello. You: What are you doing?',
],
[
'foo: foo: foo: what are you doing?',
[qw<foo : foo : foo : what are you doing ?>],
'Foo: Foo: Foo: What are you doing?',
],
[
"I'm talking about this key:value thing",
[qw<i'm talking about this key : value thing>],
"I'm talking about this key:value thing."
],
[
"what? but that's impossible",
[qw<what ? but that's impossible>],
"What? But that's impossible.",
],
[
'on example.com? yes',
[qw<on example . com ? yes>],
"On example.com? Yes.",
],
[
'pi is 3.14, well, almost',
[qw<pi is 3.14>, ',', 'well', ',', 'almost'],
"Pi is 3.14, well, almost.",
],
[
'foo 0.40 bar or .40 bar bla 0,40 foo ,40',
[qw<foo 0.40 bar or .40 bar bla>, '0,40', 'foo', ',40'],
'Foo 0.40 bar or .40 bar bla 0,40 foo ,40.',
],
[
"sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
[qw<sá '' karlkyns '' aðili à [[ hjónaband ]] i tveggja lesbÃa ?>],
"Sá ''karlkyns'' aðili à [[hjónaband]]i tveggja lesbÃa?",
],
[
"you mean i've got 3,14? yes",
[qw<you mean i've got>, '3,14', '?', 'yes'],
"You mean I've got 3,14? Yes.",
],
[
'Pretty girl like her "peak". oh and you’re touching yourself',
[qw<pretty girl like her " peak ". oh and you’re touching yourself>],
'Pretty girl like her "peak". Oh and you’re touching yourself.',
],
[
'http://foo.BAR/bAz',
[qw<http://foo.BAR/bAz>],
'http://foo.BAR/bAz',
],
[
'http://www.example.com/some/path?funny**!(),,:;@=&=',
[ 'http://www.example.com/some/path?funny**!(),,:;@=&=' ],
'http://www.example.com/some/path?funny**!(),,:;@=&=',
],
[
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
[ qw< svn+ssh://svn.wikimedia.org/svnroot/mediawiki > ],
'svn+ssh://svn.wikimedia.org/svnroot/mediawiki',
],
[
"foo bar baz. i said i'll do this",
[qw<foo bar baz . i said i'll do this>],
"Foo bar baz. I said I'll do this.",
],
[
'talking about i&34324 yes',
[qw<talking about i & 34324 yes>],
'Talking about i&34324 yes.'
],
[
'talking about i',
[qw<talking about i>],
'Talking about i.'
],
[
'none, as most animals do, I love conservapedia.',
['none', ',', qw<as most animals do>, ',', qw<I love conservapedia .>],
'None, as most animals do, I love conservapedia.'
],
[
'hm...',
[qw<hm ...>],
'Hm...'
],
[
'anti-scientology demonstration in london? hella-cool',
[qw<anti-scientology demonstration in london ? hella-cool>],
'Anti-scientology demonstration in london? Hella-cool.'
],
[
'This. compound-words are cool',
[qw<this . compound-words are cool>],
'This. Compound-words are cool.'
],
[
'Foo. Compound-word',
[qw<foo . compound-word>],
'Foo. Compound-word.'
],
[
'one',
[qw<one>],
'One.'
],
[
'cpanm is a true "religion"',
[qw<cpanm is a true " religion ">],
'Cpanm is a true "religion".'
],
[
'cpanm is a true "anti-religion"',
[qw<cpanm is a true " anti-religion ">],
'Cpanm is a true "anti-religion".'
],
[
'Maps to weekends/holidays',
[qw<maps to weekends / holidays>],
'Maps to weekends/holidays.'
],
[
's/foo/bar',
[qw<s / foo / bar>],
's/foo/bar'
],
[
's/foo/bar/',
[qw<s / foo / bar />],
's/foo/bar/'
],
[
'Where did I go? http://foo.bar/',
[qw<where did I go ? http://foo.bar/>],
'Where did I go? http://foo.bar/'
],
[
'What did I do? s/foo/bar/',
[qw<what did I do ? s / foo / bar />],
'What did I do? s/foo/bar/'
],
[
'I called foo() and foo(bar)',
[qw<I called foo () and foo ( bar )>],
'I called foo() and foo(bar)'
],
[
'foo() is a function',
[qw<foo () is a function>],
'foo() is a function.'
],
[
'the symbol : and the symbol /',
[qw<the symbol : and the symbol />],
'The symbol : and the symbol /'
],
[
'.com bubble',
[qw<. com bubble>],
'.com bubble.'
],
[
'við vorum þar. í norður- eða vesturhlutanum',
[qw<við vorum þar . í norður- eða vesturhlutanum>],
'Við vorum þar. Í norður- eða vesturhlutanum.'
],
[
"i'm talking about -postfix. yeah",
[qw<i'm talking about - postfix . yeah>],
"I'm talking about -postfix. yeah.",
],
[
"But..what about me? but...no",
[qw<but .. what about me ? but ... no>],
"But..what about me? But...no.",
],
[
"For foo'345 'foo' bar",
[qw<for foo ' 345 ' foo ' bar>],
"For foo'345 'foo' bar.",
],
[
"loves2spooge",
- [qw<loves 2 spooge>],
+ [qw<loves2spooge>],
"Loves2spooge.",
],
[
'she´ll be doing it now',
[qw<she´ll be doing it now>],
'She´ll be doing it now.',
],
[
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH',
[qw<CPAN upload : Crypt-Rijndael-MySQL-0.02 by SATOH>],
'CPAN upload: Crypt-Rijndael-MySQL-0.02 by SATOH.',
],
[
"I use a resolution of 800x600 on my computer",
- [qw<I use a resolution of 800 x 600 on my computer>],
+ [qw<I use a resolution of 800x600 on my computer>],
"I use a resolution of 800x600 on my computer.",
],
[
"WOAH 3D",
- [qw<WOAH 3 D>],
+ [qw<WOAH 3D>],
"WOAH 3D.",
],
[
"jarl sounds like yankee negro-lovers. britain was even into old men.",
[qw<jarl sounds like yankee negro-lovers . britain was even into old men .>],
"Jarl sounds like yankee negro-lovers. Britain was even into old men.",
],
[
"just look at http://beint.lýðræði.is does it turn tumi metrosexual",
[qw<just look at http://beint.lýðræði.is does it turn tumi metrosexual>],
"Just look at http://beint.lýðræði.is does it turn tumi metrosexual.",
],
[
'du: Invalid option --^',
[qw<du : invalid option --^>],
'Du: Invalid option --^',
],
[
'4.1GB downloaded, 95GB uploaded',
- [qw<4.1 GB downloaded>, ',', qw<95 GB uploaded>],
+ [qw<4.1GB downloaded>, ',', qw<95GB uploaded>],
'4.1GB downloaded, 95GB uploaded.',
],
[
'Use <http://google.com> as your homepage',
[qw{use < http://google.com > as your homepage}],
'Use <http://google.com> as your homepage.',
],
[
'Foo http://æðislegt.is,>>> bar',
[qw{foo http://æðislegt.is}, ',>>>', 'bar'],
'Foo http://æðislegt.is,>>> bar.',
],
[
'Foo http://æðislegt.is,$ bar',
[qw<foo http://æðislegt.is>, ',$', 'bar'],
'Foo http://æðislegt.is,$ bar.',
],
[
'http://google.is/search?q="stiklað+á+stóru"',
[qw{http://google.is/search?q= " stiklað + á + stóru "}],
'http://google.is/search?q="stiklað+á+stóru"',
],
[
'this is STARGΛ̊TE',
[qw<this is STARGΛ̊TE>],
'This is STARGΛ̊TE.',
],
[
'[email protected] [email protected] [email protected]',
[qw<[email protected] [email protected] [email protected]>],
'[email protected] [email protected] [email protected]',
],
[
'tumi@foo',
[qw<tumi @ foo>],
'tumi@foo',
],
[
'e.g. the river',
[qw<e.g. the river>],
'E.g. the river.',
],
[
'dong–licking is a really valuable book.',
[qw<dong–licking is a really valuable book .>],
'Dong–licking is a really valuable book.',
],
[
'taka úr sources.list',
[qw<taka úr sources . list>],
'Taka úr sources.list.',
],
[
'Huh? what? i mean what is your wife a...goer...eh? know what a dude last night...',
[qw<huh ? what ? i mean what is your wife a ... goer ... eh ? know what a dude last night ...>],
'Huh? What? I mean what is your wife a...goer...eh? Know what a dude last night...',
],
[
'neeeigh!',
[qw<neeeigh !>],
'Neeeigh!',
],
[
'neeeigh.',
[qw<neeeigh .>],
'Neeeigh.',
],
[
'odin-: foo-- # blah. odin-: yes',
[qw<odin- : foo -->, '#', qw<blah . odin- : yes>],
'Odin-: Foo-- # blah. Odin-: Yes.',
],
[
"struttin' that nigga",
[qw<struttin' that nigga>],
"Struttin' that nigga.",
],
[
'"maybe" and A better deal. "would" still need my coffee with tea.',
[qw<" maybe " and A better deal . " would " still need my coffee with tea .>],
'"Maybe" and A better deal. "Would" still need my coffee with tea.',
],
[
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
[qw<this Acme::POE::Tree module is neat . Acme::POE::Tree>],
"This Acme::POE::Tree module is neat. Acme::POE::Tree",
],
[
"I use POE-Component-IRC",
[qw<I use POE-Component-IRC>],
"I use POE-Component-IRC.",
],
+ [
+ "You know, 4-3 equals 1",
+ [qw<you know , 4-3 equals 1>],
+ "You know, 4-3 equals 1.",
+ ],
+ [
+ "moo-5 moo-5-moo moo_5",
+ [qw<moo-5 moo-5-moo moo_5>],
+ "Moo-5 moo-5-moo moo_5.",
+ ],
);
my $toke = Hailo::Tokenizer::Words->new();
for my $test (@tokens) {
my $tokens = $toke->make_tokens($test->[0]);
my $t;
push @$t, $_->[1] for @$tokens;
is_deeply($t, $test->[1], 'Tokens are correct');
my $output = $toke->make_output($tokens);
is_deeply($output, $test->[2], 'Output is correct');
}
done_testing();
};
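To see why "95GB" and "800x600" now survive as single tokens in the tests above: the new $NUMBER alternation only claims strings that actually contain a decimal separator, so digit-letter mixes without one fall through to the relaxed $PLAIN_WORD. A minimal standalone check, assuming only the regexes copied from the diff (the probe strings are illustrative):

use strict;
use warnings;

# Regexes copied from the diff above.
my $DECIMAL    = qr/[.,]/;
my $NUMBER     = qr/$DECIMAL\d+(?:$DECIMAL\d+)*|\d+(?:$DECIMAL\d+)+\w*/;
my $PLAIN_WORD = qr/\w+/;

# Illustrative probe strings.
for my $s (qw( .40 3.14 4.1GB 95GB 800x600 )) {
    if    ($s =~ /^(?:$NUMBER)$/)     { print "$s: NUMBER\n" }
    elsif ($s =~ /^(?:$PLAIN_WORD)$/) { print "$s: PLAIN_WORD\n" }
    else                              { print "$s: neither\n" }
}
# .40, 3.14 and 4.1GB hit $NUMBER; 95GB and 800x600 contain no
# decimal separator, so each matches as one $PLAIN_WORD token.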
diff --git a/t/tokenizer/Words/utf8-text.t b/t/tokenizer/Words/utf8-text.t
index 4fc28c5..8940149 100644
--- a/t/tokenizer/Words/utf8-text.t
+++ b/t/tokenizer/Words/utf8-text.t
@@ -1,1585 +1,1570 @@
use 5.010;
use utf8;
use strict;
use warnings;
use Encode qw<encode_utf8>;
use Data::Section -setup;
use Test::More;
use Hailo::Tokenizer::Words;
binmode $_, ':encoding(utf8)' for (*STDOUT, *STDERR);
BEGIN {
if ($] < 5.012000) {
plan skip_all => "This test relies on Perl >=5.12's Unicode matching";
}
my $got_yaml;
eval {
require YAML::XS;
YAML::XS->import('Dump', 'Load');
$got_yaml = 1;
};
plan skip_all => "Haven't got YAML::XS" if !$got_yaml;
}
-plan tests => '2519';
+plan tests => '2503';
my $self = bless {} => __PACKAGE__;
my $text = ${ $self->section_data("UTF-8 encoded sample plain-text file") };
my $toke = Hailo::Tokenizer::Words->new();
my $parsed = $toke->make_tokens($text);
# This is how the YAML::XS output was produced:
+#binmode *STDERR;
#print STDERR Dump($parsed);
#exit;
my $yaml = Load(encode_utf8(${ $self->section_data("YAML::XS result") }));
for (my $i = 0; $i < @$yaml; $i++) {
is($parsed->[$i][0], $yaml->[$i][0], "Token #$i: type matches");
is($parsed->[$i][1], $yaml->[$i][1], "Token #$i: content matches");
}
is(scalar(@$parsed), scalar(@$yaml), "Number of tokens matches");
__DATA__
__[ UTF-8 encoded sample plain-text file ]__
UTF-8 encoded sample plain-text file
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
Markus Kuhn [ˈmaʳkʊs kuːn] <http://www.cl.cam.ac.uk/~mgk25/> — 2002-07-25
The ASCII compatible UTF-8 encoding used in this plain-text file
is defined in Unicode, ISO 10646-1, and RFC 2279.
Using Unicode/UTF-8, you can write in emails and source code things such as
Mathematics and sciences:
â® Eâ
da = Q, n â â, â f(i) = â g(i), â§â¡ââââââââââ¤â«
âªâ¢ââa²+b³ ââ¥âª
âxââ: âxâ = âââxâ, α ⧠¬β = ¬(¬α ⨠β), âªâ¢âââââââ ââ¥âª
âªâ¢ââ· câ ââ¥âª
â â ââ â ⤠â â â â â â, â¨â¢â ââ¥â¬
âªâ¢â â ââ¥âª
⥠< a â b â¡ c ⤠d ⪠⤠â (â¦Aâ§ â âªBâ«), âªâ¢â â² ââ¥âª
âªâ¢â â³aâ±-bâ±ââ¥âª
2Hâ + Oâ â 2HâO, R = 4.7 kΩ, â 200 mm â©â£âi=1 â â¦â
Linguistics and dictionaries:
ði ıntÉËnæÊÉnÉl fÉËnÉtık ÉsoÊsiËeıÊn
Y [ËÊpsilÉn], Yen [jÉn], Yoga [ËjoËgÉ]
APL:
((Vâ³V)=â³â´V)/Vâ,V â·ââ³ââ´ââââ¾âââ
Nicer typography in plain text files:
ââââââââââââââââââââââââââââââââââââââââââââ
â â
â ⢠âsingleâ and âdoubleâ quotes â
â â
â ⢠Curly apostrophes: âWeâve been hereâ â
â â
â ⢠Latin-1 apostrophe and accents: '´` â
â â
â ⢠âdeutscheâ âAnführungszeichenâ â
â â
â ⢠â , â¡, â°, â¢, 3â4, â, â5/+5, â¢, ⦠â
â â
â ⢠ASCII safety test: 1lI|, 0OD, 8B â
â âââââââââââ® â
â ⢠the euro symbol: â 14.95 ⬠â â
â â°âââââââââ⯠â
ââââââââââââââââââââââââââââââââââââââââââââ
Combining characters:
STARGÎÌTE SG-1, a = vÌ = rÌ, aâ ⥠bâ
Greek (in Polytonic):
The Greek anthem:
Σὲ γνÏÏá½·Î¶Ï á¼Ïὸ Ïὴν κόÏη
Ïοῦ ÏÏαθιοῦ Ïὴν ÏÏομεÏá½µ,
Ïá½² γνÏÏá½·Î¶Ï á¼Ïὸ Ïὴν á½Ïη
Ïοὺ μὲ βία μεÏÏάει Ïá½´ γá¿.
᾿ÎÏ᾿ Ïá½° κόκκαλα βγαλμένη
Ïῶν ῾ÎλλήνÏν Ïá½° ἱεÏá½±
καὶ Ïὰν ÏÏá¿¶Ïα á¼Î½Î´ÏειÏμένη
Ïαá¿Ïε, ὦ Ïαá¿Ïε, ᾿ÎλεÏ
θεÏιά!
From a speech of Demosthenes in the 4th century BC:
Îá½Ïá½¶ Ïαá½Ïá½° ÏαÏá½·ÏÏαÏαί μοι γιγνώÏκειν, ὦ á¼Î½Î´ÏÎµÏ á¾¿Îθηναá¿Î¿Î¹,
á½
Ïαν Ï᾿ Îµá¼°Ï Ïá½° ÏÏάγμαÏα á¼ÏοβλέÏÏ ÎºÎ±á½¶ á½
Ïαν ÏÏá½¸Ï ÏοὺÏ
λόγοÏ
Ï Î¿á½Ï á¼ÎºÎ¿á½»ÏÎ ÏÎ¿á½ºÏ Î¼á½²Î½ Î³á½°Ï Î»á½¹Î³Î¿Ï
Ï ÏεÏá½¶ Ïοῦ
ÏιμÏÏá½µÏαÏθαι ΦίλιÏÏον á½Ïá¿¶ γιγνομένοÏ
Ï, Ïá½° δὲ ÏÏάγμαÏ᾿
Îµá¼°Ï ÏοῦÏο ÏÏοήκονÏα, á½¥Ïθ᾿ á½
ÏÏÏ Î¼á½´ ÏειÏόμεθ᾿ αá½Ïοὶ
ÏÏá½¹ÏεÏον ÎºÎ±Îºá¿¶Ï ÏκέÏαÏθαι δέον. οá½Î´á½³Î½ οá½Î½ á¼Î»Î»Î¿ μοι δοκοῦÏιν
οἱ Ïá½° ÏοιαῦÏα λέγονÏÎµÏ á¼¢ Ïὴν á½ÏόθεÏιν, ÏεÏá½¶ á¼§Ï Î²Î¿Ï
λεύεÏθαι,
οá½Ïá½¶ Ïὴν οá½Ïαν ÏαÏιÏÏάνÏÎµÏ á½Î¼á¿Î½ á¼Î¼Î±ÏÏάνειν. á¼Î³á½¼ δέ, á½
Ïι μέν
ÏοÏ᾿ á¼Î¾á¿Î½ Ïá¿ Ïόλει καὶ Ïá½° αá½Ïá¿Ï á¼Ïειν á¼ÏÏÎ±Î»á¿¶Ï ÎºÎ±á½¶ ΦίλιÏÏον
ÏιμÏÏá½µÏαÏθαι, καὶ μάλ᾿ á¼ÎºÏÎ¹Î²á¿¶Ï Î¿á¼¶Î´Î±Î á¼Ï᾿ á¼Î¼Î¿á¿¦ γάÏ, οὠÏάλαι
γέγονεν ÏαῦÏ᾿ á¼Î¼Ïá½¹ÏεÏαΠνῦν μένÏοι Ïá½³ÏειÏμαι Ïοῦθ᾿ ἱκανὸν
ÏÏολαβεá¿Î½ ἡμá¿Î½ εἶναι Ïὴν ÏÏá½½Ïην, á½
ÏÏÏ ÏÎ¿á½ºÏ ÏÏ
μμάÏοÏ
Ï
Ïá½½Ïομεν. á¼á½°Î½ Î³á½°Ï ÏοῦÏο βεβαίÏÏ á½Ïá½±Ïξá¿, Ïá½¹Ïε καὶ ÏεÏá½¶ Ïοῦ
Ïίνα ÏιμÏÏá½µÏεÏαί ÏÎ¹Ï ÎºÎ±á½¶ á½Î½ ÏÏá½¹Ïον á¼Î¾á½³ÏÏαι ÏκοÏεá¿Î½Î ÏÏὶν δὲ
Ïὴν á¼ÏÏὴν á½ÏÎ¸á¿¶Ï á½ÏοθέÏθαι, μάÏαιον ἡγοῦμαι ÏεÏá½¶ Ïá¿Ï
ÏελεÏ
Ïá¿Ï á½Î½Ïινοῦν Ïοιεá¿Ïθαι λόγον.
ÎημοÏθένοÏ
Ï, δ ᾿ÎλÏ
νθιακὸÏ
Georgian:
From a Unicode conference invitation:
ááá®ááá áá®áááá ááááá áá á áááá¡á¢á ááªáá Unicode-áá¡ ááááá á¡ááá ááá¨áá áá¡á
áááá¤áá áááªáááá ááá¡áá¡á¬á áááá, á ááááá᪠áááááá áááá 10-12 ááá á¢á¡,
á¥. áááááªá¨á, ááá áááááá¨á. áááá¤áá áááªáá á¨áá°áá ááá¡ áá ááá áá¡áá¤áááá¡
áá¥á¡ááá á¢ááá¡ áá¡áá ááá áááá¨á á áááá ááªáá ááá¢áá ááá¢á áá Unicode-á,
ááá¢áá áááªáááááááááªáá áá áááááááááªáá, Unicode-áá¡ ááááá§ááááá
áááá ááªáá£á á¡áá¡á¢ááááá¡á, áá ááááá§áááááá áá ááá ááááá¨á, á¨á áá¤á¢ááá¨á,
á¢áá¥á¡á¢áááá¡ áááá£á¨áááááá¡á áá áá áááááááááá áááááá£á¢áá á£á á¡áá¡á¢ááááá¨á.
Russian:
From a Unicode conference invitation:
ÐаÑегиÑÑÑиÑÑйÑеÑÑ ÑейÑÐ°Ñ Ð½Ð° ÐеÑÑÑÑÑ ÐеждÑнаÑоднÑÑ ÐонÑеÑенÑÐ¸Ñ Ð¿Ð¾
Unicode, коÑоÑÐ°Ñ ÑоÑÑоиÑÑÑ 10-12 маÑÑа 1997 года в ÐайнÑе в ÐеÑмании.
ÐонÑеÑенÑÐ¸Ñ ÑобеÑÐµÑ ÑиÑокий кÑÑг ÑкÑпеÑÑов по вопÑоÑам глобалÑного
ÐнÑеÑнеÑа и Unicode, локализаÑии и инÑеÑнаÑионализаÑии, воплоÑÐµÐ½Ð¸Ñ Ð¸
пÑÐ¸Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Unicode в ÑазлиÑнÑÑ
опеÑаÑионнÑÑ
ÑиÑÑемаÑ
и пÑогÑаммнÑÑ
пÑиложениÑÑ
, ÑÑиÑÑаÑ
, веÑÑÑке и многоÑзÑÑнÑÑ
компÑÑÑеÑнÑÑ
ÑиÑÑемаÑ
.
Thai (UCS Level 2):
Excerpt from a poetry on The Romance of The Three Kingdoms (a Chinese
classic 'San Gua'):
[----------------------------|------------------------]
๠à¹à¸à¹à¸à¸à¸´à¸à¸®à¸±à¹à¸à¹à¸ªà¸·à¹à¸à¸¡à¹à¸à¸£à¸¡à¹à¸ªà¸à¸ªà¸±à¸à¹à¸§à¸ à¸à¸£à¸°à¸à¸à¹à¸à¸¨à¸à¸à¸à¸à¸¹à¹à¸à¸¹à¹à¸à¸¶à¹à¸à¹à¸«à¸¡à¹
สิà¸à¸ªà¸à¸à¸à¸©à¸±à¸à¸£à¸´à¸¢à¹à¸à¹à¸à¸à¸«à¸à¹à¸²à¹à¸¥à¸à¸±à¸à¹à¸ สà¸à¸à¸à¸à¸à¹à¹à¸à¸£à¹à¹à¸à¹à¹à¸à¸¥à¸²à¹à¸à¸²à¸à¸±à¸à¸à¸²
à¸à¸£à¸à¸à¸±à¸à¸à¸·à¸à¸à¸±à¸à¸à¸µà¹à¸à¹à¸à¸à¸µà¹à¸à¸¶à¹à¸ à¸à¹à¸²à¸à¹à¸¡à¸·à¸à¸à¸à¸¶à¸à¸§à¸´à¸à¸£à¸´à¸à¹à¸à¹à¸à¸à¸±à¸à¸«à¸à¸²
à¹à¸®à¸à¸´à¹à¸à¹à¸£à¸µà¸¢à¸à¸à¸±à¸à¸à¸±à¹à¸§à¸«à¸±à¸§à¹à¸¡à¸·à¸à¸à¸¡à¸² หมายà¸à¸°à¸à¹à¸²à¸¡à¸à¸à¸±à¹à¸§à¸à¸±à¸§à¸ªà¸³à¸à¸±à¸
à¹à¸«à¸¡à¸·à¸à¸à¸à¸±à¸à¹à¸ªà¹à¸¥à¹à¹à¸ªà¸·à¸à¸à¸²à¸à¹à¸à¸«à¸² รัà¸à¸«à¸¡à¸²à¸à¹à¸²à¹à¸à¹à¸²à¸¡à¸²à¹à¸¥à¸¢à¸à¸²à¸ªà¸±à¸
à¸à¹à¸²à¸¢à¸à¹à¸à¸à¸à¸¸à¹à¸à¸¢à¸¸à¹à¸¢à¸à¹à¸«à¹à¹à¸à¸à¸à¸±à¸ à¹à¸à¹à¸ªà¸²à¸§à¸à¸±à¹à¸à¹à¸à¹à¸à¸à¸à¸§à¸à¸à¸·à¹à¸à¸à¸§à¸à¹à¸
à¸à¸¥à¸±à¸à¸¥à¸´à¸à¸¸à¸¢à¸à¸¸à¸¢à¸à¸µà¸à¸¥à¸±à¸à¸à¹à¸à¹à¸«à¸à¸¸ à¸à¹à¸²à¸à¸à¸²à¹à¸à¸¨à¸à¸£à¸´à¸à¸«à¸à¸²à¸à¹à¸²à¸£à¹à¸à¸à¹à¸«à¹
à¸à¹à¸à¸à¸£à¸à¸£à¸²à¸à¹à¸²à¸à¸±à¸à¸à¸à¸à¸£à¸£à¸¥à¸±à¸¢ ฤà¹
หาà¹à¸à¸£à¸à¹à¸³à¸à¸¹à¸à¸¹à¹à¸à¸£à¸£à¸¥à¸±à¸à¸à¹ ฯ
(The above is a two-column text. If combining characters are handled
correctly, the lines of the second column should be aligned with the
| character above.)
Ethiopian:
Proverbs in the Amharic language:
á°áá á áá³á¨áµ ááᥠá áá¨á°áµá¢
á¥á á«áá á¥áá°á á£á´ á ááá áá¢
áᥠá«áá¤á± ááá¥á ááá¢
á°á á ááá á
ᤠá£áá ᣠáá£áµ á áá°ááá¢
á¨á á áááá³ á á
ᤠá áá³á½áá¢
á áᥠá á á á³á á°áá³á¢
á²á°á¨áá áá°á¨ááá¢
ááµ á ááµá¥ ááááá á á¥áá© ááá³áá¢
áµá á¢á«á¥á á áá á³ á«áµáá¢
á°á á¥áá°á¤á± á¥áá
á¥áá° áá¨á¤á± á áá°á³á°ááá¢
á¥ááá á¨á¨áá°áá áá®á® á³áááá á ááµááá¢
á¨áá¨á¤áµ áá£á¥ á¢á«á©áµ ááµá
á£á«á©áµ á«á áá
á¢
á¥á« á¨ááá³áµ ááá ááá³áµá¢
áá£á áá°áªá« á¨ááᥠáááµ áá ááá«áá¢
á¨á¥áµáá á áá© áá« á¨á áá« á áá© ááá«á¢
á°ááá á¢á°á á°ááá¶ á£áá¢
áá³á
á
áá á¢áá á¨ááµá
á áµáá°áá¢
á¥ááá
á á áá«á½á
áá áááá¢
Runes:
á»á á³á¹á«á¦ á¦á«á á»á áá¢áá á©á¾ á¦á«á ááªá¾áá á¾á©á±á¦á¹ááªá±áá¢á á¹áᦠá¦áª á¹áá¥á«
(Old English, which transcribed into Latin reads 'He cwaeth that he
bude thaem lande northweardum with tha Westsae.' and means 'He said
that he lived in the northern land near the Western Sea.')
Braille:
â¡â â §â â ¼â â â¡â â â ⠹⠰â â¡£â â
â¡â â â â ¹ â ºâ â â â â â â â â â â â â â ºâ ⠹⠲ ⡹⠻â â â â â â â ³â â
â ±â â â â §â » â â â ³â â ¹â â â ² ⡹â â â â â â â » â â â â â â â ¥â â â â â ºâ â
â â â â â « â â ¹ â ¹â â â â »â â ¹â â â â â ¹â â â â »â
â â ¹â â ¥â â â »â â â
â »â
â â â â ¹â â ¡â â â â â ³â â ⠻⠲ â¡â â â â â â â â â â â « â â â ² â¡â â
â¡â â â â â â â °â â â â â â ºâ â â â â â â ¥â â â â °â¡¡â â â â â â â â â â ⠹⠹â â â â
â ¡â â â â â â â ¥â â â â â â â â â â â ²
â¡â â â¡â â â â ¹ â ºâ â â â â â â â â â â â â â â â ¤â â â â â ²
â¡â â â â¡ â â â â °â â â â â â â â â â ¹ â ¹â â â¡ â
â â ªâ â â â â ¹
â ªâ â
â â ªâ â «â â â â ±â â ⠹⠻â â â â â â â â â ¥â â â â ¹ â â â â â â â ³â
â â â â â â ¤â â â â â ² â¡ â â â £â â â â §â â â â ² â â â â â «â â â ¹â â â â â â â
â â â â â â â â â â â â ¤â â â â â â â ¹â â â â â â â â â â â â â â â â â â â â â â ⠻⠹
â â ¹â â â â â â â ² â¡â ¥â â ¹â â ºâ â â â â â â â ³â â â â â â â â â
â â â â ¹â â â â â â â â â â â â â ¹ â ¥â â â â â ⠪⠫ â â â â â
â ©â â â â â â â â â â ¥â â â â â â â â ¹â â¡â ³â â â ⠹⠰â â â â â â â â â ² ⡹⠳
â ºâ â â ⠹⠻â â â â â â â »â â â â â â â â â â â â â â â â â â â â â â â â â â ¹â â ¹â â
â¡â â â â ¹ â ºâ â â â â â â â â â â â â â â â ¤â â â â â ²
(The first couple of paragraphs of "A Christmas Carol" by Dickens)
Compact font selection example text:
ABCDEFGHIJKLMNOPQRSTUVWXYZ /0123456789
abcdefghijklmnopqrstuvwxyz £©µÀÆÖÞßéöÿ
–—‘“”„†•…‰™œŠŸž€ ΑΒΓΔΩαβγδω АБВГДабвгд
∀∂∈ℝ∧∪≡∞ ↑↗↨↻⇣ ┐┼╔╘░►☺♀ ﬁ�⑀₂ἠḂӥẄɐː⍎אԱა
Greetings in various languages:
Hello world, Καλημέρα κόσμε, コンニチハ
Box drawing alignment tests: █
▉
╔══╦══╗ ┌──┬──┐ ╭──┬──╮ ╭──┬──╮ ┏━━┳━━┓ ┎┒┏┑  ╷ ╻ ┏┯┓ ┌┰┐  ▊ ╱╲╱╲╳╳╳
║┌─╨─┐║ │╔═╧═╗│ │╒═╪═╕│ │╓─╁─╖│ ┃┌─╂─┐┃ ┗╃╄┙ ╶┼╴╺╋╸┠┼┨ ┝╋┥ ▋ ╲╱╲╱╳╳╳
║│╲ ╱│║ │║   ║│ ││ │ ││ │║ ┃ ║│ ┃│ ╿ │┃ ┍╅╆┓  ╵ ╹ ┗┷┛ └┸┘  ▌ ╱╲╱╲╳╳╳
╠╡ ╳ ╞╣ ├╢   ╟┤ ├┼─┼─┼┤ ├╫─╂─╫┤ ┣┿╾┼╼┿┫ ┕┛┖┚   ┌┄┄┐ ╎ ┏┅┅┓ ┋ ▍ ╲╱╲╱╳╳╳
║│╱ ╲│║ │║   ║│ ││ │ ││ │║ ┃ ║│ ┃│ ╽ │┃ ░░▒▒▓▓██ ┊ ┆ ╎ ╏ ┇ ┋ ▎
║└─╥─┘║ │╚═╤═╝│ │╘═╪═╛│ │╙─╀─╜│ ┃└─╂─┘┃ ░░▒▒▓▓██ ┊ ┆ ╎ ╏ ┇ ┋ ▏
╚══╩══╝ └──┴──┘ ╰──┴──╯ ╰──┴──╯ ┗━━┻━━┛ ▗▄▖▛▀▜  └╌╌┘ ╎ ┗╍╍┛ ┋ ▁▂▃▄▅▆▇█
__[ YAML::XS result ]__
---
[YAML::XS dump of the expected (spacing, token) pairs for the sample
text above. The hunks in this fixture merge previously split token
pairs into the single tokens "2H" (twice, in "2H₂ + O₂ ⇌ 2H₂O"),
"3–4", "1lI", "0OD", "8B", and "4th".]
|
hinrik/hailo | f73989c0e4b1dc9603287db9f33e7ad670b7c861 | Die when the user tries --stats without a brain | diff --git a/Changes b/Changes
index ae3a595..2c13ace 100644
--- a/Changes
+++ b/Changes
@@ -1,516 +1,518 @@
Revision history for Hailo
{{$NEXT}}
+ - Die when the user tries --stats without a brain
+
0.65 2011-04-26 19:28:27
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize '–' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
- The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
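(Aside: a minimal sketch of the /o trick described in this entry. This
is illustrative only, not the actual Hailo tokenizer source, and the
function name is invented.)
    use strict;
    use warnings;
    use Regexp::Common qw(URI);
    # Without /o the pattern is rebuilt on every match, because it
    # interpolates $RE{URI}{HTTP}. With /o perl compiles it exactly
    # once for the lifetime of the process.
    sub first_http_uri {
        my ($chunk) = @_;
        return $chunk =~ /($RE{URI}{HTTP})/o ? $1 : undef;
    }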
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
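(Aside: the fetchall swap above boils down to the following pattern.
This is a hypothetical sketch: the table and column names are invented,
and a throwaway SQLite database is assumed.)
    use strict;
    use warnings;
    use DBI;
    my $dbh = DBI->connect('dbi:SQLite:dbname=brain.sqlite', '', '',
                           { RaiseError => 1 });
    my $sth = $dbh->prepare('SELECT id, text FROM token');
    $sth->execute;
    # Before: fetchall_hashref builds a hash per row, which is costly
    # in a tight loop:
    #   my $rows = $sth->fetchall_hashref('id');
    # After: fetchall_arrayref returns plain array refs instead:
    my $rows = $sth->fetchall_arrayref;   # [ [ id, text ], ... ]
    for my $row (@$rows) {
        my ($id, $text) = @$row;
        # use the columns positionally here
    }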
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically (see
the sketch after this entry).
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
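(Aside: a sketch of the portable last_insert_id() pattern referenced in
the entry above. Table and column names are invented; a connected $dbh
is assumed.)
    $dbh->do('INSERT INTO token (spacing, text) VALUES (?, ?)',
             undef, 0, 'foo');
    # DBI delegates to the driver: DBD::SQLite uses its built-in
    # last-insert-rowid, and DBD::Pg consults the column's sequence,
    # so no hand-written driver-specific SQL is needed.
    my $id = $dbh->last_insert_id(undef, undef, 'token', 'id');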
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC3::Run calling a script that called FindBin didn't work
as expected.
- Add more testing including a really basic test for DBIx::Class
debugging (from the dbix-class branch) and making TAP output
more verbose.
- Run all the tests Hailo::Test runs internally for each engine
one-by-one using the DBD::SQLite memory driver. This makes sure
the internal tests don't depend on each other in odd ways.
0.29 2010-03-13 10:32:43
- Remove Data::Random as a dependency. It fails the most tests of
all the dists we depend on and we don't really need it for
diff --git a/lib/Hailo/Command.pm b/lib/Hailo/Command.pm
index d6948ad..1c548cb 100644
--- a/lib/Hailo/Command.pm
+++ b/lib/Hailo/Command.pm
@@ -1,428 +1,429 @@
package Hailo::Command;
use 5.010;
use Any::Moose;
use Any::Moose 'X::Getopt';
use Any::Moose 'X::StrictConstructor';
use namespace::clean -except => 'meta';
extends 'Hailo';
with any_moose('X::Getopt::Dashes');
## Our internal Getopts method that Hailo.pm doesn't care about.
has help_flag => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'h',
cmd_flag => 'help',
isa => 'Bool',
is => 'ro',
default => 0,
documentation => "You're soaking it in",
);
has _go_version => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'v',
cmd_flag => 'version',
documentation => 'Print version and exit',
isa => 'Bool',
is => 'ro',
);
has _go_examples => (
traits => [ qw/ Getopt / ],
cmd_flag => 'examples',
documentation => 'Print examples along with the help message',
isa => 'Bool',
is => 'ro',
);
has _go_progress => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'p',
cmd_flag => 'progress',
documentation => 'Display progress during the import',
isa => 'Bool',
is => 'ro',
default => sub {
my ($self) = @_;
$self->_is_interactive();
},
);
has _go_learn => (
traits => [ qw/ Getopt / ],
cmd_aliases => "l",
cmd_flag => "learn",
documentation => "Learn from STRING",
isa => 'Str',
is => "ro",
);
has _go_learn_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "L",
cmd_flag => "learn-reply",
documentation => "Learn from STRING and reply to it",
isa => 'Str',
is => "ro",
);
has _go_train => (
traits => [ qw/ Getopt / ],
cmd_aliases => "t",
cmd_flag => "train",
documentation => "Learn from all the lines in FILE, use - for STDIN",
isa => 'Str',
is => "ro",
);
has _go_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "r",
cmd_flag => "reply",
documentation => "Reply to STRING",
isa => 'Str',
is => "ro",
);
has _go_random_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "R",
cmd_flag => "random-reply",
documentation => "Like --reply but takes no STRING; Babble at random",
isa => 'Bool',
is => "ro",
);
has _go_stats => (
traits => [ qw/ Getopt / ],
cmd_aliases => "s",
cmd_flag => "stats",
documentation => "Print statistics about the brain",
isa => 'Bool',
is => "ro",
);
## Things we have to pass to Hailo.pm via triggers when they're set
has _go_autosave => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'a',
cmd_flag => 'autosave',
documentation => 'Save the brain on exit (on by default)',
isa => 'Bool',
is => 'rw',
trigger => sub {
my ($self, $bool) = @_;
$self->save_on_exit($bool);
},
);
has _go_order => (
traits => [ qw/ Getopt / ],
cmd_aliases => "o",
cmd_flag => "order",
documentation => "Markov order; How deep the rabbit hole goes",
isa => 'Int',
is => "rw",
trigger => sub {
my ($self, $order) = @_;
$self->order($order);
},
);
has _go_brain => (
traits => [ qw/ Getopt / ],
cmd_aliases => "b",
cmd_flag => "brain",
documentation => "Load/save brain to/from FILE",
isa => 'Str',
is => "ro",
trigger => sub {
my ($self, $brain) = @_;
$self->brain($brain);
},
);
# working classes
has _go_engine_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "E",
cmd_flag => "engine",
isa => 'Str',
is => "rw",
documentation => "Use engine CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->engine_class($class);
},
);
has _go_storage_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "S",
cmd_flag => "storage",
isa => 'Str',
is => "rw",
documentation => "Use storage CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->storage_class($class);
},
);
has _go_tokenizer_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "T",
cmd_flag => "tokenizer",
isa => 'Str',
is => "rw",
documentation => "Use tokenizer CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->tokenizer_class($class);
},
);
has _go_ui_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "u",
cmd_flag => "ui",
isa => 'Str',
is => "rw",
documentation => "Use UI CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->ui_class($class);
},
);
# Stop Hailo from polluting our command-line interface
for (qw/ save_on_exit order brain /, map { qq[${_}_class] } qw/ engine storage tokenizer ui /) {
has "+$_" => (
traits => [ qw/ NoGetopt / ],
);
}
# Check validity of options
before run => sub {
my ($self) = @_;
if (not $self->_storage->ready and
(defined $self->_go_reply or
defined $self->_go_train or
+ defined $self->_go_stats or
defined $self->_go_learn or
defined $self->_go_learn_reply or
defined $self->_go_random_reply)) {
# TODO: Make this spew out the --help reply just like hailo
# with invalid options does usually, but only if run via
# ->new_with_options
- die "To reply/train/learn you must specify options to initialize your storage backend";
+ die "To reply/train/learn/stat you must specify options to initialize your storage backend\n";
}
return;
};
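# (Illustrative aside, not part of Hailo::Command: a `before' modifier
# like the one above runs ahead of the original method and can veto it
# by dying. A minimal standalone equivalent:
#
#     package Demo;
#     use Any::Moose;
#     has ready => (is => 'ro', isa => 'Bool', default => 0);
#     sub run { print "running\n" }
#     before run => sub { die "not ready\n" unless $_[0]->ready };
#
#     package main;
#     Demo->new(ready => 1)->run;   # prints "running"
#     Demo->new->run;               # dies with "not ready"
# )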
sub run {
my ($self) = @_;
if ($self->_go_version) {
# Munging strictness because we don't have a version from a
# Git checkout. Dist::Zilla provides it.
no strict 'vars';
my $version = $VERSION // 'dev-git';
say "hailo $version";
return;
}
if ($self->_is_interactive() and
$self->_storage->ready and
not defined $self->_go_train and
not defined $self->_go_learn and
not defined $self->_go_reply and
not defined $self->_go_learn_reply and
not defined $self->_go_stats and
not defined $self->_go_random_reply) {
$self->_ui->run($self);
}
$self->train($self->_go_train) if defined $self->_go_train;
$self->learn($self->_go_learn) if defined $self->_go_learn;
if (defined $self->_go_learn_reply) {
my $answer = $self->learn_reply($self->_go_learn_reply);
say $answer // "I don't know enough to answer you yet.";
}
if (defined $self->_go_random_reply) {
my $answer = $self->reply();
say $answer // "I don't know enough to answer you yet.";
}
elsif (defined $self->_go_reply) {
my $answer = $self->reply($self->_go_reply);
say $answer // "I don't know enough to answer you yet.";
}
if ($self->_go_stats) {
my ($tok, $ex, $prev, $next) = $self->stats();
my $order = $self->_storage->order;
say "Tokens: $tok";
say "Expression length: $order tokens";
say "Expressions: $ex";
say "Links to preceding tokens: $prev";
say "Links to following tokens: $next";
}
return;
}
override _train_fh => sub {
my ($self, $fh, $filename) = @_;
if ($self->_go_progress and $self->_is_interactive) {
$self->train_progress($fh, $filename);
} else {
super();
}
};
before train_progress => sub {
require Term::Sk;
require File::CountLines;
File::CountLines->import('count_lines');
require Time::HiRes;
Time::HiRes->import(qw(gettimeofday tv_interval));
return;
};
sub train_progress {
my ($self, $fh, $filename) = @_;
my $lines = count_lines($filename);
my $progress = Term::Sk->new('%d Elapsed: %8t %21b %4p %2d (%c lines of %m)', {
# Start at line 1, not 0
base => 1,
target => $lines,
# Every 0.1 seconds for long files
freq => ($lines < 10_000 ? 10 : 'd'),
# Override Term::Sk's default 100_100 to 100,000
commify => sub {
my $int = shift;
$int = reverse $int;
$int =~ s/(\d{3})(?=\d)(?!\d*\.)/$1,/g;
$int = reverse $int;
return $int;
},
}) or die "Error in Term::Sk->new: (code $Term::Sk::errcode) $Term::Sk::errmsg";
my $next_update = 0;
my $start_time = [gettimeofday()];
my $i = 0; while (my $line = <$fh>) {
$i++;
chomp $line;
$self->_learn_one($line);
$progress->up;
}
$progress->close;
my $elapsed = tv_interval($start_time);
say sprintf "Trained from %d lines in %.2f seconds; %.2f lines/s", $i, $elapsed, ($i / $elapsed);
return;
}
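# (Aside: the lines/s figure above relies on Time::HiRes. tv_interval()
# takes a [gettimeofday()] snapshot and returns the fractional seconds
# elapsed since then. Minimal standalone usage:
#
#     use Time::HiRes qw(gettimeofday tv_interval);
#     my $t0 = [gettimeofday()];
#     # ... do some work ...
#     printf "%.2f seconds\n", tv_interval($t0);
# )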
# --i--do-not-exist
sub _getopt_spec_exception { goto &_getopt_full_usage }
# --help
sub _getopt_full_usage {
my ($self, $usage, $plain_str) = @_;
# If called from _getopt_spec_exception we get "Unknown option: foo"
my $warning = ref $usage eq 'ARRAY' ? $usage->[0] : undef;
my ($use, $options) = do {
# $plain_str under _getopt_spec_exception
my $out = $plain_str // $usage->text;
# The default getopt order sucks, use reverse sort order
chomp(my @out = split /^/, $out);
my $opt = join "\n", sort { $b cmp $a } @out[1 .. $#out];
($out[0], $opt);
};
my $synopsis = do {
require Pod::Usage;
my $out;
open my $fh, '>', \$out;
no warnings 'once';
my $hailo = File::Spec->catfile($Hailo::Command::HERE_MOMMY, 'hailo');
# Try not to fail on Win32 or other odd systems which might have hailo.pl not hailo
$hailo = ((glob("$hailo*"))[0]) unless -f $hailo;
Pod::Usage::pod2usage(
-input => $hailo,
-sections => 'SYNOPSIS',
-output => $fh,
-exitval => 'noexit',
);
close $fh;
$out =~ s/\n+$//s;
$out =~ s/^Usage:/examples:/;
$out;
};
# Unknown option provided
print $warning if $warning;
print <<"USAGE";
$use
$options
\n\tNote: All input/output and files are assumed to be UTF-8 encoded.
USAGE
# Hack: We can't get at our object from here so we have to inspect
# @ARGV directly.
say "\n", $synopsis if "@ARGV" ~~ /--examples/;
exit 1;
}
__PACKAGE__->meta->make_immutable;
=head1 NAME
Hailo::Command - Class for the L<hailo> command-line interface to L<Hailo>
=head1 DESCRIPTION
This is an internal class L<hailo> uses for its command-line
interface. See L<Hailo> for the public interface.
=head1 PRIVATE METHODS
=head2 C<run>
Run Hailo in accordance with the attributes that were passed to
it. This method is called by the L<hailo> command-line utility and the
Hailo test suite; its behavior is subject to change.
=head1 AUTHOR
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
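With the guard above in place, running the stats command against an
uninitialized backend now fails up front instead of crashing later. A
hypothetical session (the message is the one from the die() above):
    hailo --stats
    To reply/train/learn/stat you must specify options to initialize your storage backend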
|
hinrik/hailo | f7ab21ed52307cf47015e35987c2c50bceec0886 | Get rid of this old --help hack, it's causing problems | diff --git a/Changes b/Changes
index a6fea2d..8246f15 100644
--- a/Changes
+++ b/Changes
@@ -1,531 +1,533 @@
Revision history for Hailo
{{$NEXT}}
- Expand "~" and such in the brain filename argument.
- Word tokenizer:
Recognize "e.g." and other abbreviations as single tokens.
Recognize '–' as a word-separating dash.
Put a full stop after words separated by dots, like "sources.list".
Fix capitalization problems caused by "..."
Capitalize "foo!" and "foo."
Preserve casing of words such as "POE-Component-IRC"
Catch "Acme::POE::Tree" as one word, and preserve its casing
Catch "rollin'" as one word when it makes sense
Catch 'foo-" as one word when it makes sense, and capitalize at
start of a sentence
Capitalize quoted words at the start of a line
+ - The --help option didn't work with Moose >=1.25 (RT #67548). Fixed.
+
0.64 2010-12-10 11:09:08
- Say "X lines of Y" instead of "X of Y" when training with
hailo(1).
- Bump README.pod in the distro to 0.59~1
0.63 2010-12-09 09:03:30
- Mark the failing tests in t/command/shell.t as TODO for now.
0.62 2010-12-06 03:30:07
- Really fix t/command/shell.t this time. I had the arguments to
the test function in the wrong order.
0.61 2010-12-03 06:47:22
- Use Class::Load instead of Class::MOP::load_class and eval'd require()
- Fix rare test failure in t/command/shell.t
0.60 2010-11-09 01:35:49
- Match email addresses and don't capitalize them
- Don't insert additional spaces in some parsing edge-cases
0.59 2010-10-23 21:20:22
- Word tokenizer: Match combining characters in words on Perl >=5.12
0.58 2010-10-22 03:34:08
- Forgot to remove "use Text::Unidecode;" from Words.pm after dropping
that dependency in the previous release.
- Skip utf8-text.t on Perl <5.12 due to differences in Unicode matching.
0.57 2010-10-21 01:25:09
- Fix typo in Schema.pm which messed up the type of the token.text
SQL column (only relevant to non-SQLite DBs)
- More robust matching of non-word immediately following URIs
0.56 2010-10-18 05:15:10
- Add Test::Expect and Test::Synopsis to TestRequires
- Use YAML::XS in utf8-text.t for clarity, and make the test
optional.
0.55 2010-10-16 17:58:00
- Scrap the "normal word" matching constraint from the previous release.
It was failing to match words like "4.1GB", causing an infinite loop.
Instead, we now assign different spacing attributes to components of
conjoined words of varying types (e.g. recognizing that "4.1GB" is a
normal word ("4.1") followed by a postfix ("GB").
- Don't match '>' as part of the URI in "<http://google.com>".
0.54 2010-10-16 10:10:19
- Fix the previously broken `save_on_exit' so that it does what
it's supposed to do.
The `save_on_exit' option in Hailo.pm never worked, and nor did
hailo's corresponding --autosave option. Fixed that by
correcting some method name confusion, and solved a BUG by
finally adding a test for this option.
0.53 2010-10-15 21:29:02
- hailo command: Fix off-by-one error when reporting the number of lines
learned when training.
- Don't truncate the brain file after loading it with in_memory enabled.
- Word tokenizer improvements:
* Recognize "´" as an apostrophe when matching words.
* Recognize a compound word like "anti-scientology" as a single word.
* Put additional constraints on where a "normal word" can be matched.
This enables it to match things like "3D" and "800x600" as single
words.
* Fix capitalization of words after sentences which end with dashed
words like "anti-scientology".
* Convert chunks to ASCII before looking for a URI to work around a
limitation in Regexp::Common.
* Recognize ssh:// (and foo+ssh://) URIs.
0.52 2010-07-18 22:40:02
- Hailo no longer passes references to itself to its slave
classes, so it's garbage collected at the right time.
This bug broke the hybrid disk <-> in_memory SQLite mode, it's
now fixed again. The test that was supposed to check whether the
hybrid in-memory backend worked did the opposite of what it was
supposed to do, and didn't work anyway due to how it was set up.
- Giving arguments to Hailo's save method (they're passed to the
Storage class) never worked due to a trivial error. Now it does.
0.51 2010-07-18 15:49:41
- Bump dependencies for all modules to the latest CPAN
versions. This avoids the issues Moose had with I<Can't locate
object method "add_method" via package "Moose::Meta::Role">, and
various other problems that might be present in older modules.
- MooseX::Getopt renames its help attribute to help_flag in
0.30. That broke hailo --help, work around it by checking if the
help_flag attribute exists in
any_moose('X::Getopt::GLD')->meta->get_attribute_list. By
checking we'll also be compatible if Mouse ever switches to the
new system.
- Mouse now supports MouseX::StrictConstructor, so we can delete
code that only used it if we were running under Moose.
- Put bin/hailo in package hailo, because Moose will whine about
not exporting sugar to main.
0.50 2010-05-30 12:44:25
- Hailo hadn't been installed with the hailo command-line script
since release 0.34. Now it's built with MakeMaker::Awesome 0.08
which fixes this error.
- Fix the script compilation test in t/01_compile.t. Test::Script
renamed its script_compiles function to script_compiles_ok.
0.49 2010-05-29 19:20:26
- Term::Sk fixed RT #57902 and RT #57903 which I reported. hailo's
progress bar now looks better as a result.
0.48 2010-05-29 15:16:18
- The hailo(1) command-line interface now has a much better
interface. It now supports SQLite-like command syntax, and has a
built-in help system.
- Replace fortune(1) in hailo's --examples output with bot-training(1)
0.47 2010-05-29 13:08:51
- Optimize Hailo::Tokenizer::Words to use less subroutine calls in
critical code. Changed the time being spent in that file from
5.14s to 3.72s out of a total runtime of 35.6s when running
t/hailo/real_workload.t.
- The initial tokenizer class is now saved as metadata to the
database, and loaded into Hailo from existing brains.
This means that this now works as expected:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --reply foo
I.e. Hailo will note that it used the Chars tokenizer in the
database, and load the correct tokenizer in the future. However
this will cause Hailo to die:
hailo --brain db.sqlite --tokenizer Chars --train file.trn
hailo --brain db.sqlite --tokenizer Words --reply foo
It spots that you've explicitly said you want a tokenizer that's
incompatible with the one in the database for doing replies and
dies. This is what it did before if you did the exact same thing
with the --order switch.
- Rename test files in t/ to drop the DBD- prefix. Tests like
Words-*.t were also moved into
sub-directories, e.g. Words/*.t.
0.46 2010-05-27 22:47:45
- The Regexp::Common pattern added in 0.44 slowed down Hailo by up
to 45% in pathological cases (tests doing lots of
tokenization), overall test suite slowdown was closer to 10%.
Now the pattern is only compiled once for the lifetime of the
process with /o. This trick has also been applied to a few other
patterns.
0.45 2010-05-27 19:56:31
- The regular expression introduced in 0.44 to check if a word was
of MiXeD CaSe was pathological. Changed it to a simpler one that
works better.
- The --no-progress option to hailo to suppress the progress bar
now works again. It had been broken since 0.26 at least.
0.44 2010-05-27 15:55:30
- Removed the backwards-compatible "Pg" and "mysql" aliases for
the storage backends and the "DBD::" prefix. This use has been
deprecated since 0.31. Simply use "PostgreSQL", "MySQL" or
"SQLite" instead.
- This release contains improvements for the default Words
tokenizer. It's recommended that users rebuild their Hailo
brains when upgrading. See UPGRADING in the Hailo manual.
- Add URI support to the Words tokenizer. It will now use
Regexp::Common's URI regex to tokenize URIs as-is.
This improves performance a lot on input that contains URIs,
previously Hailo would split them up nonsensically, which
would inflate the token table a lot with little gain.
- Preserve the capitalization of words that change case in the
middle of the word. Examples include GumbyBRAIN, WoW, HoRRiBlE
etc. Previously these and others that weren't 100% upper-case
would all be lower cased.
- Preserve the capitalization of words that are all upper-case
followed by a non-word character followed by lower-case. This
preserves words like KIA'd, FYIQ'ed and other things that are
likely to be partial acronyms.
- Twitter names. I.e. tokens matching @[A-Za-z0-9_]+ will be
tokenized as-is. This ensures that Hailo users like
Bot::Twatterhose don't corrupt their Twitter names.
- Eliminate some redundant use of the regex engine in the Words
tokenizer.
- Include --help in bin/hailo's POD. It's now easier to read it
standalone, and `man hailo' has the same info as `hailo --help`.
- The utils/hailo-benchmark script had been broken for some time
due to a trivial API change in Hailo::Test. Now it works again.
- Stop pointing to the GitHub issue tracker in the
documentation. We now use RT instead.
0.43 2010-05-11 19:54:36
- Tests failed on 0.42 without Test::Synopsis due to invalid test plan.
- Don't test with both Mouse and Moose on Windows. Failed with
Strawberry Perl + dmake, see report
07242729-b19f-3f77-b713-d32bba55d77f.
- Skip t/storage/001_meta/switch-order.t on OpenBSD where it's
known to fail, see report 07172161-b19f-3f77-b713-d32bba55d77f.
0.42 2010-05-10 21:26:45
- Hailo should now work on Windows. Dependency on two modules that
had Windows failures in their dependencies has been dropped.
- Remove Term::ProgressBar dependency, use Term::Sk
instead. Unlike Term::ProgressBar it doesn't depend on
Class::MethodMaker and Term::ReadKey. Those contributed a lot to
our failures on Windows.
- Don't hard depend on Test::Expect. It depends on IO::Tty which does
not work under any version of Windows, except under Cygwin (it
calls fcntl(*fd, F_DUPFD, 3)).
- Remove Test::Synopsis dependency. It depends on
Filter::Util::Call which had some failures. Now only used if
it's installed on the system already.
- Remove MouseX/MooseX::Types dependency. We don't use any of the
fancy type features, so there's no point in using this.
0.41 2010-04-23 00:24:24
- Don't (optionally) use Sys::Prctl on Perl 5.13.0 and above. It
was made redundant in perl core commit 7636ea95c5 by yours
truly.
- A script to benchmark Hailo on different locally installed
perls.
- Correct test count for TEST_EXHAUSTIVE.
- Update HALBot on the Web link to http://bifurcat.es
0.40 2010-04-13 15:10:23
- Add --reply-random to the hailo command-line interface. Allows
command-line babbling at random.
- Improved formatting for --train on the command line. Now outputs
how many lines/s were trained, e.g. "Trained from 11587 lines in
36.97 seconds; 313.40/s"
- Fixed a bug on Mac OS X and probably some other
systems. Previously the SQLite code would assume that a file
that was `-s $file' was an empty database.
On Mac OS X these files weren't 0 byte but 1 byte files. Route
around this entire issue by creating a new API (->initialized)
that checks if the schema has really been created by doing a
query on the info table.
- Solved bug in the t/storage/DBD-SQLite-memory/babble.t test. The
test was skipped due to intermittent failures. Failures were due
to a programming error in the tests.
- DBD-SQLite-file-exhaustive-all.t is now run on
TEST_EXHAUSTIVE_ALL=1. Previously this test was unreachable.
- Add example utility to spew random names in random-names.
0.39 2010-04-09 13:21:22
- Set the homepage field in META.yml to http://hailo.org
- Add TODO tests for the Words tokenizer. There are some
sub-optimal capitalization behaviors we'd like to fix.
- Use Prereq phases in Dist::Zilla. Hailo will have proper
Runtime/Test/Recommends dependencies when META.yml 2.0 comes
out.
0.38 2010-04-03 18:15:17
- Clean up the code in Hailo::Engine::Default. It's now easier to
read and doesn't have duplication.
- Depend on IPC::System::Simple. Used by autodie to run
utils/hailo-benchmark-lib-vs-system
- Link to http://hailo.org and the freshmeat and ohloh pages for
hailo.
0.37 2010-03-31 14:28:46
- Very minor release. Switch to using
Dist::Zilla::Plugin::MakeMaker::Awesome for the build tools.
0.36 2010-03-29 00:15:35
- Add a test to try to smoke out a test failure we've been having
for a while. See the commit message for
21f68bd79d2fc59505887311042d6d16c5cf79dd for a very long and
boring explanation.
0.35 2010-03-27 21:27:33
- The error "You've manually supplied an order of" error message
was always incorrect. It contained variables that weren't being
interpolated.
- Move some of our test data into the new Bot::Training dist.
- Fix 'v-string in use/require non-portable' warnings from an osx
smoker by using 5.010 instead of 5.10.0.
0.34 2010-03-20 23:26:27
- Reword the UPGRADING section in Hailo's POD to be less
confusing.
- Die if the user manually specified an order that isn't
equivalent to the existing database he's loading from.
- Hailo now uses CPAN's RT as its default bugtracker instead of
GitHub's issue tracker. RT sucks but at least we can get at our
issues using something that isn't a web interface if we use RT.
- Link to our new website at http://hailo.github.com and mention
the Hailo web interface at http://www.dhdo.org in the POD.
- Enforce arguments being HashRef[Str] (instead of just HashRef)
in Hailo::Role::Arguments.
- Code cleanup in Hailo.pm to remove duplication.
0.33 2010-03-20 01:57:33
- Optimize Hailo::Engine::Default to use less method calls. On
t/hailo/real_workload.t (i.e. mass replies) this speeds up Hailo
by 8%:
s/iter System Hailo lib Hailo
System Hailo 74.8 -- -7%
lib Hailo 69.4 8% --
Furthermore replace the use of ->fetchall_hashref in a tight
loop with ->fetchall_arrayref. This sped up mass replies by
almost 60% (added to the 8% above):
s/iter System Hailo lib Hailo
System Hailo 68.2 -- -36%
lib Hailo 43.6 57% --
But aside from selective benchmarking this made Hailo around 5%
faster in the common case:
s/iter System Hailo lib Hailo
System Hailo 21.5 -- -6%
lib Hailo 20.3 6% --
0.32 2010-03-19 12:00:22
- t/storage/dbd-options.t wasn't updated to take into account the
renaming of modules done in 0.31. It would fail on machines that
didn't have an older version of Hailo installed when running
`make test'.
- t/hailo/non_standard_plugin.t whines with `Issuing rollback()
due to DESTROY without explicit disconnect()' on some systems
since it doesn't use the Hailo::Test framework.
Issuing rollbacks at the right time is an open issue with
Hailo. I haven't been able to make it do the right thing by
sprinkling around destructors in the main code, that'll cause
things to be destroyed prematurely (probably some silly race
condition).
- Re-add Data::Section dependency. We need it for the
Words-utf8-text.t test.
0.31 2010-03-18 21:45:25
- Optimization and cleanup release. Hailo is now much much
snappier and eats less memory. Here's how long it takes to run
the test suite before/after 0.30:
s/iter 0.30 Hailo 0.31 Hailo
0.30 Hailo 20.2 -- -16%
0.31 Hailo 16.9 19% --
- Split out Hailo::Storage::* into Hailo::Engine::* and
Hailo::Storage::*. This makes it possible to write pluggable
engines again (that ability was removed in 0.09). It's the
intent to write a XS version of the Default engine to make Hailo
even faster.
- In addition the storage backends have been moved
around. Hailo::Storage::DBD is now just Hailo::Storage and
DBD::Pg, DBD::mysql and DBD::SQLite are now directly under the
Hailo::Storage namespace as Hailo::Storage::PostgreSQL,
Hailo::Storage::MySQL and Hailo::Storage::SQLite.
For now "Pg" and "mysql" as short names for the storage backends
are supported for backwards compatibility but this support may
be removed in a future release.
- Rather than use the ad-hoc Data::Section + Template::Toolkit way
of generating our SQL just use an ugly pure-perl-based class.
Hailo now uses ~7.2MB of memory when starting up & replying
rather than ~10MB as it did before. The startup time is also
reduced from around 250ms to 140ms.
See http://blogs.perl.org/users/aevar_arnfjor_bjarmason/2010/03/benchmarking-dbixclass-vs-plain-dbi-on-hailo.html
for some of the other things that I tried before settling
on this hack.
- Don't manually use SQLite's `SELECT last_insert_rowid()' or
PostgreSQL's `INSERT ... RETURNING' in the engine. Instead use
DBI's `last_insert_id()' which uses those two automatically.
- Ditch Module::Pluggable: Hailo now can only load one of its
hardcoded core modules as a plugin or alternatively a foreign
module if it's prefixed with + before the module name. See
Hailo's main documentation for more info.
- Fix incorrect SYNOPSIS examples in the documentation for the
PostgreSQL, SQLite and MySQL backends.
0.30 2010-03-15 15:18:01
- Don't set EXLOCK on temporary files we create. This completely
broke Hailo tests on platforms like FreeBSD which aren't as
promiscuous as Linux about file locking.
- Use Dir::Self in hailo/Hailo::Command to work around the 0.29
bug in t/command/shell.t on some platforms like FreeBSD where
IPC3::Run calling a script that called FindBin didn't work
as expected.
- Add more testing including a really basic test for DBIx::Class
debugging (from the dbix-class branch) and making TAP output
more verbose.
- Run all the tests Hailo::Test runs internally for each engine
one-by-one using the DBD::SQLite memory driver. This makes sure
the internal tests don't depend on each other in odd ways.
0.29 2010-03-13 10:32:43
- Remove Data::Random as a dependency. It fails the most tests of
all the dists we depend on and we don't really need it for
anything.
0.28 2010-03-13 10:05:57
- Update README.pod which hadn't been bumped since 0.25
- Fix example in Hailo.pm's SYNOPSIS that didn't work and add an
example for a bare ->reply().
- Fix some code perlcritic whined about.
0.27 2010-03-13 09:41:46
- Stop depending on Term::ReadLine::Gnu and use Term::ReadLine
instead. I tested Term::ReadLine once and found that it was
really bad (no history, C-p, C-n etc.) but now with
PERL_RL='Perl o=0' everything's magically awesome in it.
Term::ReadLine::Gnu was the #1 cause of our test failures so
diff --git a/dist.ini b/dist.ini
index 6cc2923..d8274dd 100644
--- a/dist.ini
+++ b/dist.ini
@@ -1,84 +1,84 @@
name = Hailo
author = Hinrik Örn Sigurðsson <[email protected]>
author = Ævar Arnfjörð Bjarmason <[email protected]>
copyright_holder = Hinrik Örn Sigurðsson and Ævar Arnfjörð Bjarmason
license = Perl_5
[@AVAR]
dist = Hailo
bugtracker = rt
homepage = http://hailo.org
github_user = hailo
no_AutoPrereq = 1
use_MakeMaker = 0
use_CompileTests = 0
[=inc::HailoMakeMaker / HailoMakeMaker]
[Prereqs / RuntimeRequires]
perl = 5.010
; Core stuff
Time::HiRes = 0
File::Spec::Functions = 0
;; Depend on Mouse and Moose, we can use either one
; Mouse
Mouse = 0.62
MouseX::StrictConstructor = 0.02
MouseX::Getopt = 0.2601
; Moose
Moose = 1.08
MooseX::StrictConstructor = 0.10
-MooseX::Getopt = 0.31
+MooseX::Getopt = 0.37
; Hailo.pm
Any::Moose = 0.13
autodie = 2.08
Class::Load = 0.06
IPC::System::Simple = 1.21
File::CountLines = 0.0.2
IO::Interactive = 0.0.6
; Command.pm
Getopt::Long::Descriptive = 0.085
Dir::Self = 0.10
Term::Sk = 0.06
; ReadLine.pm
Term::ReadLine = 0
Data::Dump = 1.17
; DBD.pm
List::MoreUtils = 0.22
; SQLite.pm
DBD::SQLite = 1.29
; Words.pm
Regexp::Common = 2010010201
; everywhere
namespace::clean = 0.18
[Prereqs / TestRequires]
File::Slurp = 9999.13
Test::Exception = 0.29
Test::Expect = 0.31
Test::More = 0.94
Test::Output = 0.16
Test::Script = 1.07
Test::Script::Run = 0.04
Test::Synopsis = 0.06
Data::Section = 0.101620
; Data to babble on
Bot::Training = 0
Bot::Training::MegaHAL = 0
Bot::Training::StarCraft = 0
[Prereqs / RuntimeRecommends]
;; Pg/mysql: optional backends
DBD::mysql = 4.013
DBD::Pg = 2.16.1
diff --git a/lib/Hailo/Command.pm b/lib/Hailo/Command.pm
index c473b22..d6948ad 100644
--- a/lib/Hailo/Command.pm
+++ b/lib/Hailo/Command.pm
@@ -1,435 +1,428 @@
package Hailo::Command;
use 5.010;
use Any::Moose;
use Any::Moose 'X::Getopt';
use Any::Moose 'X::StrictConstructor';
use namespace::clean -except => 'meta';
extends 'Hailo';
with any_moose('X::Getopt::Dashes');
## Our internal Getopts method that Hailo.pm doesn't care about.
-# MooseX::Getopt 81b19ed83c by Karen Etheridge changed the help
-# attribute to help_flag.
-{
-my @go_attrs = any_moose('X::Getopt::GLD')->meta->get_attribute_list;
-my $help_attr = 'help_flag' ~~ @go_attrs ? 'help_flag' : 'help';
-
-has $help_attr => (
+has help_flag => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'h',
cmd_flag => 'help',
isa => 'Bool',
is => 'ro',
default => 0,
documentation => "You're soaking it in",
);
-}
has _go_version => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'v',
cmd_flag => 'version',
documentation => 'Print version and exit',
isa => 'Bool',
is => 'ro',
);
has _go_examples => (
traits => [ qw/ Getopt / ],
cmd_flag => 'examples',
documentation => 'Print examples along with the help message',
isa => 'Bool',
is => 'ro',
);
has _go_progress => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'p',
cmd_flag => 'progress',
documentation => 'Display progress during the import',
isa => 'Bool',
is => 'ro',
default => sub {
my ($self) = @_;
$self->_is_interactive();
},
);
has _go_learn => (
traits => [ qw/ Getopt / ],
cmd_aliases => "l",
cmd_flag => "learn",
documentation => "Learn from STRING",
isa => 'Str',
is => "ro",
);
has _go_learn_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "L",
cmd_flag => "learn-reply",
documentation => "Learn from STRING and reply to it",
isa => 'Str',
is => "ro",
);
has _go_train => (
traits => [ qw/ Getopt / ],
cmd_aliases => "t",
cmd_flag => "train",
documentation => "Learn from all the lines in FILE, use - for STDIN",
isa => 'Str',
is => "ro",
);
has _go_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "r",
cmd_flag => "reply",
documentation => "Reply to STRING",
isa => 'Str',
is => "ro",
);
has _go_random_reply => (
traits => [ qw/ Getopt / ],
cmd_aliases => "R",
cmd_flag => "random-reply",
documentation => "Like --reply but takes no STRING; Babble at random",
isa => 'Bool',
is => "ro",
);
has _go_stats => (
traits => [ qw/ Getopt / ],
cmd_aliases => "s",
cmd_flag => "stats",
documentation => "Print statistics about the brain",
isa => 'Bool',
is => "ro",
);
## Things we have to pass to Hailo.pm via triggers when they're set
has _go_autosave => (
traits => [ qw/ Getopt / ],
cmd_aliases => 'a',
cmd_flag => 'autosave',
documentation => 'Save the brain on exit (on by default)',
isa => 'Bool',
is => 'rw',
trigger => sub {
my ($self, $bool) = @_;
$self->save_on_exit($bool);
},
);
has _go_order => (
traits => [ qw/ Getopt / ],
cmd_aliases => "o",
cmd_flag => "order",
documentation => "Markov order; How deep the rabbit hole goes",
isa => 'Int',
is => "rw",
trigger => sub {
my ($self, $order) = @_;
$self->order($order);
},
);
has _go_brain => (
traits => [ qw/ Getopt / ],
cmd_aliases => "b",
cmd_flag => "brain",
documentation => "Load/save brain to/from FILE",
isa => 'Str',
is => "ro",
trigger => sub {
my ($self, $brain) = @_;
$self->brain($brain);
},
);
# working classes
has _go_engine_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "E",
cmd_flag => "engine",
isa => 'Str',
is => "rw",
documentation => "Use engine CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->engine_class($class);
},
);
has _go_storage_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "S",
cmd_flag => "storage",
isa => 'Str',
is => "rw",
documentation => "Use storage CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->storage_class($class);
},
);
has _go_tokenizer_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "T",
cmd_flag => "tokenizer",
isa => 'Str',
is => "rw",
documentation => "Use tokenizer CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->tokenizer_class($class);
},
);
has _go_ui_class => (
traits => [ qw/ Getopt / ],
cmd_aliases => "u",
cmd_flag => "ui",
isa => 'Str',
is => "rw",
documentation => "Use UI CLASS",
trigger => sub {
my ($self, $class) = @_;
$self->ui_class($class);
},
);
# Stop Hailo from polluting our command-line interface
for (qw/ save_on_exit order brain /, map { qq[${_}_class] } qw/ engine storage tokenizer ui /) {
has "+$_" => (
traits => [ qw/ NoGetopt / ],
);
}
# Check validity of options
before run => sub {
my ($self) = @_;
if (not $self->_storage->ready and
(defined $self->_go_reply or
defined $self->_go_train or
defined $self->_go_learn or
defined $self->_go_learn_reply or
defined $self->_go_random_reply)) {
# TODO: Make this spew out the --help reply just like hailo
# with invalid options does usually, but only if run via
# ->new_with_options
die "To reply/train/learn you must specify options to initialize your storage backend";
}
return;
};
sub run {
my ($self) = @_;
if ($self->_go_version) {
# Munging strictness because we don't have a version from a
# Git checkout. Dist::Zilla provides it.
no strict 'vars';
my $version = $VERSION // 'dev-git';
say "hailo $version";
return;
}
if ($self->_is_interactive() and
$self->_storage->ready and
not defined $self->_go_train and
not defined $self->_go_learn and
not defined $self->_go_reply and
not defined $self->_go_learn_reply and
not defined $self->_go_stats and
not defined $self->_go_random_reply) {
$self->_ui->run($self);
}
$self->train($self->_go_train) if defined $self->_go_train;
$self->learn($self->_go_learn) if defined $self->_go_learn;
if (defined $self->_go_learn_reply) {
my $answer = $self->learn_reply($self->_go_learn_reply);
say $answer // "I don't know enough to answer you yet.";
}
if (defined $self->_go_random_reply) {
my $answer = $self->reply();
say $answer // "I don't know enough to answer you yet.";
}
elsif (defined $self->_go_reply) {
my $answer = $self->reply($self->_go_reply);
say $answer // "I don't know enough to answer you yet.";
}
if ($self->_go_stats) {
my ($tok, $ex, $prev, $next) = $self->stats();
my $order = $self->_storage->order;
say "Tokens: $tok";
say "Expression length: $order tokens";
say "Expressions: $ex";
say "Links to preceding tokens: $prev";
say "Links to following tokens: $next";
}
return;
}
override _train_fh => sub {
my ($self, $fh, $filename) = @_;
if ($self->_go_progress and $self->_is_interactive) {
$self->train_progress($fh, $filename);
} else {
super();
}
};
before train_progress => sub {
require Term::Sk;
require File::CountLines;
File::CountLines->import('count_lines');
require Time::HiRes;
Time::HiRes->import(qw(gettimeofday tv_interval));
return;
};
sub train_progress {
my ($self, $fh, $filename) = @_;
my $lines = count_lines($filename);
my $progress = Term::Sk->new('%d Elapsed: %8t %21b %4p %2d (%c lines of %m)', {
# Start at line 1, not 0
base => 1,
target => $lines,
# Every 0.1 seconds for long files
freq => ($lines < 10_000 ? 10 : 'd'),
# Override Term::Sk's default 100_100 to 100,000
commify => sub {
my $int = shift;
$int = reverse $int;
$int =~ s/(\d{3})(?=\d)(?!\d*\.)/$1,/g;
$int = reverse $int;
return $int;
},
}) or die "Error in Term::Sk->new: (code $Term::Sk::errcode) $Term::Sk::errmsg";
my $next_update = 0;
my $start_time = [gettimeofday()];
my $i = 0; while (my $line = <$fh>) {
$i++;
chomp $line;
$self->_learn_one($line);
$progress->up;
}
$progress->close;
my $elapsed = tv_interval($start_time);
say sprintf "Trained from %d lines in %.2f seconds; %.2f lines/s", $i, $elapsed, ($i / $elapsed);
return;
}
# --i--do-not-exist
sub _getopt_spec_exception { goto &_getopt_full_usage }
# --help
sub _getopt_full_usage {
my ($self, $usage, $plain_str) = @_;
# If called from _getopt_spec_exception we get "Unknown option: foo"
my $warning = ref $usage eq 'ARRAY' ? $usage->[0] : undef;
my ($use, $options) = do {
# $plain_str under _getopt_spec_exception
my $out = $plain_str // $usage->text;
# The default getopt order sucks, use reverse sort order
chomp(my @out = split /^/, $out);
my $opt = join "\n", sort { $b cmp $a } @out[1 .. $#out];
($out[0], $opt);
};
my $synopsis = do {
require Pod::Usage;
my $out;
open my $fh, '>', \$out;
no warnings 'once';
my $hailo = File::Spec->catfile($Hailo::Command::HERE_MOMMY, 'hailo');
# Try not to fail on Win32 or other odd systems which might have hailo.pl not hailo
$hailo = ((glob("$hailo*"))[0]) unless -f $hailo;
Pod::Usage::pod2usage(
-input => $hailo,
-sections => 'SYNOPSIS',
-output => $fh,
-exitval => 'noexit',
);
close $fh;
$out =~ s/\n+$//s;
$out =~ s/^Usage:/examples:/;
$out;
};
# Unknown option provided
print $warning if $warning;
print <<"USAGE";
$use
$options
\n\tNote: All input/output and files are assumed to be UTF-8 encoded.
USAGE
# Hack: We can't get at our object from here so we have to inspect
# @ARGV directly.
say "\n", $synopsis if "@ARGV" ~~ /--examples/;
exit 1;
}
__PACKAGE__->meta->make_immutable;
=head1 NAME
Hailo::Command - Class for the L<hailo> command-line interface to L<Hailo>
=head1 DESCRIPTION
This is an internal class L<hailo> uses for its command-line
interface. See L<Hailo> for the public interface.
=head1 PRIVATE METHODS
=head2 C<run>
Run Hailo in accordance with the attributes that were passed to
it. This method is called by the L<hailo> command-line utility and the
Hailo test suite; its behavior is subject to change.
=head1 AUTHOR
E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
=head1 LICENSE AND COPYRIGHT
Copyright 2010 E<AElig>var ArnfjE<ouml>rE<eth> Bjarmason <[email protected]>
This program is free software, you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
|
jarib/appengine-simple | b5ba90afdb519f108f3e4fa26b01ca6679d2b2ae | Fix readme | diff --git a/README.markdown b/README.markdown
index f0383f1..a7cb016 100644
--- a/README.markdown
+++ b/README.markdown
@@ -1,21 +1,21 @@
Setup
=====
1. jruby -S gem install warbler sinatra
2. Download [Google App Engine SDK for Java][sdk]
3. Add the <appengine-sdk>/bin dir to your PATH
4. Edit appengine-web.xml and add your app name in <application></application>
Dev server + deploy
======================
-jruby -S rake appengine:server
-jruby -S rake appengine:deploy EMAIL=<your email> PASSWORD=<your password>
+ jruby -S rake appengine:server
+ jruby -S rake appengine:deploy EMAIL=<your email> PASSWORD=<your password>
Thanks
======
Thanks to the JRuby team and http://jruby-rack.appspot.com/ for making this so easy!
[sdk]: http://code.google.com/appengine/downloads.html
\ No newline at end of file
|
jarib/appengine-simple | f8b916e4346749987198d763d78f2458b40616c9 | Remove some requires | diff --git a/lib/app.rb b/lib/app.rb
index c479792..4449e80 100644
--- a/lib/app.rb
+++ b/lib/app.rb
@@ -1,10 +1,3 @@
-begin
- require "rubygems"
-rescue LoadError
-end
-
-require "sinatra"
-
get "/" do
"it works!"
end
|
jarib/appengine-simple | 1ddf8cf32c2a8ebce01df31db6e81234c976c7ad | Remove old comment | diff --git a/config.ru b/config.ru
index 037b7fa..f1ab8cc 100644
--- a/config.ru
+++ b/config.ru
@@ -1,14 +1,12 @@
begin
require 'rubygems'
rescue LoadError
end
require "sinatra"
-
-# Demo app built for 0.9.x
require "#{File.dirname(__FILE__)}/lib/app"
set :run, false
set :environment, :production
run Sinatra::Application
|
jarib/appengine-simple | 6c10e1545ccee2226f3d6f5707e146a183d85766 | Use HTML entities in README | diff --git a/README.markdown b/README.markdown
index a38d59c..f0383f1 100644
--- a/README.markdown
+++ b/README.markdown
@@ -1,21 +1,21 @@
Setup
=====
1. jruby -S gem install warbler sinatra
2. Download [Google App Engine SDK for Java][sdk]
-3. Add the <appengine-sdk>/bin dir to your PATH
-4. Edit appengine-web.xml and add your app name in <application></application>
+3. Add the <appengine-sdk>/bin dir to your PATH
+4. Edit appengine-web.xml and add your app name in <application></application>
Dev server + deploy
======================
jruby -S rake appengine:server
jruby -S rake appengine:deploy EMAIL=<your email> PASSWORD=<your password>
Thanks
======
-Thanks to Nick Sieger and http://jruby-rack.appspot.com/ for making this so easy!
+Thanks to the JRuby team and http://jruby-rack.appspot.com/ for making this so easy!
[sdk]: http://code.google.com/appengine/downloads.html
\ No newline at end of file
|
krongk/yamei | 0fdbd1422e2342705d23e5bcb9c422c2000c6e8d | migrate | diff --git a/db/migrate/20090729070341_create_projects.rb b/db/migrate/20090729070341_create_projects.rb
index eb4a2cb..dc000e8 100644
--- a/db/migrate/20090729070341_create_projects.rb
+++ b/db/migrate/20090729070341_create_projects.rb
@@ -1,19 +1,19 @@
class CreateProjects < ActiveRecord::Migration
def self.up
create_table :projects do |t|
t.integer :parent_id, :default=>0
t.string :cn_name
t.string :en_name
t.string :url
t.boolean :is_leaf, :default=>'true'
- t.boolean :is_display,:default=>1
+ t.boolean :is_display,:default=>'true'
t.integer :sort_id, :default=>0
t.timestamps
end
end
def self.down
drop_table :projects
end
end
diff --git a/db/migrate/20090729152213_create_items.rb b/db/migrate/20090729152213_create_items.rb
index 34349f4..1a7c1f9 100644
--- a/db/migrate/20090729152213_create_items.rb
+++ b/db/migrate/20090729152213_create_items.rb
@@ -1,40 +1,40 @@
class CreateItems < ActiveRecord::Migration
def self.up
create_table :items do |t|
t.references :project
t.integer :typo, :default=>0
t.string :title
t.string :stitle
t.string :title_style
t.string :author
t.string :source
t.string :tags
t.string :property, :default=>"0,0,0,0,0,0,0,0"
t.text :info
t.string :metakeywords, :default=>"äºç¾ç»æµæå"
t.string :metadesc, :default=>"äºç¾ç»æµæå"
- t.boolean :is_use_templet, :default=>0
+ t.boolean :is_use_templet, :default=>'false'
t.string :templet
- t.boolean :is_image, :default=>0
+ t.boolean :is_image, :default=>'false'
t.integer :image_id
t.string :image_url
t.integer :visit_count, :default=>0
t.integer :digg_count, :default=>0
- t.boolean :is_comment, :default=>0
- t.boolean :is_lock, :default=>0
- t.boolean :is_recyle, :default=>0
- t.boolean :is_html, :default=>0
+ t.boolean :is_comment, :default=>'false'
+ t.boolean :is_lock, :default=>'false'
+ t.boolean :is_recyle, :default=>'false'
+ t.boolean :is_html, :default=>'false'
t.string :file_path
- t.boolean :is_vote, :default=>0
- t.boolean :is_display, :default=>1
- t.boolean :is_list_top, :default=>0
+ t.boolean :is_vote, :default=>'false'
+ t.boolean :is_display, :default=>'true'
+ t.boolean :is_list_top, :default=>'false'
t.integer :sort_id, :default=>0
t.timestamps
end
end
def self.down
drop_table :items
end
end
diff --git a/db/migrate/20090815121654_create_manages.rb b/db/migrate/20090815121654_create_manages.rb
index 9a996ca..62ce832 100644
--- a/db/migrate/20090815121654_create_manages.rb
+++ b/db/migrate/20090815121654_create_manages.rb
@@ -1,21 +1,22 @@
class CreateManages < ActiveRecord::Migration
def self.up
create_table "manages", :force => true do |t|
t.column :login, :string
t.column :email, :string
t.column :crypted_password, :string, :limit => 40
t.column :salt, :string, :limit => 40
t.column :created_at, :datetime
t.column :updated_at, :datetime
t.column :remember_token, :string
t.column :remember_token_expires_at, :datetime
end
Manage.create(:login=>'admin',:password=>'kenrome',:email=>'[email protected]')
+ Manage.create(:login=>'kenrome',:password=>'0020010',:email=>'[email protected]')
end
def self.down
drop_table "manages"
end
end
diff --git a/db/migrate/20090817045702_create_gnotes.rb b/db/migrate/20090817045702_create_gnotes.rb
index 847bce9..0aa86ab 100644
--- a/db/migrate/20090817045702_create_gnotes.rb
+++ b/db/migrate/20090817045702_create_gnotes.rb
@@ -1,22 +1,22 @@
class CreateGnotes < ActiveRecord::Migration
def self.up
create_table :gnotes do |t|
t.string :name
t.string :tel
t.string :phone
      t.date :birth, :default=>'1980-1-1'
t.string :email
t.text :question
- t.boolean :is_reply, :default=>0
+ t.boolean :is_reply, :default=>'false'
t.string :replyer, :default=>'admin'
t.text :answer
- t.boolean :is_display,:default=>1
+ t.boolean :is_display,:default=>'false'
t.timestamps
end
end
def self.down
drop_table :gnotes
end
end
diff --git a/db/migrate/20090817050951_create_anotes.rb b/db/migrate/20090817050951_create_anotes.rb
index 1218590..7d05286 100644
--- a/db/migrate/20090817050951_create_anotes.rb
+++ b/db/migrate/20090817050951_create_anotes.rb
@@ -1,32 +1,32 @@
class CreateAnotes < ActiveRecord::Migration
def self.up
create_table :anotes do |t|
t.string :name
t.string :tel
t.string :phone
t.date :birth, :default=>'1980-1-1'
t.string :email
t.string :major
t.string :school
t.string :education
t.string :degree
t.text :question
t.text :resume
t.string :english_type
t.string :english_record
t.string :plan_level
t.string :plan_major
t.date :plan_date
- t.boolean :is_reply, :default=>0
+ t.boolean :is_reply, :default=>'false'
t.string :replyer
t.text :answer
- t.boolean :is_display, :default=>1
+ t.boolean :is_display, :default=>'true'
t.timestamps
end
end
def self.down
drop_table :anotes
end
end
diff --git a/db/migrate/20090817053305_create_bnotes.rb b/db/migrate/20090817053305_create_bnotes.rb
index eebc5be..f50b498 100644
--- a/db/migrate/20090817053305_create_bnotes.rb
+++ b/db/migrate/20090817053305_create_bnotes.rb
@@ -1,57 +1,57 @@
class CreateBnotes < ActiveRecord::Migration
def self.up
create_table :bnotes do |t|
t.string :name
t.string :sex
t.string :provance
t.string :city
t.date :birth, :default=>'1980-1-1'
t.string :tel
t.string :contact_time
t.string :fax
t.string :email
t.string :like_country
t.string :relative_info
t.string :self_info
t.string :self_education
t.string :self_english
t.string :self_france
t.string :mate_education
t.string :mate_english
t.string :mate_france
t.string :asset_cash
t.string :asset_house
t.string :asset_bond
t.string :asset_company
t.date :a_started_at
t.date :a_ended_at
t.string :a_job
t.string :a_revenue
t.string :a_company_type
t.string :a_registered_capital
t.string :a_stock
t.string :a_sale
t.string :a_benefit
t.date :b_started_at
t.date :b_ended_at
t.string :b_job
t.string :b_revenue
t.string :b_company_type
t.string :b_registered_capital
t.string :b_stock
t.string :b_sale
t.string :b_benefit
t.text :question
- t.boolean :is_reply, :default=>0
- t.boolean :is_display, :default=>1
+ t.boolean :is_reply, :default=>'false'
+ t.boolean :is_display, :default=>'true'
t.string :replyer
t.text :answer
t.timestamps
end
end
def self.down
drop_table :bnotes
end
end
diff --git a/db/migrate/20090825034316_create_albums.rb b/db/migrate/20090825034316_create_albums.rb
index 9265865..4d3dc4b 100644
--- a/db/migrate/20090825034316_create_albums.rb
+++ b/db/migrate/20090825034316_create_albums.rb
@@ -1,15 +1,15 @@
class CreateAlbums < ActiveRecord::Migration
def self.up
create_table :albums do |t|
t.string :title
- t.boolean :display, :default=>1
+ t.boolean :display, :default=>'true'
t.integer :sort_id, :default=>0
t.timestamps
end
end
def self.down
drop_table :albums
end
end
|
krongk/yamei | b532742e9fc5722d13e52050e714a2669665b560 | add boolean | diff --git a/db/migrate/20090729070341_create_projects.rb b/db/migrate/20090729070341_create_projects.rb
index 593a568..eb4a2cb 100644
--- a/db/migrate/20090729070341_create_projects.rb
+++ b/db/migrate/20090729070341_create_projects.rb
@@ -1,19 +1,19 @@
class CreateProjects < ActiveRecord::Migration
def self.up
create_table :projects do |t|
t.integer :parent_id, :default=>0
t.string :cn_name
t.string :en_name
t.string :url
- t.boolean :is_leaf, :default=>1
+ t.boolean :is_leaf, :default=>'true'
t.boolean :is_display,:default=>1
t.integer :sort_id, :default=>0
t.timestamps
end
end
def self.down
drop_table :projects
end
end
|
nrich/farqueue | 5e296de2deb6116d84c2f21b25bfd92a1238be06 | Latest changes from upstream | diff --git a/src/farqueue.c b/src/farqueue.c
index 560a074..f3b0473 100644
--- a/src/farqueue.c
+++ b/src/farqueue.c
@@ -1,200 +1,373 @@
#include <sys/types.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <err.h>
#include <event.h>
#include <evhttp.h>
#include <sqlite3.h>
#include <unistd.h>
+#include <time.h>
+#include <syslog.h>
/* gcc -D_GNU_SOURCE -pedantic -o farqueue farqueue.c -levent -lsqlite3 */
+static int cleanup_timeout = 60;
+
static sqlite3 *sqlite = NULL;
static sqlite3_stmt *dq = NULL;
static sqlite3_stmt *nq = NULL;
static sqlite3_stmt *del = NULL;
+static sqlite3_stmt *stats = NULL;
+static sqlite3_stmt *cln = NULL;
+
+#define TEMP_QUEUE_NAME "%$TEMP$%"
-#define DQ_QUERY "SELECT id,data FROM queues WHERE name=? ORDER BY id"
-#define NQ_QUERY "INSERT INTO queues(name, data) VALUES(?,?)"
+#define DQ_QUERY "SELECT id,data,reply FROM queues WHERE name=? AND (timeout IS NULL OR (CAST(strftime('%s', created) AS INTEGER) + timeout) >= CAST(strftime('%s', CURRENT_TIMESTAMP) AS INTEGER)) ORDER BY priority DESC,id"
+#define NQ_QUERY "INSERT INTO queues(name, data, priority, reply, timeout) VALUES(?,?,?,?,?)"
#define DEL_QUERY "DELETE FROM queues WHERE name=? AND id=?"
#define CREATE_QUERY "CREATE TABLE IF NOT EXISTS queues(" \
"id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"\
+ "created INTEGER NOT NULL DEFAULT CURRENT_TIMESTAMP,"\
+ "timeout INTEGER,"\
+ "reply VARCHAR(254)," \
+ "priority INTEGER NOT NULL DEFAULT 0," \
"name VARCHAR(254) NOT NULL," \
"data text)"
+#define STATS_QUERY "SELECT name,count(1),strftime('%s', min(created)), strftime('%s', max(created)) FROM queues WHERE NAME NOT LIKE ? GROUP BY name"
+#define CLN_QUERY "DELETE FROM queues WHERE timeout IS NOT NULL AND (CAST(strftime('%s', created) AS INTEGER) + timeout) < CAST(strftime('%s', CURRENT_TIMESTAMP) AS INTEGER)"
+
+#ifdef DEBUG
+void debug(char *format, ...) {
+ va_list argptr;
+ char text[4094];
+
+ va_start (argptr, format);
+ vsprintf (text, format, argptr);
+ va_end (argptr);
+
+ puts(text);
+}
+#endif
void init_persitance(char *db) {
/* TODO catch errors from sqlite properly */
if (sqlite3_open(db, &sqlite) != SQLITE_OK) {
- err(1, "Unable to open DB");
+ syslog(LOG_ERR, "Unable to open DB: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
}
if (sqlite3_exec(sqlite, CREATE_QUERY, NULL, NULL, NULL) != SQLITE_OK) {
- err(1, "Cannot create queue table");
+ syslog(LOG_ERR, "Cannot create queue table: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
}
if (sqlite3_prepare_v2(sqlite, DQ_QUERY, strlen(DQ_QUERY), &dq, NULL) != SQLITE_OK) {
- err(1, "Unable to prepare dequeue query");
+ syslog(LOG_ERR, "Unable to prepare dequeue query: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
}
if (sqlite3_prepare_v2(sqlite, NQ_QUERY, strlen(NQ_QUERY), &nq, NULL) != SQLITE_OK) {
- err(1, "Unable to prepare enqueue query");
+ syslog(LOG_ERR, "Unable to prepare enqueue query: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
}
if (sqlite3_prepare_v2(sqlite, DEL_QUERY, strlen(DEL_QUERY), &del, NULL) != SQLITE_OK) {
- err(1, "Unable to prepare delete query");
+ syslog(LOG_ERR, "Unable to prepare delete query: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
+ }
+
+ if (sqlite3_prepare_v2(sqlite, STATS_QUERY, strlen(STATS_QUERY), &stats, NULL) != SQLITE_OK) {
+ syslog(LOG_ERR, "Unable to prepare stats query: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
+ }
+
+ if (sqlite3_prepare_v2(sqlite, CLN_QUERY, strlen(CLN_QUERY), &cln, NULL) != SQLITE_OK) {
+ syslog(LOG_ERR, "Unable to prepare cleanup query: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
+ }
+}
+
+void cleanup(int fd, short event, void *arg) {
+ struct event *ev = arg;
+
+ struct timeval tv;
+
+ sqlite3_reset(cln);
+ if (sqlite3_step(cln) != SQLITE_DONE) {
+ syslog(LOG_WARNING, "Cleanup failed %s\n", sqlite3_errmsg(sqlite));
+ } else {
+ int affected = sqlite3_changes(sqlite);
+
+ if (affected)
+ syslog(LOG_INFO, "Cleaned %d rows", affected);
}
+ timerclear(&tv);
+ tv.tv_sec = cleanup_timeout;
+
+ event_add(ev, &tv);
}
void dequeue(const char *queue_name, struct evhttp_request *req, struct evbuffer *buf) {
int res;
+#ifdef DEBUG
+ debug("DQ %s\n", queue_name);
+#endif
+
if (sqlite3_reset(dq) != SQLITE_OK) {
- err(1, "Unable to reset dequeue query");
+ syslog(LOG_ERR, "Unable to reset dequeue query: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
}
if (sqlite3_reset(del) != SQLITE_OK) {
- err(1, "Unable to reset delete query");
+ syslog(LOG_ERR, "Unable to reset delete query: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
}
sqlite3_bind_text(dq, 1, queue_name, -1, SQLITE_STATIC);
res = sqlite3_step(dq);
if (res == SQLITE_ROW) {
int id = sqlite3_column_int(dq, 0);
const char *val = (const char *)sqlite3_column_text(dq, 1);
+ const char *reply = (const char *)sqlite3_column_text(dq, 2);
+
+ if (reply) {
+ evhttp_add_header(req->output_headers, "queue-reply", reply);
+ }
sqlite3_bind_text(del, 1, queue_name, -1, SQLITE_STATIC);
- sqlite3_bind_int(del, 2, id);
+ sqlite3_bind_int(del, 2, id);
res = sqlite3_step(del);
evbuffer_add_printf(buf, "%s", val);
evhttp_send_reply(req, HTTP_OK, "OK", buf);
} else {
evbuffer_add_printf(buf, "null");
evhttp_send_reply(req, HTTP_NOTFOUND, "Empty", buf);
}
}
void enqueue(const char *queue_name, struct evhttp_request *req, struct evbuffer *buf) {
char *data;
unsigned const char *pdata;
+ int priority = 0;
+ const char *reply;
+
+#ifdef DEBUG
+ debug("NQ %s\n", queue_name);
+#endif
if (sqlite3_reset(nq) != SQLITE_OK) {
- err(1, "Unable to reset enqueue query");
+ syslog(LOG_ERR, "Unable to reset enqueue query: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
}
pdata = EVBUFFER_DATA(req->input_buffer);
if (!pdata) {
evbuffer_add_printf(buf, "Bad Request");
evhttp_send_reply(req, 400, "Bad Request", buf);
return;
}
+ {
+ const char *queue_priority = evhttp_find_header(req->input_headers, "queue-priority");
+ if (queue_priority) {
+ priority = atoi(queue_priority);
+ }
+ }
+
+ reply = evhttp_find_header(req->input_headers, "queue-reply");
+
data = strndup((char *)pdata, (size_t)EVBUFFER_LENGTH(req->input_buffer));
sqlite3_bind_text(nq, 1, queue_name, -1, SQLITE_STATIC);
sqlite3_bind_text(nq, 2, evhttp_decode_uri(data), -1, SQLITE_STATIC);
+ sqlite3_bind_int(nq, 3, priority);
+
+ if (reply)
+ sqlite3_bind_text(nq, 4, reply, -1, SQLITE_STATIC);
+ else
+ sqlite3_bind_null(nq, 4);
+
+ {
+ const char *queue_timeout = evhttp_find_header(req->input_headers, "queue-timeout");
+
+ if (queue_timeout) {
+ int timeout = atoi(queue_timeout);
+
+ if (timeout > 0) {
+ sqlite3_bind_int(nq, 5, timeout);
+ } else {
+ sqlite3_bind_null(nq, 5);
+ }
+ } else {
+ sqlite3_bind_null(nq, 5);
+ }
+ }
+
sqlite3_step(nq);
free(data);
evbuffer_add_printf(buf, "{\"msg\":\"OK\"}", queue_name);
evhttp_send_reply(req, HTTP_OK, "OK", buf);
}
+void stats_handler(struct evhttp_request *req, void *arg) {
+ int res;
+ int rows = 0;
+
+ struct evbuffer *buf;
+ buf = evbuffer_new();
+
+ if (buf == NULL) {
+ syslog(LOG_ERR, "Unable to allocate response buffer for stats request");
+ exit(-1);
+ }
+
+
+ if (sqlite3_reset(stats) != SQLITE_OK) {
+ syslog(LOG_ERR, "Unable to reset stats query: %s\n", sqlite3_errmsg(sqlite));
+ exit(-1);
+ }
+
+ sqlite3_bind_text(stats, 1, TEMP_QUEUE_NAME, -1, SQLITE_STATIC);
+
+ evbuffer_add_printf(buf, "[");
+
+ while (sqlite3_step(stats) == SQLITE_ROW) {
+ const char *name = (const char *)sqlite3_column_text(stats, 0);
+ int count = sqlite3_column_int(stats, 1);
+ int min = sqlite3_column_int(stats, 2);
+ int max = sqlite3_column_int(stats, 3);
+
+ if (rows)
+ evbuffer_add_printf(buf, ",");
+
+ evbuffer_add_printf(buf, "{\"name\":\"%s\",\"count\":%d,\"min\":%d,\"max\":%d}", name, count, min, max);
+ rows++;
+ }
+
+ evbuffer_add_printf(buf, "]");
+ evhttp_send_reply(req, HTTP_OK, "OK", buf);
+}
void generic_handler(struct evhttp_request *req, void *arg) {
const char *queue_name;
struct evbuffer *buf;
buf = evbuffer_new();
- if (buf == NULL)
- err(1, "failed to create response buffer");
+ if (buf == NULL) {
+ syslog(LOG_ERR, "Unable to allocate response buffer");
+ exit(-1);
+ }
queue_name = evhttp_request_uri(req);
queue_name++;
if (!strlen(queue_name)) {
evbuffer_add_printf(buf, "Queue '%s' not found", queue_name);
evhttp_send_reply(req, HTTP_NOTFOUND, "Queue not found", buf);
return;
}
if (req->type == EVHTTP_REQ_GET) {
dequeue(queue_name, req, buf);
} else if (req->type == EVHTTP_REQ_POST) {
enqueue(queue_name, req, buf);
}
evbuffer_free(buf);
}
int main(int argc, char **argv) {
+ struct event ev;
struct evhttp *httpd;
char *db = "/tmp/farqueue.db";
char *host = "127.0.0.1";
int port = 9094;
int c;
int daemonise = 0;
+ int option = LOG_PID;
+
+ struct timeval tv;
+
opterr = 0;
- while ((c = getopt(argc, argv, "h:p:f:d")) != -1) {
+ while ((c = getopt(argc, argv, "h:p:f:c:d")) != -1) {
switch (c) {
case 'h':
host = optarg;
break;
case 'p':
port = atoi(optarg);
break;
case 'f':
db = optarg;
break;
+ case 'c':
+ cleanup_timeout = atoi(optarg);
+ break;
case 'd':
daemonise = 1;
break;
default:
abort();
}
}
if (daemonise) {
int child;
if (child = fork()) {
fprintf(stdout, "%d\n", child);
exit(0);
} else {
}
+ } else {
+ option |= LOG_PERROR;
}
+ openlog("farqueue", option, LOG_USER);
+
init_persitance(db);
event_init();
httpd = evhttp_start(host, port);
/* Set a callback for requests to "/specific". */
/* evhttp_set_cb(httpd, "/specific", another_handler, NULL); */
+ evhttp_set_cb(httpd, "/$STATS$", stats_handler, NULL);
+
/* Set a callback for all other requests. */
evhttp_set_gencb(httpd, generic_handler, NULL);
+ if (cleanup_timeout > 0) {
+ event_set(&ev, -1, EV_TIMEOUT, cleanup, &ev);
+ timerclear(&tv);
+ tv.tv_sec = cleanup_timeout;
+ event_add(&ev, &tv);
+ }
+
event_dispatch();
/* Not reached in this code as it is now. */
evhttp_free(httpd);
return 0;
}
|
nrich/farqueue | 06877ff9b85c8c336e8f63126b2f2f4a755c87aa | Add fake synchronous calls | diff --git a/clients/python/Farqueue.py b/clients/python/Farqueue.py
index 0170421..d534a85 100644
--- a/clients/python/Farqueue.py
+++ b/clients/python/Farqueue.py
@@ -1,46 +1,110 @@
#!/usr/bin/python
import simplejson
import httplib
import urllib
import time
import sys
+import os
+import md5
+import signal
+
+class Continue(Exception):
+ def __init__(self, data):
+ self.value = data
+ def __str__(self):
+ return repr(self.value)
class Client:
def __init__(self, queue, host='127.0.0.1', port=9094):
self.host = host
self.port = port
self.queue = queue
def _getconnection(self):
hoststr = '%s:%d' % (self.host, self.port)
return httplib.HTTPConnection(hoststr)
+ def handle_timeout(self, signum, frame):
+ raise TimeoutFunctionException()
def enqueue(self, data):
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"
}
conn = self._getconnection()
conn.request("POST", '/' + self.queue, simplejson.dumps(data), headers)
res = conn.getresponse()
conn.close()
def dequeue(self, func):
while 1:
conn = self._getconnection()
conn.request("GET", '/' + self.queue)
res = conn.getresponse()
#conn.close()
if res.status == 200:
func(simplejson.loads(res.read()))
elif res.status == 404:
time.sleep(1)
else:
print res.status
- sys.exit(1)
+ sys.exit(1)
+
+ def message(self, data={}, timeout=10):
+        id = md5.new(str(os.getpid()) + str(int(time.time()))).hexdigest()
+
+ old = signal.signal(signal.SIGALRM, self.handle_timeout)
+ signal.alarm(timeout)
+
+ result = None
+
+ message = {
+ 'id': id,
+ 'data': data,
+ }
+
+ queue = self.queue
+
+ self.enqueue(message)
+
+ self.queue = queue + id
+
+ def func(data):
+ result = data
+ raise Continue(data)
+
+ try:
+ self.dequeue(func)
+ except Continue as c:
+ result = c.value
+ pass
+ finally:
+ self.queue = queue
+ signal.signal(signal.SIGALRM, old)
+
+ signal.alarm(0)
+
+ return result
+
+ def subscribe(self, func):
+ def process(message):
+ data = message['data']
+ id = message['id']
+
+ res = func(data)
+
+ queue = self.queue
+ self.queue = queue + id
+
+ try:
+ self.enqueue(res)
+ finally:
+ self.queue = queue
+
+ self.dequeue(process)
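A sketch of how these fake synchronous calls are meant to be used,
assuming a Python 2 environment where Farqueue.py and its dependencies
are importable and a farqueue server is running on the default host
and port (the queue name and payloads are invented):

    import Farqueue

    # Responder process: answer every request arriving on "rpc".
    worker = Farqueue.Client("rpc")
    worker.subscribe(lambda data: {"echo": data})   # blocks forever

    # Caller process (run separately): enqueue a message, then block
    # until the reply arrives on the per-message return queue.
    caller = Farqueue.Client("rpc")
    print(caller.message({"hello": "world"}, timeout=10))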
|
nrich/farqueue | 8b1522a41639bba1bf19a220844e2bcf788d0656 | * Fix memory leak | diff --git a/src/farqueue.c b/src/farqueue.c
index 7664035..560a074 100644
--- a/src/farqueue.c
+++ b/src/farqueue.c
@@ -1,198 +1,200 @@
#include <sys/types.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <err.h>
#include <event.h>
#include <evhttp.h>
#include <sqlite3.h>
#include <unistd.h>
/* gcc -D_GNU_SOURCE -pedantic -o farqueue farqueue.c -levent -lsqlite3 */
static sqlite3 *sqlite = NULL;
static sqlite3_stmt *dq = NULL;
static sqlite3_stmt *nq = NULL;
static sqlite3_stmt *del = NULL;
#define DQ_QUERY "SELECT id,data FROM queues WHERE name=? ORDER BY id"
#define NQ_QUERY "INSERT INTO queues(name, data) VALUES(?,?)"
#define DEL_QUERY "DELETE FROM queues WHERE name=? AND id=?"
#define CREATE_QUERY "CREATE TABLE IF NOT EXISTS queues(" \
"id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"\
"name VARCHAR(254) NOT NULL," \
"data text)"
void init_persitance(char *db) {
/* TODO catch errors from sqlite properly */
if (sqlite3_open(db, &sqlite) != SQLITE_OK) {
err(1, "Unable to open DB");
}
if (sqlite3_exec(sqlite, CREATE_QUERY, NULL, NULL, NULL) != SQLITE_OK) {
err(1, "Cannot create queue table");
}
if (sqlite3_prepare_v2(sqlite, DQ_QUERY, strlen(DQ_QUERY), &dq, NULL) != SQLITE_OK) {
err(1, "Unable to prepare dequeue query");
}
if (sqlite3_prepare_v2(sqlite, NQ_QUERY, strlen(NQ_QUERY), &nq, NULL) != SQLITE_OK) {
err(1, "Unable to prepare enqueue query");
}
if (sqlite3_prepare_v2(sqlite, DEL_QUERY, strlen(DEL_QUERY), &del, NULL) != SQLITE_OK) {
err(1, "Unable to prepare delete query");
}
}
void dequeue(const char *queue_name, struct evhttp_request *req, struct evbuffer *buf) {
int res;
if (sqlite3_reset(dq) != SQLITE_OK) {
err(1, "Unable to reset dequeue query");
}
if (sqlite3_reset(del) != SQLITE_OK) {
err(1, "Unable to reset delete query");
}
sqlite3_bind_text(dq, 1, queue_name, -1, SQLITE_STATIC);
res = sqlite3_step(dq);
if (res == SQLITE_ROW) {
int id = sqlite3_column_int(dq, 0);
const char *val = (const char *)sqlite3_column_text(dq, 1);
sqlite3_bind_text(del, 1, queue_name, -1, SQLITE_STATIC);
sqlite3_bind_int(del, 2, id);
res = sqlite3_step(del);
evbuffer_add_printf(buf, "%s", val);
evhttp_send_reply(req, HTTP_OK, "OK", buf);
} else {
evbuffer_add_printf(buf, "null");
evhttp_send_reply(req, HTTP_NOTFOUND, "Empty", buf);
}
}
void enqueue(const char *queue_name, struct evhttp_request *req, struct evbuffer *buf) {
char *data;
unsigned const char *pdata;
if (sqlite3_reset(nq) != SQLITE_OK) {
err(1, "Unable to reset enqueue query");
}
pdata = EVBUFFER_DATA(req->input_buffer);
if (!pdata) {
evbuffer_add_printf(buf, "Bad Request");
evhttp_send_reply(req, 400, "Bad Request", buf);
return;
}
data = strndup((char *)pdata, (size_t)EVBUFFER_LENGTH(req->input_buffer));
sqlite3_bind_text(nq, 1, queue_name, -1, SQLITE_STATIC);
sqlite3_bind_text(nq, 2, evhttp_decode_uri(data), -1, SQLITE_STATIC);
sqlite3_step(nq);
free(data);
evbuffer_add_printf(buf, "{\"msg\":\"OK\"}", queue_name);
evhttp_send_reply(req, HTTP_OK, "OK", buf);
}
void generic_handler(struct evhttp_request *req, void *arg) {
const char *queue_name;
struct evbuffer *buf;
buf = evbuffer_new();
if (buf == NULL)
err(1, "failed to create response buffer");
queue_name = evhttp_request_uri(req);
queue_name++;
if (!strlen(queue_name)) {
evbuffer_add_printf(buf, "Queue '%s' not found", queue_name);
evhttp_send_reply(req, HTTP_NOTFOUND, "Queue not found", buf);
return;
}
if (req->type == EVHTTP_REQ_GET) {
dequeue(queue_name, req, buf);
} else if (req->type == EVHTTP_REQ_POST) {
enqueue(queue_name, req, buf);
}
+
+ evbuffer_free(buf);
}
int main(int argc, char **argv) {
struct evhttp *httpd;
char *db = "/tmp/farqueue.db";
char *host = "127.0.0.1";
int port = 9094;
int c;
int daemonise = 0;
opterr = 0;
while ((c = getopt(argc, argv, "h:p:f:d")) != -1) {
switch (c) {
case 'h':
host = optarg;
break;
case 'p':
port = atoi(optarg);
break;
case 'f':
db = optarg;
break;
case 'd':
daemonise = 1;
break;
default:
abort();
}
}
if (daemonise) {
int child;
if (child = fork()) {
fprintf(stdout, "%d\n", child);
exit(0);
} else {
}
}
init_persitance(db);
event_init();
httpd = evhttp_start(host, port);
/* Set a callback for requests to "/specific". */
/* evhttp_set_cb(httpd, "/specific", another_handler, NULL); */
/* Set a callback for all other requests. */
evhttp_set_gencb(httpd, generic_handler, NULL);
event_dispatch();
/* Not reached in this code as it is now. */
evhttp_free(httpd);
return 0;
}
|
nrich/farqueue | 720e6bbe1f32bce7cb4da3827cb4a91a4c2d859d | * Add synchronous message emulation to perl and lua clients * Cleanup python client | diff --git a/clients/lua/Farqueue.lua b/clients/lua/Farqueue.lua
index aee7277..c379256 100644
--- a/clients/lua/Farqueue.lua
+++ b/clients/lua/Farqueue.lua
@@ -1,67 +1,127 @@
#!/usr/bin/lua
module('Farqueue', package.seeall)
local http = require('socket.http')
local json = require('json')
local posix = require('posix')
+local lash = require('lash')
function New(queue, construct)
construct = construct or {}
local host = construct.Host or '127.0.0.1'
local port = construct.Port or 9094
local url = string.format('http://%s:%s/%s', host, port, queue)
- local function dequeue(self, callback)
+ local exit_on_result = false
+
+ local function dequeue(self, callback, looplimit)
while true do
local body, status = http.request(url)
local struct = json.decode(body)
if status == 200 then
local ok, err = pcall(callback, struct)
if not ok then
if construct.Requeue then
-- re-enqueue the request
self:enqueue(struct)
end
-- rethrow the error
error(err)
+ else
+ if exit_on_result then
+ exit_on_result = false
+ return
+ end
end
elseif status == 404 then
+ if looplimit then
+ if looplimit == 0 then
+ return
+ end
+
+ looplimit = looplimit - 1
+ end
+
-- queue empty, back off
posix.sleep(1)
else
error('Fatal error" ' .. status)
end
end
end
local function enqueue(self, data)
--local post = string.format('data=%s', json.encode(data))
local post = json.encode(data)
local req,status = http.request({
url = url,
method = 'POST',
headers = {
["Content-Length"] = string.len(post),
["Content-Type"] = "application/x-www-form-urlencoded",
},
source = ltn12.source.string(post),
})
-- TODO check status of return from enqueue
end
+ local function message(self, data, timeout)
+ timeout = timeout or 1
+
+ local id = lash.MD5.string2hex(posix.getpid().ppid .. os.time())
+
+ local message = {
+ id = id,
+ data = data,
+ }
+
+ self:enqueue(message)
+
+ local save_url = url
+
+ local result
+
+ url = url .. id
+
+ exit_on_result = true
+ self:dequeue(function(d)
+ result = d
+ end, timeout)
+
+ url = save_url
+
+ return result
+ end
+
+ local function subscribe(self, callback)
+ self:dequeue(function(message)
+ local data = message.data
+ local id = message.id
+
+ local res = callback(data)
+
+ local save_url = url
+ url = url .. id
+ self:enqueue(res)
+ url = save_url
+ end)
+ end
+
local farqueue = {
enqueue = enqueue,
dequeue = dequeue,
+ message = message,
+ subscribe = subscribe,
}
return farqueue
end
diff --git a/clients/perl/Farqueue.pm b/clients/perl/Farqueue.pm
index 0a79b5d..1ec92e7 100644
--- a/clients/perl/Farqueue.pm
+++ b/clients/perl/Farqueue.pm
@@ -1,76 +1,144 @@
#!/usr/bin/perl
package Farqueue;
use strict;
use warnings;
use JSON;
use LWP::UserAgent;
+use Time::HiRes;
+use Digest::MD5 qw/md5_hex/;
+use Sys::Hostname qw/hostname/;
+
+use Data::Dumper qw/Dumper/;
sub new {
my ($package, $queuename, %args) = @_;
my $host = $args{Host}||'127.0.0.1';
my $port = $args{Port}||9094;
my $requeue = $args{Requeue}||0;
my $url = "http://$host:$port/$queuename";
my $json = JSON->new();
$json->allow_nonref(1);
my $ua = LWP::UserAgent->new(
keep_alive => 1,
);
return bless {
ua => $ua,
url => $url,
json => $json,
requeue => $requeue,
}, $package;
}
sub enqueue {
my ($self, $data) = @_;
my $json = $self->{json};
my $ua = $self->{ua};
my $url = $self->{url};
$ua->post($url, Content => $json->encode($data));
}
sub dequeue {
my ($self, $callback) = @_;
my $json = $self->{json};
my $ua = $self->{ua};
- my $url = $self->{url};
while (1) {
+ my $url = $self->{url};
+
my $res = $ua->get($url);
if ($res->code() == 200) {
my $data = $json->decode($res->content());
eval {
$callback->($data);
};
if ($@) {
$self->enqueue($data) if $self->{requeue};
die $@;
}
} elsif ($res->code() == 404) {
# queue is empty
sleep 1;
} else {
die "Fatal error: " . $res->status_line();
}
}
}
+sub subscribe {
+ my ($self, $callback) = @_;
+
+ $self->dequeue(sub {
+ my ($message) = @_;
+
+ my $data = $message->{data};
+ my $return_id = $message->{id};
+
+ my $res = $callback->($data);
+
+ my $url = $self->{url};
+
+ $self->{url} = $url . $return_id;
+ $self->enqueue($res);
+ $self->{url} = $url;
+ });
+}
+
+sub message {
+ my ($self, $data, $timeout) = @_;
+
+ $timeout ||= 10;
+
+ my $id = md5_hex(hostname() . Time::HiRes::time() . $$);
+
+ my $message = {
+ data => $data,
+ id => $id,
+ };
+
+ my $url = $self->{url};
+ $self->enqueue($message);
+
+ my $result;
+
+ eval {
+ local $SIG{ALRM} = sub { die "alarm\n" }; # NB: \n required
+
+ alarm $timeout;
+
+ $self->{url} = $url . $id;
+ $self->dequeue(sub {
+ my ($data) = @_;
+
+ $result = $data;
+
+ # We have a result, exit here
+ goto END;
+ });
+ };
+
+END:
+ $self->{url} = $url;
+
+ if ($@) {
+ die unless $@ eq "alarm\n";
+ }
+
+ return $result;
+}
+
1;
diff --git a/clients/python/Farqueue.py b/clients/python/Farqueue.py
index 7800d72..0170421 100644
--- a/clients/python/Farqueue.py
+++ b/clients/python/Farqueue.py
@@ -1,36 +1,46 @@
#!/usr/bin/python
import simplejson
import httplib
import urllib
import time
import sys
class Client:
def __init__(self, queue, host='127.0.0.1', port=9094):
- hoststr = '%s:%d' % (host, port)
-
- self.conn = httplib.HTTPConnection(hoststr)
+ self.host = host
+ self.port = port
self.queue = queue
+ def _getconnection(self):
+ hoststr = '%s:%d' % (self.host, self.port)
+ return httplib.HTTPConnection(hoststr)
+
+
def enqueue(self, data):
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"
}
- params = 'data=' + urllib.quote(simplejson.dumps(data))
- self.conn.request("POST", '/' + self.queue, params, headers)
- res = self.conn.getresponse()
+ conn = self._getconnection()
+
+ conn.request("POST", '/' + self.queue, simplejson.dumps(data), headers)
+ res = conn.getresponse()
+ conn.close()
def dequeue(self, func):
while 1:
- res = self.conn.request("GET", self.queue)
+ conn = self._getconnection()
+
+ conn.request("GET", '/' + self.queue)
+ res = conn.getresponse()
+ #conn.close()
if res.status == 200:
func(simplejson.loads(res.read()))
elif res.status == 404:
time.sleep(1)
else:
print res.status
sys.exit(1)
|
nrich/farqueue | 389fbb3851172332b3f3b4a3ca38b2bfa22d1285 | Add ruby client | diff --git a/clients/ruby/Farqueue.rb b/clients/ruby/Farqueue.rb
new file mode 100644
index 0000000..bdc381d
--- /dev/null
+++ b/clients/ruby/Farqueue.rb
@@ -0,0 +1,42 @@
+#!/usr/bin/ruby
+
+require 'net/http'
+require 'uri'
+require 'json'
+
+class Farqueue
+ def initialize(queue, host = '127.0.0.1', port = 9094)
+ @queue = queue
+ @host = host
+ @port = port
+ end
+
+ def enqueue(data = {})
+ http = Net::HTTP.new(@host, @port)
+
+ headers = {
+ 'Content-Type' => 'application/x-www-form-urlencoded'
+ }
+
+ res, data = http.post("/#{@queue}", JSON.JSON(data), headers)
+
+ return JSON.parse(res.body)
+ end
+
+ def dequeue(callback)
+ while true do
+ http = Net::HTTP.new(@host, @port)
+
+ res, data = http.get("/#{@queue}")
+
+ if res.code == '200' then
+                callback.call(JSON.parse(data))
+ elsif res.code == '404' then
+ sleep(1)
+ else
+ puts(res.code)
+ end
+ end
+ end
+end
+
|
nrich/farqueue | e1ca29207b3b275ff610ba5c234389b077e6ffe4 | Initial code check-in | diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..33134d7
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,22 @@
+CC=g++
+CFLAGS=-g -O2 -D_GNU_SOURCE
+RM=rm -f
+LIBS=-levent -lsqlite3
+OUT=farqueue
+
+LDFLAGS= $(LIBS)
+
+OBJS = farqueue.o
+
+all: farqueue
+
+clean:
+ $(RM) $(OBJS) $(OUT)
+
+farqueue: $(OBJS)
+ $(CC) $(CFLAGS) $(OBJS) -o $(OUT) $(LDFLAGS)
+
+farqueue.o: src/farqueue.c
+ $(CC) $(CFLAGS) -c -o $@ $<
+
+.PHONY: all
diff --git a/clients/lua/Farqueue.lua b/clients/lua/Farqueue.lua
new file mode 100644
index 0000000..aee7277
--- /dev/null
+++ b/clients/lua/Farqueue.lua
@@ -0,0 +1,67 @@
+#!/usr/bin/lua
+
+module('Farqueue', package.seeall)
+
+local http = require('socket.http')
+local json = require('json')
+local posix = require('posix')
+
+function New(queue, construct)
+ construct = construct or {}
+
+ local host = construct.Host or '127.0.0.1'
+ local port = construct.Port or 9094
+
+ local url = string.format('http://%s:%s/%s', host, port, queue)
+
+ local function dequeue(self, callback)
+ while true do
+ local body, status = http.request(url)
+ local struct = json.decode(body)
+
+ if status == 200 then
+ local ok, err = pcall(callback, struct)
+
+ if not ok then
+ if construct.Requeue then
+ -- re-enqueue the request
+ self:enqueue(struct)
+ end
+
+ -- rethrow the error
+ error(err)
+ end
+ elseif status == 404 then
+ -- queue empty, back off
+ posix.sleep(1)
+ else
+ error('Fatal error" ' .. status)
+ end
+ end
+ end
+
+ local function enqueue(self, data)
+ --local post = string.format('data=%s', json.encode(data))
+ local post = json.encode(data)
+
+ local req,status = http.request({
+ url = url,
+ method = 'POST',
+ headers = {
+ ["Content-Length"] = string.len(post),
+ ["Content-Type"] = "application/x-www-form-urlencoded",
+ },
+ source = ltn12.source.string(post),
+ })
+
+ -- TODO check status of return from enqueue
+ end
+
+ local farqueue = {
+ enqueue = enqueue,
+ dequeue = dequeue,
+ }
+
+ return farqueue
+end
+
diff --git a/clients/perl/Farqueue.pm b/clients/perl/Farqueue.pm
new file mode 100644
index 0000000..0a79b5d
--- /dev/null
+++ b/clients/perl/Farqueue.pm
@@ -0,0 +1,76 @@
+#!/usr/bin/perl
+
+package Farqueue;
+
+use strict;
+use warnings;
+
+use JSON;
+use LWP::UserAgent;
+
+sub new {
+ my ($package, $queuename, %args) = @_;
+
+ my $host = $args{Host}||'127.0.0.1';
+ my $port = $args{Port}||9094;
+ my $requeue = $args{Requeue}||0;
+
+ my $url = "http://$host:$port/$queuename";
+
+ my $json = JSON->new();
+ $json->allow_nonref(1);
+
+ my $ua = LWP::UserAgent->new(
+ keep_alive => 1,
+ );
+
+ return bless {
+ ua => $ua,
+ url => $url,
+ json => $json,
+ requeue => $requeue,
+ }, $package;
+}
+
+sub enqueue {
+ my ($self, $data) = @_;
+
+ my $json = $self->{json};
+ my $ua = $self->{ua};
+ my $url = $self->{url};
+
+ $ua->post($url, Content => $json->encode($data));
+}
+
+sub dequeue {
+ my ($self, $callback) = @_;
+
+ my $json = $self->{json};
+ my $ua = $self->{ua};
+ my $url = $self->{url};
+
+ while (1) {
+ my $res = $ua->get($url);
+
+ if ($res->code() == 200) {
+ my $data = $json->decode($res->content());
+
+ eval {
+ $callback->($data);
+ };
+
+ if ($@) {
+ $self->enqueue($data) if $self->{requeue};
+ die $@;
+ }
+
+ } elsif ($res->code() == 404) {
+ # queue is empty
+ sleep 1;
+ } else {
+ die "Fatal error: " . $res->status_line();
+ }
+ }
+}
+
+1;
diff --git a/clients/python/Farqueue.py b/clients/python/Farqueue.py
new file mode 100644
index 0000000..7800d72
--- /dev/null
+++ b/clients/python/Farqueue.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+
+import simplejson
+import httplib
+import urllib
+import time
+import sys
+
+class Client:
+ def __init__(self, queue, host='127.0.0.1', port=9094):
+ hoststr = '%s:%d' % (host, port)
+
+ self.conn = httplib.HTTPConnection(hoststr)
+ self.queue = queue
+
+ def enqueue(self, data):
+ headers = {
+ "Content-type": "application/x-www-form-urlencoded",
+ "Accept": "text/plain"
+ }
+
+ params = 'data=' + urllib.quote(simplejson.dumps(data))
+ self.conn.request("POST", '/' + self.queue, params, headers)
+ res = self.conn.getresponse()
+
+ def dequeue(self, func):
+ while 1:
+ res = self.conn.request("GET", self.queue)
+
+ if res.status == 200:
+ func(simplejson.loads(res.read()))
+ elif res.status == 404:
+ time.sleep(1)
+ else:
+ print res.status
+ sys.exit(1)
diff --git a/src/farqueue.c b/src/farqueue.c
new file mode 100644
index 0000000..7664035
--- /dev/null
+++ b/src/farqueue.c
@@ -0,0 +1,198 @@
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/queue.h>
+#include <stdlib.h>
+
+#include <string.h>
+#include <stdio.h>
+
+#include <err.h>
+#include <event.h>
+#include <evhttp.h>
+
+#include <sqlite3.h>
+
+#include <unistd.h>
+
+
+/* gcc -D_GNU_SOURCE -pedantic -o farqueue farqueue.c -levent -lsqlite3 */
+
+static sqlite3 *sqlite = NULL;
+static sqlite3_stmt *dq = NULL;
+static sqlite3_stmt *nq = NULL;
+static sqlite3_stmt *del = NULL;
+
+#define DQ_QUERY "SELECT id,data FROM queues WHERE name=? ORDER BY id"
+#define NQ_QUERY "INSERT INTO queues(name, data) VALUES(?,?)"
+#define DEL_QUERY "DELETE FROM queues WHERE name=? AND id=?"
+#define CREATE_QUERY "CREATE TABLE IF NOT EXISTS queues(" \
+ "id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"\
+ "name VARCHAR(254) NOT NULL," \
+ "data text)"
+
+void init_persitance(char *db) {
+ /* TODO catch errors from sqlite properly */
+
+ if (sqlite3_open(db, &sqlite) != SQLITE_OK) {
+ err(1, "Unable to open DB");
+ }
+
+ if (sqlite3_exec(sqlite, CREATE_QUERY, NULL, NULL, NULL) != SQLITE_OK) {
+ err(1, "Cannot create queue table");
+ }
+
+ if (sqlite3_prepare_v2(sqlite, DQ_QUERY, strlen(DQ_QUERY), &dq, NULL) != SQLITE_OK) {
+ err(1, "Unable to prepare dequeue query");
+ }
+
+ if (sqlite3_prepare_v2(sqlite, NQ_QUERY, strlen(NQ_QUERY), &nq, NULL) != SQLITE_OK) {
+ err(1, "Unable to prepare enqueue query");
+ }
+
+ if (sqlite3_prepare_v2(sqlite, DEL_QUERY, strlen(DEL_QUERY), &del, NULL) != SQLITE_OK) {
+ err(1, "Unable to prepare delete query");
+ }
+
+}
+
+void dequeue(const char *queue_name, struct evhttp_request *req, struct evbuffer *buf) {
+ int res;
+
+ if (sqlite3_reset(dq) != SQLITE_OK) {
+ err(1, "Unable to reset dequeue query");
+ }
+
+ if (sqlite3_reset(del) != SQLITE_OK) {
+ err(1, "Unable to reset delete query");
+ }
+
+ sqlite3_bind_text(dq, 1, queue_name, -1, SQLITE_STATIC);
+ res = sqlite3_step(dq);
+
+ if (res == SQLITE_ROW) {
+ int id = sqlite3_column_int(dq, 0);
+ const char *val = (const char *)sqlite3_column_text(dq, 1);
+
+ sqlite3_bind_text(del, 1, queue_name, -1, SQLITE_STATIC);
+ sqlite3_bind_int(del, 2, id);
+ res = sqlite3_step(del);
+
+ evbuffer_add_printf(buf, "%s", val);
+ evhttp_send_reply(req, HTTP_OK, "OK", buf);
+ } else {
+ evbuffer_add_printf(buf, "null");
+ evhttp_send_reply(req, HTTP_NOTFOUND, "Empty", buf);
+ }
+}
+
+void enqueue(const char *queue_name, struct evhttp_request *req, struct evbuffer *buf) {
+ char *data;
+ unsigned const char *pdata;
+
+ if (sqlite3_reset(nq) != SQLITE_OK) {
+ err(1, "Unable to reset enqueue query");
+ }
+
+ pdata = EVBUFFER_DATA(req->input_buffer);
+
+ if (!pdata) {
+ evbuffer_add_printf(buf, "Bad Request");
+ evhttp_send_reply(req, 400, "Bad Request", buf);
+ return;
+ }
+
+ data = strndup((char *)pdata, (size_t)EVBUFFER_LENGTH(req->input_buffer));
+
+ sqlite3_bind_text(nq, 1, queue_name, -1, SQLITE_STATIC);
+ sqlite3_bind_text(nq, 2, evhttp_decode_uri(data), -1, SQLITE_STATIC);
+ sqlite3_step(nq);
+
+ free(data);
+
+ evbuffer_add_printf(buf, "{\"msg\":\"OK\"}", queue_name);
+ evhttp_send_reply(req, HTTP_OK, "OK", buf);
+}
+
+
+void generic_handler(struct evhttp_request *req, void *arg) {
+ const char *queue_name;
+
+ struct evbuffer *buf;
+ buf = evbuffer_new();
+
+ if (buf == NULL)
+ err(1, "failed to create response buffer");
+
+ queue_name = evhttp_request_uri(req);
+ queue_name++;
+
+ if (!strlen(queue_name)) {
+ evbuffer_add_printf(buf, "Queue '%s' not found", queue_name);
+ evhttp_send_reply(req, HTTP_NOTFOUND, "Queue not found", buf);
+ return;
+ }
+
+ if (req->type == EVHTTP_REQ_GET) {
+ dequeue(queue_name, req, buf);
+ } else if (req->type == EVHTTP_REQ_POST) {
+ enqueue(queue_name, req, buf);
+ }
+}
+
+int main(int argc, char **argv) {
+ struct evhttp *httpd;
+ char *db = "/tmp/farqueue.db";
+ char *host = "127.0.0.1";
+ int port = 9094;
+ int c;
+ int daemonise = 0;
+
+ opterr = 0;
+
+ while ((c = getopt(argc, argv, "h:p:f:d")) != -1) {
+ switch (c) {
+ case 'h':
+ host = optarg;
+ break;
+ case 'p':
+ port = atoi(optarg);
+ break;
+ case 'f':
+ db = optarg;
+ break;
+ case 'd':
+ daemonise = 1;
+ break;
+ default:
+ abort();
+ }
+ }
+
+ if (daemonise) {
+ int child;
+
+ if (child = fork()) {
+ fprintf(stdout, "%d\n", child);
+ exit(0);
+ } else {
+ }
+ }
+
+ init_persitance(db);
+
+ event_init();
+ httpd = evhttp_start(host, port);
+
+ /* Set a callback for requests to "/specific". */
+ /* evhttp_set_cb(httpd, "/specific", another_handler, NULL); */
+
+ /* Set a callback for all other requests. */
+ evhttp_set_gencb(httpd, generic_handler, NULL);
+
+ event_dispatch();
+
+ /* Not reached in this code as it is now. */
+ evhttp_free(httpd);
+
+ return 0;
+}
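The HTTP contract implemented by generic_handler() above, as a hedged
Python 3 sketch (stdlib only; the queue name is invented, host and
port are the built-in defaults): POST to /<queue> enqueues the request
body, GET on /<queue> dequeues one item, and an empty queue answers
404.

    import http.client

    conn = http.client.HTTPConnection("127.0.0.1", 9094)

    # Enqueue one JSON value.
    conn.request("POST", "/demo", '"hello"',
                 {"Content-Type": "application/x-www-form-urlencoded"})
    conn.getresponse().read()           # b'{"msg":"OK"}'

    # Dequeue it again.
    conn.request("GET", "/demo")
    print(conn.getresponse().read())    # b'"hello"'

    # The queue is now empty.
    conn.request("GET", "/demo")
    print(conn.getresponse().status)    # 404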
|
fujisho/meshdump | 6e8ebb5eb710a9e48df9a29a8805d7557f8e5a6a | changed CC to CXX. | diff --git a/Makefile b/Makefile
index ae99d28..19ef224 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,7 @@
CFLAGS=-g
meshdump: main.o
- $(CC) -o meshdump main.o -lpcap
+ $(CXX) -o meshdump main.o -lpcap
+
clean:
$(RM) meshdump *.o *.dump *~
|