private static boolean isAcceptNode(Object nodeObj)
{
if (nodeObj != null)
{
Class nodeObjClass = nodeObj.getClass();
if (nodeObjClass.equals(MDAGNode.class))
return ((MDAGNode) nodeObj).isAcceptNode();
else if (nodeObjClass.equals(SimpleMDAGNode.class))
return ((SimpleMDAGNode) nodeObj).isAcceptNode();
}
throw new IllegalArgumentException("Argument is not an MDAGNode or SimpleMDAGNode");
} | Determines whether a child node object is an accept node.
@param nodeObj an Object that is either an MDAGNode or a SimpleMDAGNode
@return true if the node is an accept node, false otherwise
@throws IllegalArgumentException if {@code nodeObj} is neither an MDAGNode nor a SimpleMDAGNode |
void idf(HashMap<Integer, Integer> df, int ndocs)
{
for (Map.Entry<Integer, Double> entry : feature_.entrySet())
{
Integer denom = df.get(entry.getKey());
if (denom == null) denom = 1;
entry.setValue((double) (entry.getValue() * Math.log(ndocs / denom))); // tf * log(ndocs / df); note that ndocs / denom is integer division
}
} | Apply IDF (inverse document frequency) weighting.
@param df document frequencies
@param ndocs the number of documents |
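For context, here is a minimal standalone sketch of the same tf·idf update in plain Java (not part of the library; the map contents are illustrative, and note that the library method above computes ndocs / denom with integer division):
import java.util.HashMap;
import java.util.Map;

public class IdfSketch
{
    public static void main(String[] args)
    {
        // toy data: term 7 has tf weight 2.0 and occurs in 3 of 10 documents
        Map<Integer, Double> feature = new HashMap<Integer, Double>();
        feature.put(7, 2.0);
        Map<Integer, Integer> df = new HashMap<Integer, Integer>();
        df.put(7, 3);
        int ndocs = 10;
        for (Map.Entry<Integer, Double> entry : feature.entrySet())
        {
            Integer denom = df.get(entry.getKey());
            if (denom == null) denom = 1; // unseen terms get df = 1
            entry.setValue(entry.getValue() * Math.log((double) ndocs / denom)); // tf * log(N / df)
        }
        System.out.println(feature); // {7=2.4079456...} since 2.0 * ln(10 / 3) ≈ 2.408
    }
}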
public Collection<TermFrequency> top(int N)
{
MaxHeap<TermFrequency> heap = new MaxHeap<TermFrequency>(N, new Comparator<TermFrequency>()
{
@Override
public int compare(TermFrequency o1, TermFrequency o2)
{
return o1.compareTo(o2);
}
});
heap.addAll(termFrequencyMap.values());
return heap.toList();
} | Get the top N most frequent terms.
@param N number of terms to return
@return the top N terms by frequency |
@Override
public List<String> getKeywords(List<Term> termList, int size)
{
clear();
add(termList);
Collection<TermFrequency> topN = top(size);
List<String> r = new ArrayList<String>(topN.size());
for (TermFrequency termFrequency : topN)
{
r.add(termFrequency.getTerm());
}
return r;
} | Extract keywords (not thread-safe).
@param termList the term list
@param size number of keywords to extract
@return the keyword list |
public static List<String> getKeywordList(String document, int size)
{
return new TermFrequencyCounter().getKeywords(document, size);
} | Extract keywords (thread-safe).
@param document the document text
@param size number of keywords to extract
@return a list of keywords |
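A usage sketch for the static helper above (documentText is a placeholder for any String):
// extract the 3 most frequent terms from a document
String documentText = "...";
List<String> keywords = TermFrequencyCounter.getKeywordList(documentText, 3);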
public List<WordInfo> discover(BufferedReader reader, int size) throws IOException
{
String doc;
Map<String, WordInfo> word_cands = new TreeMap<String, WordInfo>();
int totalLength = 0;
Pattern delimiter = Pattern.compile("[\\s\\d,.<>/?:;'\"\\[\\]{}()\\|~!@#$%^&*\\-_=+,。《》、?:;“”‘’{}【】()…¥!—┄-]+");
while ((doc = reader.readLine()) != null)
{
doc = delimiter.matcher(doc).replaceAll("\0");
int docLength = doc.length();
for (int i = 0; i < docLength; ++i)
{
int end = Math.min(i + 1 + max_word_len, docLength + 1);
for (int j = i + 1; j < end; ++j)
{
String word = doc.substring(i, j);
if (word.indexOf('\0') >= 0)
continue; // strings containing the delimiter are not word candidates
WordInfo info = word_cands.get(word);
if (info == null)
{
info = new WordInfo(word);
word_cands.put(word, info);
}
info.update(i == 0 ? '\0' : doc.charAt(i - 1), j < docLength ? doc.charAt(j) : '\0');
}
}
totalLength += docLength;
}
for (WordInfo info : word_cands.values())
{
info.computeProbabilityEntropy(totalLength);
}
for (WordInfo info : word_cands.values())
{
info.computeAggregation(word_cands);
}
// filter
List<WordInfo> wordInfoList = new LinkedList<WordInfo>(word_cands.values());
ListIterator<WordInfo> listIterator = wordInfoList.listIterator();
while (listIterator.hasNext())
{
WordInfo info = listIterator.next();
if (info.text.trim().length() < 2 || info.p < min_freq || info.entropy < min_entropy || info.aggregation < min_aggregation
|| (filter && LexiconUtility.getFrequency(info.text) > 0)
)
{
listIterator.remove();
}
}
// sort by frequency
MaxHeap<WordInfo> topN = new MaxHeap<WordInfo>(size, new Comparator<WordInfo>()
{
public int compare(WordInfo o1, WordInfo o2)
{
return Float.compare(o1.p, o2.p);
}
});
topN.addAll(wordInfoList);
return topN.toList();
} | Extract words.
@param reader the large text
@param size number of words to extract
@return a list of words |
public List<WordInfo> discover(String doc, int size)
{
try
{
return discover(new BufferedReader(new StringReader(doc)), size);
}
catch (IOException e)
{
throw new RuntimeException(e);
}
} | Extract words.
@param doc the large text
@param size number of words to extract
@return a list of words |
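A usage sketch, assuming the enclosing class is the NewWordDiscover whose five-argument constructor appears later in this dump (max word length 4; the thresholds mirror the values used there; corpusText is a placeholder):
NewWordDiscover discover = new NewWordDiscover(4, 0.0f, .5f, 100f, true);
List<WordInfo> newWords = discover.discover(corpusText, 100); // top 100 candidates by frequency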
public static Object[] extractAllParseFeatures(Configuration configuration, int length)
{
if (length == 26)
return extractBasicFeatures(configuration, length);
else if (length == 72)
return extractExtendedFeatures(configuration, length);
else
return extractExtendedFeaturesWithBrownClusters(configuration, length);
} | Given a list of templates, extracts all features for the given state.
@param configuration the parser configuration
@param length the number of feature templates (26 for basic, 72 for extended)
@return the extracted feature array |
private static Object[] extractExtendedFeatures(Configuration configuration, int length)
{
Object[] featureMap = new Object[length];
State state = configuration.state;
Sentence sentence = configuration.sentence;
int b0Position = 0;
int b1Position = 0;
int b2Position = 0;
int s0Position = 0;
long svr = 0; // stack right valency
long svl = 0; // stack left valency
long bvl = 0; // buffer left valency
long b0w = 0;
long b0p = 0;
long b1w = 0;
long b1p = 0;
long b2w = 0;
long b2p = 0;
long s0w = 0;
long s0p = 0;
long s0l = 0;
long bl0p = 0;
long bl0w = 0;
long bl0l = 0;
long bl1w = 0;
long bl1p = 0;
long bl1l = 0;
long sr0p = 0;
long sr0w = 0;
long sr0l = 0;
long sh0w = 0;
long sh0p = 0;
long sh0l = 0;
long sl0p = 0;
long sl0w = 0;
long sl0l = 0;
long sr1w = 0;
long sr1p = 0;
long sr1l = 0;
long sh1w = 0;
long sh1p = 0;
long sl1w = 0;
long sl1p = 0;
long sl1l = 0;
long sdl = 0;
long sdr = 0;
long bdl = 0;
int[] words = sentence.getWords();
int[] tags = sentence.getTags();
if (0 < state.bufferSize())
{
b0Position = state.bufferHead();
b0w = b0Position == 0 ? 0 : words[b0Position - 1];
b0w += 2;
b0p = b0Position == 0 ? 0 : tags[b0Position - 1];
b0p += 2;
bvl = state.leftValency(b0Position);
int leftMost = state.leftMostModifier(state.getBufferItem(0));
if (leftMost >= 0)
{
bl0p = leftMost == 0 ? 0 : tags[leftMost - 1];
bl0p += 2;
bl0w = leftMost == 0 ? 0 : words[leftMost - 1];
bl0w += 2;
bl0l = state.getDependent(leftMost);
bl0l += 2;
int l2 = state.leftMostModifier(leftMost);
if (l2 >= 0)
{
bl1w = l2 == 0 ? 0 : words[l2 - 1];
bl1w += 2;
bl1p = l2 == 0 ? 0 : tags[l2 - 1];
bl1p += 2;
bl1l = state.getDependent(l2);
bl1l += 2;
}
}
if (1 < state.bufferSize())
{
b1Position = state.getBufferItem(1);
b1w = b1Position == 0 ? 0 : words[b1Position - 1];
b1w += 2;
b1p = b1Position == 0 ? 0 : tags[b1Position - 1];
b1p += 2;
if (2 < state.bufferSize())
{
b2Position = state.getBufferItem(2);
b2w = b2Position == 0 ? 0 : words[b2Position - 1];
b2w += 2;
b2p = b2Position == 0 ? 0 : tags[b2Position - 1];
b2p += 2;
}
}
}
if (0 < state.stackSize())
{
s0Position = state.stackTop();
s0w = s0Position == 0 ? 0 : words[s0Position - 1];
s0w += 2;
s0p = s0Position == 0 ? 0 : tags[s0Position - 1];
s0p += 2;
s0l = state.getDependent(s0Position);
s0l += 2;
svl = state.leftValency(s0Position);
svr = state.rightValency(s0Position);
int leftMost = state.leftMostModifier(s0Position);
if (leftMost >= 0)
{
sl0p = leftMost == 0 ? 0 : tags[leftMost - 1];
sl0p += 2;
sl0w = leftMost == 0 ? 0 : words[leftMost - 1];
sl0w += 2;
sl0l = state.getDependent(leftMost);
sl0l += 2;
}
int rightMost = state.rightMostModifier(s0Position);
if (rightMost >= 0)
{
sr0p = rightMost == 0 ? 0 : tags[rightMost - 1];
sr0p += 2;
sr0w = rightMost == 0 ? 0 : words[rightMost - 1];
sr0w += 2;
sr0l = state.getDependent(rightMost);
sr0l += 2;
}
int headIndex = state.getHead(s0Position);
if (headIndex >= 0)
{
sh0w = headIndex == 0 ? 0 : words[headIndex - 1];
sh0w += 2;
sh0p = headIndex == 0 ? 0 : tags[headIndex - 1];
sh0p += 2;
sh0l = state.getDependent(headIndex);
sh0l += 2;
}
if (leftMost >= 0)
{
int l2 = state.leftMostModifier(leftMost);
if (l2 >= 0)
{
sl1w = l2 == 0 ? 0 : words[l2 - 1];
sl1w += 2;
sl1p = l2 == 0 ? 0 : tags[l2 - 1];
sl1p += 2;
sl1l = state.getDependent(l2);
sl1l += 2;
}
}
if (headIndex >= 0)
{
if (state.hasHead(headIndex))
{
int h2 = state.getHead(headIndex);
sh1w = h2 == 0 ? 0 : words[h2 - 1];
sh1w += 2;
sh1p = h2 == 0 ? 0 : tags[h2 - 1];
sh1p += 2;
}
}
if (rightMost >= 0)
{
int r2 = state.rightMostModifier(rightMost);
if (r2 >= 0)
{
sr1w = r2 == 0 ? 0 : words[r2 - 1];
sr1w += 2;
sr1p = r2 == 0 ? 0 : tags[r2 - 1];
sr1p += 2;
sr1l = state.getDependent(r2);
sr1l += 2;
}
}
}
int index = 0;
long b0wp = b0p;
b0wp |= (b0w << 8);
long b1wp = b1p;
b1wp |= (b1w << 8);
long s0wp = s0p;
s0wp |= (s0w << 8);
long b2wp = b2p;
b2wp |= (b2w << 8);
/**
* From single words
*/
if (s0w != 1)
{
featureMap[index++] = s0wp;
featureMap[index++] = s0w;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
}
featureMap[index++] = s0p;
if (b0w != 1)
{
featureMap[index++] = b0wp;
featureMap[index++] = b0w;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
}
featureMap[index++] = b0p;
if (b1w != 1)
{
featureMap[index++] = b1wp;
featureMap[index++] = b1w;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
}
featureMap[index++] = b1p;
if (b2w != 1)
{
featureMap[index++] = b2wp;
featureMap[index++] = b2w;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
}
featureMap[index++] = b2p;
/**
* from word pairs
*/
if (s0w != 1 && b0w != 1)
{
featureMap[index++] = (s0wp << 28) | b0wp;
featureMap[index++] = (s0wp << 20) | b0w;
featureMap[index++] = (s0w << 28) | b0wp;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
featureMap[index++] = null;
}
if (s0w != 1)
{
featureMap[index++] = (s0wp << 8) | b0p;
}
else
{
featureMap[index++] = null;
}
if (b0w != 1)
{
featureMap[index++] = (s0p << 28) | b0wp;
}
else
{
featureMap[index++] = null;
}
if (s0w != 1 && b0w != 1)
{
featureMap[index++] = (s0w << 20) | b0w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = (s0p << 8) | b0p;
featureMap[index++] = (b0p << 8) | b1p;
/**
* from three words
*/
featureMap[index++] = (b0p << 16) | (b1p << 8) | b2p;
featureMap[index++] = (s0p << 16) | (b0p << 8) | b1p;
featureMap[index++] = (sh0p << 16) | (s0p << 8) | b0p;
featureMap[index++] = (s0p << 16) | (sl0p << 8) | b0p;
featureMap[index++] = (s0p << 16) | (sr0p << 8) | b0p;
featureMap[index++] = (s0p << 16) | (b0p << 8) | bl0p;
/**
* distance
*/
long distance = 0;
if (s0Position > 0 && b0Position > 0)
distance = Math.abs(b0Position - s0Position);
if (s0w != 1)
{
featureMap[index++] = s0w | (distance << 20);
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = s0p | (distance << 8);
if (b0w != 1)
{
featureMap[index++] = b0w | (distance << 20);
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = b0p | (distance << 8);
if (s0w != 1 && b0w != 1)
{
featureMap[index++] = s0w | (b0w << 20) | (distance << 40);
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = s0p | (b0p << 8) | (distance << 28);
/**
* Valency information
*/
if (s0w != 1)
{
featureMap[index++] = s0w | (svr << 20);
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = s0p | (svr << 8);
if (s0w != 1)
{
featureMap[index++] = s0w | (svl << 20);
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = s0p | (svl << 8);
if (b0w != 1)
{
featureMap[index++] = b0w | (bvl << 20);
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = b0p | (bvl << 8);
/**
* Unigrams
*/
if (sh0w != 1)
{
featureMap[index++] = sh0w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = sh0p;
featureMap[index++] = s0l;
if (sl0w != 1)
{
featureMap[index++] = sl0w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = sl0p;
featureMap[index++] = sl0l;
if (sr0w != 1)
{
featureMap[index++] = sr0w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = sr0p;
featureMap[index++] = sr0l;
if (bl0w != 1)
{
featureMap[index++] = bl0w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = bl0p;
featureMap[index++] = bl0l;
/**
* From third order features
*/
if (sh1w != 1)
{
featureMap[index++] = sh1w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = sh1p;
featureMap[index++] = sh0l;
if (sl1w != 1)
{
featureMap[index++] = sl1w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = sl1p;
featureMap[index++] = sl1l;
if (sr1w != 1)
{
featureMap[index++] = sr1w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = sr1p;
featureMap[index++] = sr1l;
if (bl1w != 1)
{
featureMap[index++] = bl1w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = bl1p;
featureMap[index++] = bl1l;
featureMap[index++] = s0p | (sl0p << 8) | (sl1p << 16);
featureMap[index++] = s0p | (sr0p << 8) | (sr1p << 16);
featureMap[index++] = s0p | (sh0p << 8) | (sh1p << 16);
featureMap[index++] = b0p | (bl0p << 8) | (bl1p << 16);
/**
* label set
*/
if (s0Position >= 0)
{
sdl = state.leftDependentLabels(s0Position);
sdr = state.rightDependentLabels(s0Position);
}
if (b0Position >= 0)
{
bdl = state.leftDependentLabels(b0Position);
}
if (s0w != 1)
{
featureMap[index++] = (s0w + "|" + sdr);
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = (s0p + "|" + sdr);
if (s0w != 1)
{
featureMap[index++] = s0w + "|" + sdl;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = (s0p + "|" + sdl);
if (b0w != 1)
{
featureMap[index++] = (b0w + "|" + bdl);
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = (b0p + "|" + bdl);
return featureMap;
} | Extract features for the given state according to the feature templates.
@param configuration the parser configuration
@return the extracted feature array |
private static Long[] extractBasicFeatures(Configuration configuration, int length)
{
Long[] featureMap = new Long[length];
State state = configuration.state;
Sentence sentence = configuration.sentence;
int b0Position = 0;
int b1Position = 0;
int b2Position = 0;
int s0Position = 0;
long b0w = 0;
long b0p = 0;
long b1w = 0;
long b1p = 0;
long b2w = 0;
long b2p = 0;
long s0w = 0;
long s0p = 0;
long bl0p = 0;
long sr0p = 0;
long sh0p = 0;
long sl0p = 0;
int[] words = sentence.getWords();
int[] tags = sentence.getTags();
if (0 < state.bufferSize())
{
b0Position = state.bufferHead();
b0w = b0Position == 0 ? 0 : words[b0Position - 1];
b0w += 2;
b0p = b0Position == 0 ? 0 : tags[b0Position - 1];
b0p += 2;
int leftMost = state.leftMostModifier(state.getBufferItem(0));
if (leftMost >= 0)
{
bl0p = leftMost == 0 ? 0 : tags[leftMost - 1];
bl0p += 2;
}
if (1 < state.bufferSize())
{
b1Position = state.getBufferItem(1);
b1w = b1Position == 0 ? 0 : words[b1Position - 1];
b1w += 2;
b1p = b1Position == 0 ? 0 : tags[b1Position - 1];
b1p += 2;
if (2 < state.bufferSize())
{
b2Position = state.getBufferItem(2);
b2w = b2Position == 0 ? 0 : words[b2Position - 1];
b2w += 2;
b2p = b2Position == 0 ? 0 : tags[b2Position - 1];
b2p += 2;
}
}
}
if (0 < state.stackSize())
{
s0Position = state.stackTop();
s0w = s0Position == 0 ? 0 : words[s0Position - 1];
s0w += 2;
s0p = s0Position == 0 ? 0 : tags[s0Position - 1];
s0p += 2;
int leftMost = state.leftMostModifier(s0Position);
if (leftMost >= 0)
{
sl0p = leftMost == 0 ? 0 : tags[leftMost - 1];
sl0p += 2;
}
int rightMost = state.rightMostModifier(s0Position);
if (rightMost >= 0)
{
sr0p = rightMost == 0 ? 0 : tags[rightMost - 1];
sr0p += 2;
}
int headIndex = state.getHead(s0Position);
if (headIndex >= 0)
{
sh0p = headIndex == 0 ? 0 : tags[headIndex - 1];
sh0p += 2;
}
}
int index = 0;
long b0wp = b0p;
b0wp |= (b0w << 8);
long b1wp = b1p;
b1wp |= (b1w << 8);
long s0wp = s0p;
s0wp |= (s0w << 8);
long b2wp = b2p;
b2wp |= (b2w << 8);
/**
* From single words
*/
if (s0w != 1)
{
featureMap[index++] = s0wp;
featureMap[index++] = s0w;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
}
featureMap[index++] = s0p;
if (b0w != 1)
{
featureMap[index++] = b0wp;
featureMap[index++] = b0w;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
}
featureMap[index++] = b0p;
if (b1w != 1)
{
featureMap[index++] = b1wp;
featureMap[index++] = b1w;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
}
featureMap[index++] = b1p;
if (b2w != 1)
{
featureMap[index++] = b2wp;
featureMap[index++] = b2w;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
}
featureMap[index++] = b2p;
/**
* from word pairs
*/
if (s0w != 1 && b0w != 1)
{
featureMap[index++] = (s0wp << 28) | b0wp;
featureMap[index++] = (s0wp << 20) | b0w;
featureMap[index++] = (s0w << 28) | b0wp;
}
else
{
featureMap[index++] = null;
featureMap[index++] = null;
featureMap[index++] = null;
}
if (s0w != 1)
{
featureMap[index++] = (s0wp << 8) | b0p;
}
else
{
featureMap[index++] = null;
}
if (b0w != 1)
{
featureMap[index++] = (s0p << 28) | b0wp;
}
else
{
featureMap[index++] = null;
}
if (s0w != 1 && b0w != 1)
{
featureMap[index++] = (s0w << 20) | b0w;
}
else
{
featureMap[index++] = null;
}
featureMap[index++] = (s0p << 8) | b0p;
featureMap[index++] = (b0p << 8) | b1p;
/**
* from three words
*/
featureMap[index++] = (b0p << 16) | (b1p << 8) | b2p;
featureMap[index++] = (s0p << 16) | (b0p << 8) | b1p;
featureMap[index++] = (sh0p << 16) | (s0p << 8) | b0p;
featureMap[index++] = (s0p << 16) | (sl0p << 8) | b0p;
featureMap[index++] = (s0p << 16) | (sr0p << 8) | b0p;
featureMap[index++] = (s0p << 16) | (b0p << 8) | bl0p;
return featureMap;
} | Given a list of templates, extracts the basic features for the given state.
@param configuration the parser configuration
@return the extracted feature array |
public static long distance(String A, String B)
{
CommonSynonymDictionary.SynonymItem itemA = get(A);
CommonSynonymDictionary.SynonymItem itemB = get(B);
if (itemA == null || itemB == null) return Long.MAX_VALUE;
return distance(itemA, itemB);
} | Compute the semantic distance between two words.
@param A word A
@param B word B
@return the distance (Long.MAX_VALUE if either word is missing from the dictionary) |
public static double similarity(String A, String B)
{
long distance = distance(A, B);
if (distance > dictionary.getMaxSynonymItemIdDistance()) return 0.0;
return (dictionary.getMaxSynonymItemIdDistance() - distance) / (double) dictionary.getMaxSynonymItemIdDistance();
} | Compute the similarity between two words; 0 means dissimilar, 1 means identical.
@param A word A
@param B word B
@return the similarity in [0, 1] |
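A usage sketch; distance and similarity are static methods whose hosting class is not shown in this excerpt, so the CoreSynonymDictionary qualifier below is an assumption:
long d = CoreSynonymDictionary.distance("香蕉", "苹果");       // Long.MAX_VALUE if either word is missing
double sim = CoreSynonymDictionary.similarity("香蕉", "苹果"); // 1.0 = synonymous, 0.0 = unrelated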
public static List<CommonSynonymDictionary.SynonymItem> convert(List<Term> sentence, boolean withUndefinedItem)
{
List<CommonSynonymDictionary.SynonymItem> synonymItemList = new ArrayList<CommonSynonymDictionary.SynonymItem>(sentence.size());
for (Term term : sentence)
{
CommonSynonymDictionary.SynonymItem item = get(term.word);
if (item == null)
{
if (withUndefinedItem)
{
item = CommonSynonymDictionary.SynonymItem.createUndefined(term.word);
synonymItemList.add(item);
}
}
else
{
synonymItemList.add(item);
}
}
return synonymItemList;
} | Convert a segmentation result into a list of synonym items.
@param sentence the segmented sentence
@param withUndefinedItem whether to keep words absent from the dictionary (as undefined items)
@return the synonym item list |
public int combine(TFDictionary dictionary, int limit, boolean add)
{
int preSize = trie.size();
for (Map.Entry<String, TermFrequency> entry : dictionary.trie.entrySet())
{
TermFrequency termFrequency = trie.get(entry.getKey());
if (termFrequency == null)
{
trie.put(entry.getKey(), new TermFrequency(entry.getKey(), Math.min(limit, entry.getValue().getValue())));
}
else
{
if (add)
{
termFrequency.setValue(termFrequency.getValue() + Math.min(limit, entry.getValue().getValue()));
}
}
}
return trie.size() - preSize;
} | Merge this (main) dictionary with another term-frequency dictionary.
@param dictionary the other term-frequency dictionary
@param limit when the other dictionary introduces a term, its frequency is capped at this limit (pass Integer.MAX_VALUE to disable capping)
@param add true to add frequencies for terms already present, false to leave existing entries unchanged
@return the number of entries added |
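A usage sketch of the instance-level combine above (file paths are placeholders; the no-arg constructor and load(String) are used the same way by the static combine further below):
TFDictionary main = new TFDictionary();
main.load("main.txt");
TFDictionary other = new TFDictionary();
other.load("other.txt");
int delta = main.combine(other, Integer.MAX_VALUE, true); // add frequencies, no cap on imported entries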
public static int combine(String... path)
{
TFDictionary dictionaryMain = new TFDictionary();
dictionaryMain.load(path[0]);
int preSize = dictionaryMain.trie.size();
for (int i = 1; i < path.length; ++i)
{
TFDictionary dictionary = new TFDictionary();
dictionary.load(path[i]);
dictionaryMain.combine(dictionary, 1, true);
}
try
{
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(IOUtil.newOutputStream(path[0]), "UTF-8"));
for (Map.Entry<String, TermFrequency> entry : dictionaryMain.trie.entrySet())
{
bw.write(entry.getKey());
bw.write(' ');
bw.write(String.valueOf(entry.getValue().getValue()));
bw.newLine();
}
bw.close();
}
catch (Exception e)
{
e.printStackTrace();
return -1;
}
return dictionaryMain.trie.size() - preSize;
} | Merge multiple dictionaries.
@param path the dictionary paths; the first one is the main dictionary. For how the main dictionary differs from the others, see com.hankcs.hanlp.corpus.dictionary.TFDictionary#combine(com.hankcs.hanlp.corpus.dictionary.TFDictionary, int, boolean)
@return the number of entries added |
public int getFrequency(String key)
{
TermFrequency termFrequency = get(key);
if (termFrequency == null) return 0;
return termFrequency.getFrequency();
} | Get the frequency of a term.
@param key the term
@return its frequency, or 0 if absent |
public boolean saveKeyTo(String path)
{
LinkedList<String> keyList = new LinkedList<String>();
for (Map.Entry<String, TermFrequency> entry : trie.entrySet())
{
keyList.add(entry.getKey());
}
return IOUtil.saveCollectionToTxt(keyList, path);
} | Save only the keys to a file.
@param path the output path
@return whether the save succeeded |
public TreeSet<TermFrequency> values()
{
TreeSet<TermFrequency> set = new TreeSet<TermFrequency>(Collections.reverseOrder());
for (Map.Entry<String, TermFrequency> entry : entrySet())
{
set.add(entry.getValue());
}
return set;
} | Entries sorted by frequency in descending order.
@return a TreeSet of entries, highest frequency first |
@Override
public String[] tag(String... words)
{
POSInstance instance = new POSInstance(words, model.featureMap);
return tag(instance);
} | POS-tag an array of words.
@param words the words
@return the tag array |
@Override
public String[] tag(List<String> wordList)
{
String[] termArray = new String[wordList.size()];
wordList.toArray(termArray);
return tag(termArray);
} | POS-tag a list of words.
@param wordList the word list
@return the tag array |
public boolean learn(String... wordTags)
{
String[] words = new String[wordTags.length];
String[] tags = new String[wordTags.length];
for (int i = 0; i < wordTags.length; i++)
{
String[] wordTag = wordTags[i].split("//");
words[i] = wordTag[0];
tags[i] = wordTag[1];
}
return learn(new POSInstance(words, tags, model.featureMap));
} | Online learning.
@param wordTags an array of [word]/[POS tag] strings
@return whether learning succeeded (failure means the arguments were malformed) |
public static String translate(String tag)
{
String cn = translator.get(tag);
if (cn == null) return tag;
return cn;
} | Translate a POS tag.
@param tag the tag
@return the translation, or the tag itself if none exists |
public void add(IWord word)
{
Item item = trie.get(word.getValue());
if (item == null)
{
item = new Item(word.getValue(), word.getLabel());
trie.put(item.key, item);
}
else
{
item.addLabel(word.getLabel());
}
} | Add a word to the dictionary.
@param word the word |
public static List<Item> loadAsItemList(String path)
{
List<Item> itemList = new LinkedList<Item>();
try
{
BufferedReader br = new BufferedReader(new InputStreamReader(IOAdapter == null ? new FileInputStream(path) :
IOAdapter.open(path), "UTF-8"));
String line;
while ((line = br.readLine()) != null)
{
Item item = Item.create(line);
if (item == null)
{
logger.warning("使用【" + line + "】创建Item失败");
return null;
// continue;
}
itemList.add(item);
}
}
catch (Exception e)
{
logger.warning("读取词典" + path + "发生异常" + e);
return null;
}
return itemList;
} | Load all entries.
@param path the dictionary path
@return the item list, or null on failure |
public static DictionaryMaker load(String path)
{
DictionaryMaker dictionaryMaker = new DictionaryMaker();
dictionaryMaker.addAll(DictionaryMaker.loadAsItemList(path));
return dictionaryMaker;
} | Load from disk.
@param path the dictionary path
@return a DictionaryMaker |
public void add(Item item)
{
Item innerItem = trie.get(item.key);
if (innerItem == null)
{
innerItem = item;
trie.put(innerItem.key, innerItem);
}
else
{
innerItem.combine(item);
}
} | Insert an item, merging with any existing item that has the same key.
@param item the item |
public void addNotCombine(Item item)
{
Item innerItem = trie.get(item.key);
if (innerItem == null)
{
innerItem = item;
trie.put(innerItem.key, innerItem);
}
} | Insert an item without merging; if the key already exists, the new item is ignored.
@param item the item |
public static DictionaryMaker combine(String pathA, String pathB)
{
DictionaryMaker dictionaryMaker = new DictionaryMaker();
dictionaryMaker.addAll(DictionaryMaker.loadAsItemList(pathA));
dictionaryMaker.addAll(DictionaryMaker.loadAsItemList(pathB));
return dictionaryMaker;
} | Combine two dictionaries.
@param pathA path of dictionary A
@param pathB path of dictionary B
@return the combined DictionaryMaker |
public static DictionaryMaker combine(String... pathArray)
{
DictionaryMaker dictionaryMaker = new DictionaryMaker();
for (String path : pathArray)
{
logger.warning("正在处理" + path);
dictionaryMaker.addAll(DictionaryMaker.loadAsItemList(path));
}
return dictionaryMaker;
} | Combine multiple dictionaries.
@param pathArray the dictionary paths
@return the combined DictionaryMaker |
public static DictionaryMaker combineWithNormalization(String[] pathArray)
{
DictionaryMaker dictionaryMaker = new DictionaryMaker();
logger.info("正在处理主词典" + pathArray[0]);
dictionaryMaker.addAll(DictionaryMaker.loadAsItemList(pathArray[0]));
for (int i = 1; i < pathArray.length; ++i)
{
logger.info("正在处理副词典" + pathArray[i] + ",将执行新词合并模式");
dictionaryMaker.addAllNotCombine(DictionaryMaker.loadAsItemList(pathArray[i]));
}
return dictionaryMaker;
} | Normalize every dictionary except the first one, then combine them.
@param pathArray the dictionary paths (the first is the main dictionary)
@return the combined DictionaryMaker |
public static DictionaryMaker combineWhenNotInclude(String[] pathArray)
{
DictionaryMaker dictionaryMaker = new DictionaryMaker();
logger.info("正在处理主词典" + pathArray[0]);
dictionaryMaker.addAll(DictionaryMaker.loadAsItemList(pathArray[0]));
for (int i = 1; i < pathArray.length; ++i)
{
logger.info("正在处理副词典" + pathArray[i] + ",并且过滤已有词典");
dictionaryMaker.addAllNotCombine(DictionaryMaker.normalizeFrequency(DictionaryMaker.loadAsItemList(pathArray[i])));
}
return dictionaryMaker;
} | Combine, only adding words from the other dictionaries that the first dictionary does not already contain.
@param pathArray the dictionary paths (the first is the main dictionary)
@return the combined DictionaryMaker |
public static List<Item> normalizeFrequency(List<Item> itemList)
{
for (Item item : itemList)
{
ArrayList<Map.Entry<String, Integer>> entryArray = new ArrayList<Map.Entry<String, Integer>>(item.labelMap.entrySet());
Collections.sort(entryArray, new Comparator<Map.Entry<String, Integer>>()
{
@Override
public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2)
{
return o1.getValue().compareTo(o2.getValue());
}
});
int index = 1;
for (Map.Entry<String, Integer> pair : entryArray)
{
item.labelMap.put(pair.getKey(), index);
++index;
}
}
return itemList;
} | Normalize frequencies: reassign each label's frequency according to its rank after sorting.
@param itemList the item list
@return the processed list |
public boolean learn(String templFile, String trainFile, String modelFile, boolean textModelFile,
int maxitr, int freq, double eta, double C, int threadNum, int shrinkingSize,
Algorithm algorithm)
{
if (eta <= 0)
{
System.err.println("eta must be > 0.0");
return false;
}
if (C < 0.0)
{
System.err.println("C must be >= 0.0");
return false;
}
if (shrinkingSize < 1)
{
System.err.println("shrinkingSize must be >= 1");
return false;
}
if (threadNum <= 0)
{
System.err.println("thread must be > 0");
return false;
}
EncoderFeatureIndex featureIndex = new EncoderFeatureIndex(threadNum);
List<TaggerImpl> x = new ArrayList<TaggerImpl>();
if (!featureIndex.open(templFile, trainFile))
{
System.err.println("Fail to open " + templFile + " " + trainFile);
}
// File file = new File(trainFile);
// if (!file.exists())
// {
// System.err.println("train file " + trainFile + " does not exist.");
// return false;
// }
BufferedReader br = null;
try
{
InputStreamReader isr = new InputStreamReader(IOUtil.newInputStream(trainFile), "UTF-8");
br = new BufferedReader(isr);
int lineNo = 0;
while (true)
{
TaggerImpl tagger = new TaggerImpl(TaggerImpl.Mode.LEARN);
tagger.open(featureIndex);
TaggerImpl.ReadStatus status = tagger.read(br);
if (status == TaggerImpl.ReadStatus.ERROR)
{
System.err.println("error when reading " + trainFile);
return false;
}
if (!tagger.empty())
{
if (!tagger.shrink())
{
System.err.println("fail to build feature index ");
return false;
}
tagger.setThread_id_(lineNo % threadNum);
x.add(tagger);
}
else if (status == TaggerImpl.ReadStatus.EOF)
{
break;
}
else
{
continue;
}
if (++lineNo % 100 == 0)
{
System.out.print(lineNo + ".. ");
}
}
br.close();
}
catch (IOException e)
{
System.err.println("train file " + trainFile + " does not exist.");
return false;
}
featureIndex.shrink(freq, x);
double[] alpha = new double[featureIndex.size()];
Arrays.fill(alpha, 0.0);
featureIndex.setAlpha_(alpha);
System.out.println("Number of sentences: " + x.size());
System.out.println("Number of features: " + featureIndex.size());
System.out.println("Number of thread(s): " + threadNum);
System.out.println("Freq: " + freq);
System.out.println("eta: " + eta);
System.out.println("C: " + C);
System.out.println("shrinking size: " + shrinkingSize);
switch (algorithm)
{
case CRF_L1:
if (!runCRF(x, featureIndex, alpha, maxitr, C, eta, shrinkingSize, threadNum, true))
{
System.err.println("CRF_L1 execute error");
return false;
}
break;
case CRF_L2:
if (!runCRF(x, featureIndex, alpha, maxitr, C, eta, shrinkingSize, threadNum, false))
{
System.err.println("CRF_L2 execute error");
return false;
}
break;
case MIRA:
if (!runMIRA(x, featureIndex, alpha, maxitr, C, eta, shrinkingSize, threadNum))
{
System.err.println("MIRA execute error");
return false;
}
break;
default:
break;
}
if (!featureIndex.save(modelFile, textModelFile))
{
System.err.println("Failed to save model");
}
System.out.println("Done!");
return true;
} | Train a model.
@param templFile the template file
@param trainFile the training file
@param modelFile the model file
@param textModelFile whether to also save the model in text form
@param maxitr maximum number of iterations
@param freq minimum feature frequency
@param eta convergence threshold
@param C cost factor
@param threadNum number of threads
@param shrinkingSize shrinking size
@param algorithm the training algorithm
@return whether training succeeded |
private boolean runCRF(List<TaggerImpl> x,
EncoderFeatureIndex featureIndex,
double[] alpha,
int maxItr,
double C,
double eta,
int shrinkingSize,
int threadNum,
boolean orthant)
{
double oldObj = 1e+37;
int converge = 0;
LbfgsOptimizer lbfgs = new LbfgsOptimizer();
List<CRFEncoderThread> threads = new ArrayList<CRFEncoderThread>();
for (int i = 0; i < threadNum; i++)
{
CRFEncoderThread thread = new CRFEncoderThread(alpha.length);
thread.start_i = i;
thread.size = x.size();
thread.threadNum = threadNum;
thread.x = x;
threads.add(thread);
}
int all = 0;
for (int i = 0; i < x.size(); i++)
{
all += x.get(i).size();
}
ExecutorService executor = Executors.newFixedThreadPool(threadNum);
for (int itr = 0; itr < maxItr; itr++)
{
featureIndex.clear();
try
{
executor.invokeAll(threads);
}
catch (Exception e)
{
e.printStackTrace();
return false;
}
for (int i = 1; i < threadNum; i++)
{
threads.get(0).obj += threads.get(i).obj;
threads.get(0).err += threads.get(i).err;
threads.get(0).zeroone += threads.get(i).zeroone;
}
for (int i = 1; i < threadNum; i++)
{
for (int k = 0; k < featureIndex.size(); k++)
{
threads.get(0).expected[k] += threads.get(i).expected[k];
}
}
int numNonZero = 0;
if (orthant)
{
for (int k = 0; k < featureIndex.size(); k++)
{
threads.get(0).obj += Math.abs(alpha[k] / C);
if (alpha[k] != 0.0)
{
numNonZero++;
}
}
}
else
{
numNonZero = featureIndex.size();
for (int k = 0; k < featureIndex.size(); k++)
{
threads.get(0).obj += (alpha[k] * alpha[k] / (2.0 * C));
threads.get(0).expected[k] += alpha[k] / C;
}
}
for (int i = 1; i < threadNum; i++)
{
// try to free some memory
threads.get(i).expected = null;
}
double diff = (itr == 0 ? 1.0 : Math.abs(oldObj - threads.get(0).obj) / oldObj);
StringBuilder b = new StringBuilder();
b.append("iter=").append(itr);
b.append(" terr=").append(1.0 * threads.get(0).err / all);
b.append(" serr=").append(1.0 * threads.get(0).zeroone / x.size());
b.append(" act=").append(numNonZero);
b.append(" obj=").append(threads.get(0).obj);
b.append(" diff=").append(diff);
System.out.println(b.toString());
oldObj = threads.get(0).obj;
if (diff < eta)
{
converge++;
}
else
{
converge = 0;
}
if (itr > maxItr || converge == 3)
{
break;
}
int ret = lbfgs.optimize(featureIndex.size(), alpha, threads.get(0).obj, threads.get(0).expected, orthant, C);
if (ret <= 0)
{
return false;
}
}
executor.shutdown();
try
{
executor.awaitTermination(-1, TimeUnit.SECONDS);
}
catch (Exception e)
{
e.printStackTrace();
System.err.println("fail waiting executor to shutdown");
}
return true;
} | CRF training.
@param x the sentence list
@param featureIndex the feature index
@param alpha the feature function weights
@param maxItr maximum number of iterations
@param C cost factor
@param eta convergence threshold
@param shrinkingSize unused
@param threadNum number of threads
@param orthant whether to use the L1 norm (orthant-wise optimization)
@return whether training succeeded |
public List<Intervalable> removeOverlaps(List<Intervalable> intervals)
{
// sort by size first, then by left endpoint
Collections.sort(intervals, new IntervalableComparatorBySize());
Set<Intervalable> removeIntervals = new TreeSet<Intervalable>();
for (Intervalable interval : intervals)
{
// skip intervals that have already been marked for removal
if (removeIntervals.contains(interval))
{
continue;
}
// otherwise mark everything that overlaps it for removal
removeIntervals.addAll(findOverlaps(interval));
}
// remove all overlapping intervals
for (Intervalable removeInterval : removeIntervals)
{
intervals.remove(removeInterval);
}
// sort by left endpoint
Collections.sort(intervals, new IntervalableComparatorByPosition());
return intervals;
} | Remove overlapping intervals from the interval list.
@param intervals the interval list
@return the list with overlaps removed, sorted by position |
public static Word compile(Word word)
{
word.value = PosTagCompiler.compile(word.label, word.value);
// switch (word.label)
// {
// case "ns":
// case "nsf":
// {
// word.value = Predefine.TAG_PLACE;
// }
// break;
//// case "nz":
// case "nx":
// {
// word.value = Predefine.TAG_PROPER;
// }
// break;
// case "nt":
// case "ntc":
// case "ntcf":
// case "ntcb":
// case "ntch":
// case "nto":
// case "ntu":
// case "nts":
// case "nth":
// {
// word.value = Predefine.TAG_GROUP;
// }
// break;
// case "m":
// case "mq":
// {
// word.value = Predefine.TAG_NUMBER;
// }
// break;
// case "x":
// {
// word.value = Predefine.TAG_CLUSTER;
// }
// break;
// case "xx":
// {
// word.value = Predefine.TAG_OTHER;
// }
// break;
// case "t":
// {
// word.value = Predefine.TAG_TIME;
// }
// break;
// case "nr":
// case "nrf":
// {
// word.value = Predefine.TAG_PEOPLE;
// }
// break;
// }
return word;
} | Compile a word into its equivalent string form.
@param word the word
@return the compiled word |
public static void compileWithoutNS(List<IWord> wordList)
{
for (IWord word : wordList)
{
if (word.getLabel().startsWith("ns")) continue;
word.setValue(PosTagCompiler.compile(word.getLabel(), word.getValue()));
// switch (word.getLabel())
// {
// case "nx":
// {
// word.setValue(Predefine.TAG_PROPER);
// }
// break;
// case "nt":
// case "ntc":
// case "ntcf":
// case "ntcb":
// case "ntch":
// case "nto":
// case "ntu":
// case "nts":
// case "nth":
// {
// word.setValue(Predefine.TAG_GROUP);
// }
// break;
// case "m":
// case "mq":
// {
// word.setValue(Predefine.TAG_NUMBER);
// }
// break;
// case "x":
// {
// word.setValue(Predefine.TAG_CLUSTER);
// }
// break;
// case "xx":
// {
// word.setValue(Predefine.TAG_OTHER);
// }
// break;
// case "t":
// {
// word.setValue(Predefine.TAG_TIME);
// }
// break;
// case "nr":
// {
// word.setValue(Predefine.TAG_PEOPLE);
// }
// break;
// }
}
} | Precompile the word list, leaving ns (place-name) words untouched.
@param wordList the word list |
public ArrayList<Pair<String, V>> commonPrefixSearch(String key, int offset, int maxResults)
{
byte[] keyBytes = key.getBytes(utf8);
List<Pair<Integer, Integer>> pairList = commonPrefixSearch(keyBytes, offset, maxResults);
ArrayList<Pair<String, V>> resultList = new ArrayList<Pair<String, V>>(pairList.size());
for (Pair<Integer, Integer> pair : pairList)
{
resultList.add(new Pair<String, V>(new String(keyBytes, 0, pair.first), valueArray[pair.second]));
}
return resultList;
} | Common prefix search.
@param key the query key
@param offset the offset to start from
@param maxResults maximum number of results
@return a list of (prefix, value) pairs |
public void calcExpectation(double[] expected, double Z, int size)
{
double c = Math.exp(alpha + beta - cost - Z);
for (int i = 0; fVector.get(i) != -1; i++)
{
int idx = fVector.get(i) + y;
expected[idx] += c;
}
for (Path p : lpath)
{
p.calcExpectation(expected, Z, size);
}
} | Compute the expectation of this node.
@param expected the output expectation array
@param Z the normalization factor
@param size the number of tags |
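As a gloss on the formula (an editorial note, not from the source): the amount added to every feature firing on this node is c = exp(α + β − cost − Z), where α and β are the node's forward and backward scores, cost is the node's own score (counted in both α and β, hence subtracted once), and Z is the log partition function; c is therefore the node's marginal probability under the CRF.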
public List<List<Vertex>> biSegment(char[] sSentence, int nKind, WordNet wordNetOptimum, WordNet wordNetAll)
{
List<List<Vertex>> coarseResult = new LinkedList<List<Vertex>>();
//////////////// build the word net ////////////////
generateWordNet(wordNetAll);
// logger.trace("词网大小:" + wordNetAll.size());
// logger.trace("打印词网:\n" + wordNetAll);
/////////////// build the word graph ////////////////
Graph graph = generateBiGraph(wordNetAll);
// logger.trace(graph.toString());
if (HanLP.Config.DEBUG)
{
System.out.printf("打印词图:%s\n", graph.printByTo());
}
/////////////// N-shortest paths ////////////////
NShortPath nShortPath = new NShortPath(graph, nKind);
List<int[]> spResult = nShortPath.getNPaths(nKind * 2);
if (spResult.size() == 0)
{
throw new RuntimeException(nKind + "-最短路径求解失败,请检查上面的词网是否存在负圈或悬孤节点");
}
// logger.trace(nKind + "-最短路径");
// for (int[] path : spResult)
// {
// logger.trace(Graph.parseResult(graph.parsePath(path)));
// }
////////////// date/number merging strategy
for (int[] path : spResult)
{
List<Vertex> vertexes = graph.parsePath(path);
generateWord(vertexes, wordNetOptimum);
coarseResult.add(vertexes);
}
return coarseResult;
} | Segmentation with a bigram language model.
@param sSentence the sentence to segment
@param nKind number of results wanted
@param wordNetOptimum the word net collecting merged words
@param wordNetAll the word net holding all candidate words
@return a list of coarse segmentation results |
public static int ed(String wrongWord, String rightWord)
{
final int m = wrongWord.length();
final int n = rightWord.length();
int[][] d = new int[m + 1][n + 1];
for (int j = 0; j <= n; ++j)
{
d[0][j] = j;
}
for (int i = 0; i <= m; ++i)
{
d[i][0] = i;
}
for (int i = 1; i <= m; ++i)
{
char ci = wrongWord.charAt(i - 1);
for (int j = 1; j <= n; ++j)
{
char cj = rightWord.charAt(j - 1);
if (ci == cj)
{
d[i][j] = d[i - 1][j - 1];
}
else if (i > 1 && j > 1 && ci == rightWord.charAt(j - 2) && cj == wrongWord.charAt(i - 2))
{
// adjacent transposition
d[i][j] = 1 + Math.min(d[i - 2][j - 2], Math.min(d[i][j - 1], d[i - 1][j]));
}
else
{
// the three candidates: substitute ci with cj, insert cj into the wrong string, delete ci from it
d[i][j] = Math.min(d[i - 1][j - 1] + 1, Math.min(d[i][j - 1] + 1, d[i - 1][j] + 1));
}
}
}
return d[m][n];
} | Edit distance.
@param wrongWord string A (swapping the two arguments gives the same result)
@param rightWord string B
@return the distance between them |
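A quick sanity check of the transposition branch, assuming the static ed method above is in scope:
System.out.println(ed("teh", "the"));        // 1: a single adjacent transposition
System.out.println(ed("kitten", "sitting")); // 3: two substitutions plus one insertion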
public static int compute(char[] wrongWord, char[] rightWord)
{
final int m = wrongWord.length;
final int n = rightWord.length;
int[][] d = new int[m + 1][n + 1];
for (int j = 0; j <= n; ++j)
{
d[0][j] = j;
}
for (int i = 0; i <= m; ++i)
{
d[i][0] = i;
}
for (int i = 1; i <= m; ++i)
{
char ci = wrongWord[i - 1];
for (int j = 1; j <= n; ++j)
{
char cj = rightWord[j - 1];
if (ci == cj)
{
d[i][j] = d[i - 1][j - 1];
}
else if (i > 1 && j > 1 && ci == rightWord[j - 2] && cj == wrongWord[i - 2])
{
// adjacent transposition
d[i][j] = 1 + Math.min(d[i - 2][j - 2], Math.min(d[i][j - 1], d[i - 1][j]));
}
else
{
// the three candidates: substitute ci with cj, insert cj into the wrong string, delete ci from it
d[i][j] = Math.min(d[i - 1][j - 1] + 1, Math.min(d[i][j - 1] + 1, d[i - 1][j] + 1));
}
}
}
return d[m][n];
} | Edit distance.
@param wrongWord string A (swapping the two arguments gives the same result)
@param rightWord string B
@return the distance between them |
public static List<String> toSentenceList(String content, boolean shortest)
{
return toSentenceList(content.toCharArray(), shortest);
} | Split text into sentences.
@param content the text
@param shortest whether to split into the finest units (treating commas as separators too)
@return the sentence list |
public static boolean hasNature(List<Term> sentence, Nature nature)
{
for (Term term : sentence)
{
if (term.nature == nature)
{
return true;
}
}
return false;
} | Whether the sentence contains a given part of speech.
@param sentence the sentence
@param nature the part of speech
@return true if any term carries that part of speech |
public static boolean containsKey(String key, int length)
{
if (!trie.containsKey(key)) return false;
return key.length() >= length;
} | Whether the dictionary contains the key and the key is at least length characters long.
@param key the key
@param length the minimum length
@return true if both conditions hold |
public static String preprocess(String text)
{
return text.replaceAll("\\p{P}", " ").replaceAll("\\s+", " ").toLowerCase(Locale.getDefault());
} | Preprocess: strip punctuation, collapse whitespace and lowercase the text.
@param text the text
@return the preprocessed text |
public static String[] extractKeywords(String text)
{
List<Term> termList = NotionalTokenizer.segment(text);
String[] wordArray = new String[termList.size()];
Iterator<Term> iterator = termList.iterator();
for (int i = 0; i < wordArray.length; i++)
{
wordArray[i] = iterator.next().word;
}
return wordArray;
} | Extract keywords. In a real application this should also cover phrases.
@param text the text
@return the keyword array |
public static Map<String, Integer> getKeywordCounts(String[] keywordArray)
{
Map<String, Integer> counts = new HashMap<String, Integer>();
Integer counter;
for (int i = 0; i < keywordArray.length; ++i)
{
counter = counts.get(keywordArray[i]);
if (counter == null)
{
counter = 0;
}
counts.put(keywordArray[i], ++counter); // increment the count
}
return counts;
} | Count the frequency of each keyword.
@param keywordArray the keyword array
@return a map from keyword to count |
public static Map<String, String[]> loadCorpus(String path)
{
Map<String, String[]> dataSet = new TreeMap<String, String[]>();
File root = new File(path);
File[] folders = root.listFiles();
if (folders == null) return null;
for (File folder : folders)
{
if (folder.isFile()) continue;
File[] files = folder.listFiles();
if (files == null) continue;
String[] documents = new String[files.length];
for (int i = 0; i < files.length; i++)
{
documents[i] = IOUtil.readTxt(files[i].getAbsolutePath());
}
dataSet.put(folder.getName(), documents);
}
return dataSet;
} | Load all corpora under a folder.
@param path the folder path
@return a map from category (sub-folder name) to its documents |
public static Map<String, String[]> loadCorpusWithException(String folderPath, String charsetName) throws IOException
{
if (folderPath == null) throw new IllegalArgumentException("参数 folderPath == null");
File root = new File(folderPath);
if (!root.exists()) throw new IllegalArgumentException(String.format("目录 %s 不存在", root.getAbsolutePath()));
if (!root.isDirectory())
throw new IllegalArgumentException(String.format("目录 %s 不是一个目录", root.getAbsolutePath()));
Map<String, String[]> dataSet = new TreeMap<String, String[]>();
File[] folders = root.listFiles();
if (folders == null) return null;
for (File folder : folders)
{
if (folder.isFile()) continue;
File[] files = folder.listFiles();
if (files == null) continue;
String[] documents = new String[files.length];
for (int i = 0; i < files.length; i++)
{
documents[i] = readTxt(files[i], charsetName);
}
dataSet.put(folder.getName(), documents);
}
return dataSet;
} | Load all corpora under a folder.
@param folderPath the folder path
@param charsetName the charset of the corpus files
@return a map from category (sub-folder name) to its documents |
public static boolean sortDictionary(String path)
{
try
{
BufferedReader br = new BufferedReader(new InputStreamReader(IOUtil.newInputStream(path), "UTF-8"));
TreeMap<String, String> map = new TreeMap<String, String>();
String line;
while ((line = br.readLine()) != null)
{
String[] param = line.split("\\s");
map.put(param[0], line);
}
br.close();
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(IOUtil.newOutputStream(path)));
for (Map.Entry<String, String> entry : map.entrySet())
{
bw.write(entry.getValue());
bw.newLine();
}
bw.close();
}
catch (Exception e)
{
e.printStackTrace();
return false;
}
return true;
} | Sort a dictionary file in place.
@param path the dictionary path
@return whether the sort succeeded |
public StringDictionary reverse()
{
StringDictionary dictionary = new StringDictionary(separator);
for (Map.Entry<String, String> entry : entrySet())
{
dictionary.trie.put(entry.getValue(), entry.getKey());
}
return dictionary;
} | Return a reversed copy of this dictionary (keys and values swapped).
@return the reversed dictionary |
public SimpleMDAGNode transition(SimpleMDAGNode[] mdagDataArray, char letter)
{
SimpleMDAGNode targetNode = null;
int offset = binarySearch(mdagDataArray, letter);
if (offset >= 0)
{
targetNode = mdagDataArray[offset];
}
/////
return targetNode;
} | Follows an outgoing _transition from this node.
@param mdagDataArray the array of SimpleMDAGNodes containing this node
@param letter the char representation of the desired _transition's label
@return the SimpleMDAGNode that is the target of the _transition labeled with {@code letter},
or null if there is no such labeled _transition from this node |
private int binarySearch(SimpleMDAGNode[] mdagDataArray, char node)
{
if (transitionSetSize < 1)
{
return -1;
}
int high = transitionSetBeginIndex + transitionSetSize - 1;
int low = transitionSetBeginIndex;
while (low <= high)
{
int mid = ((low + high) >>> 1);
int cmp = mdagDataArray[mid].getLetter() - node;
if (cmp < 0)
low = mid + 1;
else if (cmp > 0)
high = mid - 1;
else
return mid;
}
return -1;
} | Binary search within this node's transition set.
@param mdagDataArray the array of SimpleMDAGNodes
@param node the transition label (char) to search for
@return the index of the matching transition, or -1 if not found |
public static SimpleMDAGNode traverseMDAG(SimpleMDAGNode[] mdagDataArray, SimpleMDAGNode sourceNode, String str)
{
// char firstLetter = str.charAt(0);
//Loop through the SimpleMDAGNodes in the processing MDAG's source node's _transition set,
//searching for the the one with a letter (char) equal to the first char of str.
//We can use that target node to _transition through the MDAG with the rest of the string
return sourceNode.transition(mdagDataArray, str.toCharArray());
// for(int i = 0; i < sourceNode.transitionSetSize; i++)
// {
// if(mdagDataArray[i].getLetter() == firstLetter)
// return mdagDataArray[i]._transition(mdagDataArray, str.substring(1));
// }
// /////
//
// return null;
} | Follows a _transition path starting from the source node of a MDAG.
@param mdagDataArray the array containing the data of the MDAG to be traversed
@param sourceNode the dummy SimpleMDAGNode which functions as the source of the MDAG data in {@code mdagDataArray}
@param str a String corresponding to a _transition path in the to-be-traversed MDAG
@return the SimpleMDAGNode at the end of the _transition path corresponding to
{@code str}, or null if such a _transition path is not present in the MDAG |
public static String compile(String tag, String name)
{
if (tag.startsWith("m")) return Predefine.TAG_NUMBER;
else if (tag.startsWith("nr")) return Predefine.TAG_PEOPLE;
else if (tag.startsWith("ns")) return Predefine.TAG_PLACE;
else if (tag.startsWith("nt")) return Predefine.TAG_GROUP;
else if (tag.startsWith("t")) return Predefine.TAG_TIME;
else if (tag.equals("x")) return Predefine.TAG_CLUSTER;
else if (tag.equals("nx")) return Predefine.TAG_PROPER;
else if (tag.equals("xx")) return Predefine.TAG_OTHER;
// switch (tag)
// {
// case "m":
// case "mq":
// return Predefine.TAG_NUMBER;
// case "nr":
// case "nr1":
// case "nr2":
// case "nrf":
// case "nrj":
// return Predefine.TAG_PEOPLE;
// case "ns":
// case "nsf":
// return Predefine.TAG_PLACE;
// case "nt":
// return Predefine.TAG_TIME;
// case "x":
// return Predefine.TAG_CLUSTER;
// case "nx":
// return Predefine.TAG_PROPER;
// }
return name;
} | Compile a word, e.g. replace a numeral with ##数##.
@param tag the POS tag
@param name the original word
@return the compiled equivalent word |
protected static void addFeatureThenClear(StringBuilder rawFeature, List<Integer> featureVector, FeatureMap featureMap)
{
int id = featureMap.idOf(rawFeature.toString());
if (id != -1)
{
featureVector.add(id);
}
rawFeature.setLength(0);
} | Add a feature and clear the buffer.
@param rawFeature the raw feature buffer
@param featureVector the feature vector to append to
@param featureMap the feature map |
public String[] tags(TagSet tagSet)
{
assert tagArray != null;
String[] tags = new String[tagArray.length];
for (int i = 0; i < tags.length; i++)
{
tags[i] = tagSet.stringOf(tagArray[i]);
}
return tags;
} | Restore the string form of the tags according to the tag set.
@param tagSet the tag set
@return the string tags |
private boolean openTemplate(String filename)
{
InputStreamReader isr = null;
try
{
isr = new InputStreamReader(IOUtil.newInputStream(filename), "UTF-8");
BufferedReader br = new BufferedReader(isr);
String line;
while ((line = br.readLine()) != null)
{
if (line.length() == 0 || line.charAt(0) == ' ' || line.charAt(0) == '#')
{
continue;
}
else if (line.charAt(0) == 'U')
{
unigramTempls_.add(line.trim());
}
else if (line.charAt(0) == 'B')
{
bigramTempls_.add(line.trim());
}
else
{
System.err.println("unknown type: " + line);
}
}
br.close();
templs_ = makeTempls(unigramTempls_, bigramTempls_);
}
catch (Exception e)
{
if (isr != null)
{
try
{
isr.close();
}
catch (Exception e2)
{
}
}
e.printStackTrace();
System.err.println("Error reading " + filename);
return false;
}
return true;
} | Read the feature template file.
@param filename the template file
@return whether reading succeeded |
private boolean openTagSet(String filename)
{
int max_size = 0;
InputStreamReader isr = null;
y_.clear();
try
{
isr = new InputStreamReader(IOUtil.newInputStream(filename), "UTF-8");
BufferedReader br = new BufferedReader(isr);
String line;
while ((line = br.readLine()) != null)
{
if (line.length() == 0)
{
continue;
}
char firstChar = line.charAt(0);
if (firstChar == '\0' || firstChar == ' ' || firstChar == '\t')
{
continue;
}
String[] cols = line.split("[\t ]", -1);
if (max_size == 0)
{
max_size = cols.length;
}
if (max_size != cols.length)
{
String msg = "inconsistent column size: " + max_size +
" " + cols.length + " " + filename;
throw new RuntimeException(msg);
}
xsize_ = cols.length - 1;
if (y_.indexOf(cols[max_size - 1]) == -1)
{
y_.add(cols[max_size - 1]);
}
}
Collections.sort(y_);
br.close();
}
catch (Exception e)
{
if (isr != null)
{
try
{
isr.close();
}
catch (Exception e2)
{
}
}
e.printStackTrace();
System.err.println("Error reading " + filename);
return false;
}
return true;
} | Read the tag set from the training file.
@param filename the training file
@return whether reading succeeded |
public void calcExpectation(double[] expected, double Z, int size)
{
double c = Math.exp(lnode.alpha + cost + rnode.beta - Z);
for (int i = 0; fvector.get(i) != -1; i++)
{
int idx = fvector.get(i) + lnode.y * size + rnode.y;
expected[idx] += c;
}
} | Compute the expectation of this edge.
@param expected the output expectation array
@param Z the normalization factor
@param size the number of tags |
private String compileRealWord(String realWord, CoreDictionary.Attribute attribute)
{
if (attribute.nature.length == 1)
{
Nature nature = attribute.nature[0];
if (nature.startsWith("nr"))
{
wordID = CoreDictionary.NR_WORD_ID;
// this.attribute = CoreDictionary.get(CoreDictionary.NR_WORD_ID);
return Predefine.TAG_PEOPLE;
}
else if (nature.startsWith("ns"))
{
wordID = CoreDictionary.NS_WORD_ID;
// during place-name recognition, words like 河镇 should keep their own POS rather than that of 未##地
// this.attribute = CoreDictionary.get(CoreDictionary.NS_WORD_ID);
return Predefine.TAG_PLACE;
}
// case nz:
else if (nature == Nature.nx)
{
wordID = CoreDictionary.NX_WORD_ID;
if (wordID == -1)
wordID = CoreDictionary.X_WORD_ID;
// this.attribute = CoreDictionary.get(wordID);
return Predefine.TAG_PROPER;
}
else if (nature.startsWith("nt") || nature == Nature.nit)
{
wordID = CoreDictionary.NT_WORD_ID;
// this.attribute = CoreDictionary.get(CoreDictionary.NT_WORD_ID);
return Predefine.TAG_GROUP;
}
else if (nature.startsWith('m'))
{
wordID = CoreDictionary.M_WORD_ID;
this.attribute = CoreDictionary.get(CoreDictionary.M_WORD_ID);
return Predefine.TAG_NUMBER;
}
else if (nature.startsWith('x'))
{
wordID = CoreDictionary.X_WORD_ID;
this.attribute = CoreDictionary.get(CoreDictionary.X_WORD_ID);
return Predefine.TAG_CLUSTER;
}
// case xx:
// case w:
// {
// word= Predefine.TAG_OTHER;
// }
// break;
else if (nature == Nature.t)
{
wordID = CoreDictionary.T_WORD_ID;
this.attribute = CoreDictionary.get(CoreDictionary.T_WORD_ID);
return Predefine.TAG_TIME;
}
}
return realWord;
} | Convert the original word into its equivalent word string.
@param realWord the original word
@param attribute the word's attribute
@return the equivalent word string |
public boolean confirmNature(Nature nature)
{
if (attribute.nature.length == 1 && attribute.nature[0] == nature)
{
return true;
}
boolean result = true;
int frequency = attribute.getNatureFrequency(nature);
if (frequency == 0)
{
frequency = 1000;
result = false;
}
attribute = new CoreDictionary.Attribute(nature, frequency);
return result;
} | Lock this vertex's part of speech to nature.
@param nature the part of speech
@return true if the given part of speech is in the attribute's nature list, false otherwise |
public boolean confirmNature(Nature nature, boolean updateWord)
{
switch (nature.firstChar())
{
case 'm':
word = Predefine.TAG_NUMBER;
break;
case 't':
word = Predefine.TAG_TIME;
break;
default:
logger.warning("没有与" + nature + "对应的case");
break;
}
return confirmNature(nature);
} | Lock this vertex's part of speech to nature; this overload also updates the precompiled word string and is slower.
@param nature the part of speech
@param updateWord whether to update the precompiled word string
@return true if the given part of speech is in the attribute's nature list, false otherwise |
public static Vertex newNumberInstance(String realWord)
{
return new Vertex(Predefine.TAG_NUMBER, realWord, new CoreDictionary.Attribute(Nature.m, 1000));
} | Create a numeral vertex.
@param realWord the actual string of the number
@return the numeral vertex |
public static Vertex newAddressInstance(String realWord)
{
return new Vertex(Predefine.TAG_PLACE, realWord, new CoreDictionary.Attribute(Nature.ns, 1000));
} | Create a place-name vertex.
@param realWord the actual string of the place name
@return the place-name vertex |
public static Vertex newPunctuationInstance(String realWord)
{
return new Vertex(realWord, new CoreDictionary.Attribute(Nature.w, 1000));
} | Create a punctuation vertex.
@param realWord the actual punctuation string
@return the punctuation vertex |
public static Vertex newTranslatedPersonInstance(String realWord, int frequency)
{
return new Vertex(Predefine.TAG_PEOPLE, realWord, new CoreDictionary.Attribute(Nature.nrf, frequency));
} | Create a transliterated person-name vertex.
@param realWord the actual string of the name
@param frequency the frequency
@return the person-name vertex |
public static Vertex newJapanesePersonInstance(String realWord, int frequency)
{
return new Vertex(Predefine.TAG_PEOPLE, realWord, new CoreDictionary.Attribute(Nature.nrj, frequency));
} | Create a Japanese person-name vertex.
@param realWord the actual string of the name
@param frequency the frequency
@return the person-name vertex |
public static Vertex newPersonInstance(String realWord, int frequency)
{
return new Vertex(Predefine.TAG_PEOPLE, realWord, new CoreDictionary.Attribute(Nature.nr, frequency));
} | Create a person-name vertex.
@param realWord the actual string of the name
@param frequency the frequency
@return the person-name vertex |
public static Vertex newPlaceInstance(String realWord, int frequency)
{
return new Vertex(Predefine.TAG_PLACE, realWord, new CoreDictionary.Attribute(Nature.ns, frequency));
} | Create a place-name vertex.
@param realWord the actual string of the place name
@param frequency the frequency
@return the place-name vertex |
public static Vertex newOrganizationInstance(String realWord, int frequency)
{
return new Vertex(Predefine.TAG_GROUP, realWord, new CoreDictionary.Attribute(Nature.nt, frequency));
} | Create an organization-name vertex.
@param realWord the actual string of the organization name
@param frequency the frequency
@return the organization-name vertex |
public static Vertex newTimeInstance(String realWord)
{
return new Vertex(Predefine.TAG_TIME, realWord, new CoreDictionary.Attribute(Nature.t, 1000));
} | Create a time vertex.
@param realWord the actual string of the time expression
@return the time vertex |
public static Vertex newB()
{
return new Vertex(Predefine.TAG_BIGIN, " ", new CoreDictionary.Attribute(Nature.begin, Predefine.MAX_FREQUENCY / 10), CoreDictionary.getWordID(Predefine.TAG_BIGIN));
} | Create a thread-safe begin vertex.
@return the begin vertex |
public static Vertex newE()
{
return new Vertex(Predefine.TAG_END, " ", new CoreDictionary.Attribute(Nature.end, Predefine.MAX_FREQUENCY / 10), CoreDictionary.getWordID(Predefine.TAG_END));
} | Create a thread-safe end vertex.
@return the end vertex |
public static void load(String coreStopWordDictionaryPath, boolean loadCacheIfPossible)
{
ByteArray byteArray = loadCacheIfPossible ? ByteArray.createByteArray(coreStopWordDictionaryPath + Predefine.BIN_EXT) : null;
if (byteArray == null)
{
try
{
dictionary = new StopWordDictionary(HanLP.Config.CoreStopWordDictionaryPath);
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(IOUtil.newOutputStream(HanLP.Config.CoreStopWordDictionaryPath + Predefine.BIN_EXT)));
dictionary.save(out);
out.close();
}
catch (Exception e)
{
logger.severe("载入停用词词典" + HanLP.Config.CoreStopWordDictionaryPath + "失败" + TextUtility.exceptionToString(e));
throw new RuntimeException("载入停用词词典" + HanLP.Config.CoreStopWordDictionaryPath + "失败");
}
}
else
{
dictionary = new StopWordDictionary();
dictionary.load(byteArray);
}
} | Load another stop-word dictionary.
@param coreStopWordDictionaryPath the dictionary path
@param loadCacheIfPossible whether to prefer loading the binary cache (faster) |
public static void apply(List<Term> termList)
{
ListIterator<Term> listIterator = termList.listIterator();
while (listIterator.hasNext())
{
if (shouldRemove(listIterator.next())) listIterator.remove();
}
} | Apply filtering to a segmentation result.
@param termList the term list |
@Override
protected void ensureAvailableBytes(int size)
{
if (offset + size > bufferSize)
{
try
{
int availableBytes = (int) (fileChannel.size() - fileChannel.position());
ByteBuffer byteBuffer = ByteBuffer.allocate(Math.min(availableBytes, offset));
int readBytes = fileChannel.read(byteBuffer);
if (readBytes == availableBytes)
{
fileChannel.close();
fileChannel = null;
}
assert readBytes > 0 : "已到达文件尾部!";
byteBuffer.flip();
byte[] bytes = byteBuffer.array();
System.arraycopy(this.bytes, offset, this.bytes, offset - readBytes, bufferSize - offset);
System.arraycopy(bytes, 0, this.bytes, bufferSize - readBytes, readBytes);
offset -= readBytes;
}
catch (IOException e)
{
throw new RuntimeException(e);
}
}
} | Ensure that the buffer still holds at least size bytes.
@param size the number of bytes required |
public static String convertToPinyinString(String text, String separator, boolean remainNone)
{
List<Pinyin> pinyinList = PinyinDictionary.convertToPinyin(text, true);
int length = pinyinList.size();
StringBuilder sb = new StringBuilder(length * (5 + separator.length()));
int i = 1;
for (Pinyin pinyin : pinyinList)
{
if (pinyin == Pinyin.none5 && !remainNone)
{
sb.append(text.charAt(i - 1));
}
else sb.append(pinyin.getPinyinWithoutTone());
if (i < length)
{
sb.append(separator);
}
++i;
}
return sb.toString();
} | Convert text to pinyin.
@param text the text
@param separator the separator
@param remainNone for characters without a pinyin (e.g. punctuation), whether to represent them as "none" (true) or keep the original character (false)
@return a string of the form [pinyin][separator][pinyin] |
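A usage sketch (the expected output assumes the dictionary resolves 重庆 to its standard reading):
String s = convertToPinyinString("重庆", " ", false);
// expected: "chong qing" — tones are stripped by getPinyinWithoutTone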
public static String convertToPinyinFirstCharString(String text, String separator, boolean remainNone)
{
List<Pinyin> pinyinList = PinyinDictionary.convertToPinyin(text, remainNone);
int length = pinyinList.size();
StringBuilder sb = new StringBuilder(length * (1 + separator.length()));
int i = 1;
for (Pinyin pinyin : pinyinList)
{
sb.append(pinyin.getFirstChar());
if (i < length)
{
sb.append(separator);
}
++i;
}
return sb.toString();
} | Convert text to pinyin (first letters only).
@param text the text
@param separator the separator
@param remainNone for characters without a pinyin (e.g. punctuation), whether to keep them (represented as "none")
@return a string of the form [first letter][separator][first letter] |
public static Segment newSegment(String algorithm)
{
if (algorithm == null)
{
throw new IllegalArgumentException(String.format("非法参数 algorithm == %s", algorithm));
}
algorithm = algorithm.toLowerCase();
if ("viterbi".equals(algorithm) || "维特比".equals(algorithm))
return new ViterbiSegment(); // the Viterbi segmenter is currently the best balance of efficiency and accuracy
else if ("dat".equals(algorithm) || "双数组trie树".equals(algorithm))
return new DoubleArrayTrieSegment();
else if ("nshort".equals(algorithm) || "n最短路".equals(algorithm))
return new NShortSegment();
else if ("crf".equals(algorithm) || "条件随机场".equals(algorithm))
try
{
return new CRFLexicalAnalyzer();
}
catch (IOException e)
{
logger.warning("CRF模型加载失败");
throw new RuntimeException(e);
}
else if ("perceptron".equals(algorithm) || "感知机".equals(algorithm))
{
try
{
return new PerceptronLexicalAnalyzer();
}
catch (IOException e)
{
logger.warning("感知机模型加载失败");
throw new RuntimeException(e);
}
}
throw new IllegalArgumentException(String.format("非法参数 algorithm == %s", algorithm));
} | Create a segmenter.
This is a factory method<br>
@param algorithm the segmentation algorithm; both the Chinese and English names are accepted:<br>
<ul>
<li>维特比 (viterbi): the best balance between efficiency and accuracy</li>
<li>双数组trie树 (dat): extremely fast dictionary-based segmentation, tens of millions of characters per second</li>
<li>条件随机场 (crf): high accuracy in segmentation, POS tagging and named entity recognition; suitable for demanding NLP tasks</li>
<li>感知机 (perceptron): segmentation, POS tagging and named entity recognition with online learning support</li>
<li>N最短路 (nshort): slightly better named entity recognition, at the cost of speed</li>
</ul>
@return a segmenter |
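A usage sketch; the factory is static and its enclosing class is not shown in this excerpt (in HanLP it is the HanLP facade), so the qualifier below is an assumption:
Segment segment = HanLP.newSegment("perceptron"); // or "感知机", "viterbi", "crf", "dat", "nshort"
List<Term> termList = segment.seg("商品和服务");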
public static List<WordInfo> extractWords(String text, int size)
{
return extractWords(text, size, false);
} | Extract words.
@param text the large text
@param size number of words to extract
@return a list of words |
public static List<WordInfo> extractWords(BufferedReader reader, int size) throws IOException
{
return extractWords(reader, size, false);
} | Extract words
@param reader a reader supplying the text
@param size the number of words to extract
@return a list of words |
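The reader overload streams the corpus instead of holding it in a single string; a sketch (the file path is illustrative):
BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream("corpus.txt"), "UTF-8"));
List<WordInfo> wordInfoList = HanLP.extractWords(reader, 100);
reader.close();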
public static List<WordInfo> extractWords(String text, int size, boolean newWordsOnly)
{
NewWordDiscover discover = new NewWordDiscover(4, 0.0f, .5f, 100f, newWordsOnly);
return discover.discover(text, size);
} | Extract words (new word discovery)
@param text a large text
@param size the number of words to extract
@param newWordsOnly whether to extract only words absent from the dictionary
@return a list of words |
public static List<WordInfo> extractWords(BufferedReader reader, int size, boolean newWordsOnly) throws IOException
{
NewWordDiscover discover = new NewWordDiscover(4, 0.0f, .5f, 100f, newWordsOnly);
return discover.discover(reader, size);
} | Extract words (new word discovery)
@param reader a reader supplying the text
@param size the number of words to extract
@param newWordsOnly whether to extract only words absent from the dictionary
@return a list of words |
public static List<WordInfo> extractWords(BufferedReader reader, int size, boolean newWordsOnly, int max_word_len, float min_freq, float min_entropy, float min_aggregation) throws IOException
{
NewWordDiscover discover = new NewWordDiscover(max_word_len, min_freq, min_entropy, min_aggregation, newWordsOnly);
return discover.discover(reader, size);
} | Extract words (new word discovery)
@param reader a reader supplying the text
@param size the number of words to extract
@param newWordsOnly whether to extract only words absent from the dictionary
@param max_word_len maximum word length
@param min_freq minimum word frequency
@param min_entropy minimum boundary entropy
@param min_aggregation minimum aggregation (mutual information)
@return a list of words |
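The extra parameters map directly onto the NewWordDiscover constructor; a sketch with illustrative (not recommended) thresholds:
// max_word_len=4, min_freq disabled, min_entropy=1.0, min_aggregation=80, dictionary words filtered out
List<WordInfo> words = HanLP.extractWords(reader, 200, true, 4, 0.0f, 1.0f, 80f);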
public static List<String> extractKeyword(String document, int size)
{
return TextRankKeyword.getKeywordList(document, size);
} | Extract keywords
@param document the document content
@param size how many keywords to extract
@return a list of keywords |
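A usage sketch (HanLP facade assumed; the document is only a sample):
String document = "程序员(英文Programmer)是从事程序开发、维护的专业人员。";
List<String> keywordList = HanLP.extractKeyword(document, 5);
System.out.println(keywordList);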
public static List<String> extractSummary(String document, int size, String sentence_separator)
{
return TextRankSentence.getTopSentenceList(document, size, sentence_separator);
} | Automatic summarization
@param document the target document
@param size the number of key sentences required
@param sentence_separator regex used to split the document into sentences, e.g. [。??!!;;]
@return a list of key sentences |
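A usage sketch reusing the separator regex suggested above:
List<String> sentenceList = HanLP.extractSummary(document, 3, "[。??!!;;]");
System.out.println(sentenceList);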
public static String getSummary(String document, int max_length, String sentence_separator)
{
        // The max_length parameter of this method refers to the maximum string length of the summary;
        // the summary actually generated may be shorter than the requested length, but never longer.
return TextRankSentence.getSummary(document, max_length, sentence_separator);
} | Automatic summarization
@param document the target document
@param max_length the maximum length of the summary
@param sentence_separator regex used to split the document into sentences, e.g. [。??!!;;]
@return the summary text |
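The length-bounded variant returns a single string rather than a sentence list:
String summary = HanLP.getSummary(document, 100, "[。??!!;;]");
System.out.println(summary);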
public static String[][] spiltArray(String[] src, double rate)
{
assert 0 <= rate && rate <= 1;
String[][] output = new String[2][];
output[0] = new String[(int) (src.length * rate)];
output[1] = new String[src.length - output[0].length];
System.arraycopy(src, 0, output[0], 0, output[0].length);
System.arraycopy(src, output[0].length, output[1], 0, output[1].length);
return output;
} | Split an array into two arrays
@param src the source array
@param rate the proportion taken by the first array
@return two arrays |
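A sketch of a 90/10 train/test split with this helper (note the method name is spelled spiltArray in the source; corpusLines stands for an existing String[]):
String[][] parts = spiltArray(corpusLines, 0.9);
String[] trainSet = parts[0];   // first 90%
String[] testSet  = parts[1];   // remaining 10%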
public static Map<String, String[]> splitMap(Map<String, String[]> src, double rate)
{
assert 0 <= rate && rate <= 1;
Map<String, String[]> output = new TreeMap<String, String[]>();
for (Map.Entry<String, String[]> entry : src.entrySet())
{
String[][] array = spiltArray(entry.getValue(), rate);
output.put(entry.getKey(), array[0]);
entry.setValue(array[1]);
}
return output;
} | Split a Map; the original map is modified in place
@param src the source map, which afterwards keeps only the remaining portion
@param rate the proportion moved into the returned map
@return the split-off map |
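splitMap carves the first portion of every category out into a new map and leaves the remainder in the original; a sketch:
Map<String, String[]> corpus = new TreeMap<String, String[]>();
corpus.put("sports", new String[]{"doc1", "doc2", "doc3", "doc4"});
Map<String, String[]> trainingSet = splitMap(corpus, 0.75);   // 3 documents per category
// corpus now holds the remaining 25% as the test set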
static boolean load(String path)
{
if (loadDat(path)) return true;
// 从文本中载入并且尝试生成dat
StringDictionary dictionary = new StringDictionary("=");
if (!dictionary.load(path)) return false;
TreeMap<String, Pinyin[]> map = new TreeMap<String, Pinyin[]>();
for (Map.Entry<String, String> entry : dictionary.entrySet())
{
String[] args = entry.getValue().split(",");
Pinyin[] pinyinValue = new Pinyin[args.length];
for (int i = 0; i < pinyinValue.length; ++i)
{
try
{
Pinyin pinyin = Pinyin.valueOf(args[i]);
pinyinValue[i] = pinyin;
}
catch (IllegalArgumentException e)
{
logger.severe("读取拼音词典" + path + "失败,问题出在【" + entry + "】,异常是" + e);
return false;
}
}
map.put(entry.getKey(), pinyinValue);
}
trie.build(map);
logger.info("正在缓存双数组" + path);
saveDat(path, trie, map.entrySet());
return true;
} | Load the dictionary
@param path the dictionary path
@return whether loading succeeded |
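The loader expects one headword per line, with the word and its pinyin sequence separated by '=' and syllables separated by ','; each syllable must name a Pinyin enum constant carrying its tone number. A hedged illustration of the format (entries are examples, not guaranteed to match the shipped dictionary):
重庆=chong2,qing4
重要=zhong4,yao4
银行=yin2,hang2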
protected static List<Pinyin> segLongest(char[] charArray, AhoCorasickDoubleArrayTrie<Pinyin[]> trie)
{
return segLongest(charArray, trie, true);
} | Match pinyin using the longest-match segmentation algorithm
@param charArray
@param trie
@return |
public void combine(SimpleDictionary<V> other)
{
if (other.trie == null)
{
logger.warning("有个词典还没加载");
return;
}
for (Map.Entry<String, V> entry : other.trie.entrySet())
{
if (trie.containsKey(entry.getKey())) continue;
trie.put(entry.getKey(), entry.getValue());
}
} | Merge another dictionary into this one, treating this dictionary as primary; entries already present here are not overwritten by the other dictionary
@param other the secondary dictionary |
public int remove(Filter filter)
{
int size = trie.size();
for (Map.Entry<String, V> entry : entrySet())
{
if (filter.remove(entry))
{
trie.remove(entry.getKey());
}
}
return size - trie.size();
} | Remove some entries
@param filter the filter
@return how many entries were removed |
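A sketch of the filter-based removal; Filter is assumed to be a single-method interface whose remove(Map.Entry) returns true for entries to delete, as the loop above implies, and dictionary stands for a SimpleDictionary<String> instance:
int removed = dictionary.remove(new Filter()
{
    public boolean remove(Map.Entry<String, String> entry)
    {
        return entry.getKey().length() < 2;   // drop single-character headwords
    }
});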
protected void posTag(char[] charArray, int[] wordNet, Nature[] natureArray)
{
if (config.speechTagging)
{
for (int i = 0; i < natureArray.length; )
{
if (natureArray[i] == null)
{
int j = i + 1;
for (; j < natureArray.length; ++j)
{
if (natureArray[j] != null) break;
}
List<AtomNode> atomNodeList = quickAtomSegment(charArray, i, j);
for (AtomNode atomNode : atomNodeList)
{
if (atomNode.sWord.length() >= wordNet[i])
{
wordNet[i] = atomNode.sWord.length();
natureArray[i] = atomNode.getNature();
i += wordNet[i];
}
}
i = j;
}
else
{
++i;
}
}
}
} | Part-of-speech tagging
@param charArray the character array
@param wordNet the word lengths
@param natureArray the output POS tags |
static boolean saveDat(TreeMap<String, Character> map)
{
try
{
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(IOUtil.newOutputStream(path + Predefine.VALUE_EXT)));
out.writeInt(map.size());
for (Character character : map.values())
{
out.writeChar(character);
}
out.close();
}
catch (Exception e)
{
logger.warning("保存值" + path + Predefine.VALUE_EXT + "失败" + e);
return false;
}
return trie.save(path + Predefine.TRIE_EXT);
} | Save the double-array trie (dat) to disk
@param map the value map whose values are written alongside the trie
@return whether saving succeeded |
public WordVectorModel train(String trainFileName, String modelFileName)
{
Config settings = new Config();
settings.setInputFile(trainFileName);
settings.setLayer1Size(layerSize);
settings.setUseContinuousBagOfWords(type == NeuralNetworkType.CBOW);
settings.setUseHierarchicalSoftmax(useHierarchicalSoftmax);
settings.setNegative(negativeSamples);
settings.setNumThreads(numThreads);
settings.setAlpha(initialLearningRate == null ? type.getDefaultInitialLearningRate() : initialLearningRate);
settings.setSample(downSampleRate);
settings.setWindow(windowSize);
settings.setIter(iterations);
settings.setMinCount(minFrequency);
settings.setOutputFile(modelFileName);
Word2VecTraining model = new Word2VecTraining(settings);
final long timeStart = System.currentTimeMillis();
// if (callback == null)
// {
// callback = new TrainingCallback()
// {
// public void corpusLoading(float percent)
// {
// System.out.printf("\r加载训练语料:%.2f%%", percent);
// }
//
// public void corpusLoaded(int vocWords, int trainWords, int totalWords)
// {
// System.out.println();
// System.out.printf("词表大小:%d\n", vocWords);
// System.out.printf("训练词数:%d\n", trainWords);
// System.out.printf("语料词数:%d\n", totalWords);
// }
//
// public void training(float alpha, float progress)
// {
// System.out.printf("\r学习率:%.6f 进度:%.2f%%", alpha, progress);
// long timeNow = System.currentTimeMillis();
// long costTime = timeNow - timeStart + 1;
// progress /= 100;
// String etd = Utility.humanTime((long) (costTime / progress * (1.f - progress)));
// if (etd.length() > 0) System.out.printf(" 剩余时间:%s", etd);
// System.out.flush();
// }
// };
// }
settings.setCallback(callback);
try
{
model.trainModel();
System.out.println();
System.out.printf("训练结束,一共耗时:%s\n", Utility.humanTime(System.currentTimeMillis() - timeStart));
return new WordVectorModel(modelFileName);
}
catch (IOException e)
{
logger.warning("训练过程中发生IO异常\n" + TextUtility.exceptionToString(e));
}
return null;
} | Run the training
@param trainFileName the input corpus file
@param modelFileName the output model path
@return the word vector model |
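A hedged usage sketch (assuming this train method lives on a Word2VecTrainer-style class and that WordVectorModel offers a nearest() query, as in HanLP's word2vec module; file names are illustrative):
Word2VecTrainer trainer = new Word2VecTrainer();
WordVectorModel model = trainer.train("corpus_segmented.txt", "word2vec.bin");
if (model != null)
    System.out.println(model.nearest("语言"));   // words closest to the query in vector space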
public Map<Integer, Double> chi_square(BaseFeatureData stats)
{
Map<Integer, Double> selectedFeatures = new HashMap<Integer, Double>();
double N1dot, N0dot, N00, N01, N10, N11;
double chisquareScore;
Double previousScore;
for (int feature = 0; feature < stats.featureCategoryJointCount.length; feature++)
{
int[] categoryList = stats.featureCategoryJointCount[feature];
//计算 N1. (含有该特征的文档数量)
N1dot = 0;
for (int count : categoryList)
{
N1dot += count;
}
//还有 N0. (不含该特征的文档数量)
N0dot = stats.n - N1dot;
for (int category = 0; category < categoryList.length; category++)
{
N11 = categoryList[category]; //N11 是含有该特征并属于该类目的文档数量
N01 = stats.categoryCounts[category] - N11; //N01 是不含该特征却属于该类目的文档数量
N00 = N0dot - N01; //N00 是不含该特征也不属于该类目的文档数量
N10 = N1dot - N11; //N10 是含有该特征却不属于该类目的文档数量
//基于上述统计数据计算卡方分值
chisquareScore = stats.n * Math.pow(N11 * N00 - N10 * N01, 2) / ((N11 + N01) * (N11 + N10) * (N10 + N00) * (N01 + N00));
//如果分数大于临界值则加入特征列表
if (chisquareScore >= chisquareCriticalValue)
{
previousScore = selectedFeatures.get(feature);
if (previousScore == null || chisquareScore > previousScore)
{
selectedFeatures.put(feature, chisquareScore);
}
}
}
}
if (selectedFeatures.size() == 0) // 当特征全部无法通过卡方检测时,取全集作为特征
{
for (int feature = 0; feature < stats.featureCategoryJointCount.length; feature++)
{
selectedFeatures.put(feature, 0.);
}
}
if (selectedFeatures.size() > maxSize)
{
MaxHeap<Map.Entry<Integer, Double>> maxHeap = new MaxHeap<Map.Entry<Integer, Double>>(maxSize, new Comparator<Map.Entry<Integer, Double>>()
{
@Override
public int compare(Map.Entry<Integer, Double> o1, Map.Entry<Integer, Double> o2)
{
return o1.getValue().compareTo(o2.getValue());
}
});
for (Map.Entry<Integer, Double> entry : selectedFeatures.entrySet())
{
maxHeap.add(entry);
}
selectedFeatures.clear();
for (Map.Entry<Integer, Double> entry : maxHeap)
{
selectedFeatures.put(entry.getKey(), entry.getValue());
}
}
return selectedFeatures;
} | Perform feature selection with the chi-square test<br>
https://nlp.stanford.edu/IR-book/html/htmledition/feature-selectionchi2-feature-selection-1.html
@param stats the base feature statistics
@return the selected features mapped to their chi-square scores |
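To make the contingency counts concrete, a small worked example of the same chi-square formula for one feature/category pair (all counts are invented for illustration):
// N = 1000 documents: N11 = 49 (feature present, in category), N10 = 141 (present, other categories),
// N01 = 27 (absent, in category), N00 = 783 (absent, other categories)
double n = 1000, n11 = 49, n10 = 141, n01 = 27, n00 = 783;
double chi = n * Math.pow(n11 * n00 - n10 * n01, 2)
        / ((n11 + n01) * (n11 + n10) * (n10 + n00) * (n01 + n00));
System.out.println(chi);   // ≈ 110.5, far above the 3.841 critical value at p = 0.05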
public static void shuffleArray(int[] ar)
{
Random rnd = new Random();
for (int i = ar.length - 1; i > 0; i--)
{
int index = rnd.nextInt(i + 1);
// Simple swap
int a = ar[index];
ar[index] = ar[i];
ar[i] = a;
}
} | Fisher–Yates shuffle
@param ar |
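A quick usage sketch of the in-place shuffle (java.util.Arrays is used only to print the result):
int[] indices = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
shuffleArray(indices);
System.out.println(java.util.Arrays.toString(indices));   // same elements, random order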