public static <E extends Enum<E>> List<E> computeEnumSimply(List<EnumItem<E>> roleTagList, TransformMatrixDictionary<E> transformMatrixDictionary)
{
int length = roleTagList.size() - 1;
List<E> tagList = new LinkedList<E>();
Iterator<EnumItem<E>> iterator = roleTagList.iterator();
EnumItem<E> start = iterator.next();
E pre = start.labelMap.entrySet().iterator().next().getKey();
E perfect_tag = pre;
// the first tag is deterministic
tagList.add(pre);
for (int i = 0; i < length; ++i)
{
double perfect_cost = Double.MAX_VALUE;
EnumItem<E> item = iterator.next();
for (E cur : item.labelMap.keySet())
{
double now = transformMatrixDictionary.transititon_probability[pre.ordinal()][cur.ordinal()] - Math.log((item.getFrequency(cur) + 1e-8) / transformMatrixDictionary.getTotalFrequency(cur));
if (perfect_cost > now)
{
perfect_cost = now;
perfect_tag = cur;
}
}
pre = perfect_tag;
tagList.add(pre);
}
return tagList;
} | A simplified "Viterbi" that uses only the transition matrix (greedy decoding, no backtracking)
@param roleTagList the observation sequence
@param transformMatrixDictionary the transition matrix
@param <E> the concrete type of EnumItem
@return the predicted tag sequence |
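Read in the loop's own notation, each position greedily picks the tag that minimizes a cost combining the transition weight from the previous tag with a smoothed negative log emission probability; there is no backtracking, which is what makes this the "simple" variant:

$$\mathrm{cost}(cur) = A_{pre,\,cur} - \log\frac{c(cur) + 10^{-8}}{N(cur)}$$

where $A$ is transititon_probability, $c(cur)$ is item.getFrequency(cur) and $N(cur)$ is transformMatrixDictionary.getTotalFrequency(cur).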
public int[] getTopSentence(int size)
{
Collection<Integer> values = top.values();
size = Math.min(size, values.size());
int[] indexArray = new int[size];
Iterator<Integer> it = values.iterator();
for (int i = 0; i < size; ++i)
{
indexArray[i] = it.next();
}
return indexArray;
} | Get the top key sentences
@param size how many sentences to return
@return indices of the key sentences |
static List<String> splitSentence(String document, String sentence_separator)
{
List<String> sentences = new ArrayList<String>();
for (String line : document.split("[\r\n]"))
{
line = line.trim();
if (line.length() == 0) continue;
for (String sent : line.split(sentence_separator)) // e.g. [,,。::“”??!!;;]
{
sent = sent.trim();
if (sent.length() == 0) continue;
sentences.add(sent);
}
}
return sentences;
} | Split a document into sentences
@param document the document to split
@param sentence_separator sentence separator as a regular expression, e.g. [。:??!!;;]
@return the list of sentences |
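A hypothetical usage sketch (splitSentence is package-private, so this assumes same-package access; the separator is the example regex from the docstring):

List<String> sentences = splitSentence("今天天气很好。我们去公园吧!", "[。??!!;;]");
// => ["今天天气很好", "我们去公园吧"]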
private static List<List<String>> convertSentenceListToDocument(List<String> sentenceList)
{
List<List<String>> docs = new ArrayList<List<String>>(sentenceList.size());
for (String sentence : sentenceList)
{
List<Term> termList = StandardTokenizer.segment(sentence.toCharArray());
List<String> wordList = new LinkedList<String>();
for (Term term : termList)
{
if (CoreStopWordDictionary.shouldInclude(term))
{
wordList.add(term.word);
}
}
docs.add(wordList);
}
return docs;
} | Convert a sentence list into a document (one word list per sentence, stop words removed)
@param sentenceList the sentences to tokenize
@return one word list per sentence |
public static List<String> getTopSentenceList(String document, int size)
{
return getTopSentenceList(document, size, default_sentence_separator);
} | One-line convenience interface
@param document the target document
@param size number of key sentences wanted
@return the list of key sentences |
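A hypothetical usage sketch of this convenience interface (the document variable is illustrative):

String document = "...";  // any Chinese text
List<String> top3 = TextRankSentence.getTopSentenceList(document, 3);
// top3 holds the three highest-ranked key sentences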
public static List<String> getTopSentenceList(String document, int size, String sentence_separator)
{
List<String> sentenceList = splitSentence(document, sentence_separator);
List<List<String>> docs = convertSentenceListToDocument(sentenceList);
TextRankSentence textRank = new TextRankSentence(docs);
int[] topSentence = textRank.getTopSentence(size);
List<String> resultList = new LinkedList<String>();
for (int i : topSentence)
{
resultList.add(sentenceList.get(i));
}
return resultList;
} | One-line convenience interface
@param document the target document
@param size number of key sentences wanted
@param sentence_separator sentence separator as a regex, e.g. [。??!!;;]
@return the list of key sentences |
public static String getSummary(String document, int max_length, String sentence_separator)
{
List<String> sentenceList = splitSentence(document, sentence_separator);
int sentence_count = sentenceList.size();
int document_length = document.length();
int sentence_length_avg = document_length / sentence_count;
int size = max_length / sentence_length_avg + 1;
List<List<String>> docs = convertSentenceListToDocument(sentenceList);
TextRankSentence textRank = new TextRankSentence(docs);
int[] topSentence = textRank.getTopSentence(size);
List<String> resultList = new LinkedList<String>();
for (int i : topSentence)
{
resultList.add(sentenceList.get(i));
}
resultList = permutation(resultList, sentenceList);
resultList = pick_sentences(resultList, max_length);
return TextUtility.join("。", resultList);
} | One-line convenience interface
@param document the target document
@param max_length desired summary length
@param sentence_separator sentence separator as a regex, e.g. [。??!!;;]
@return the summary text |
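How the sentence budget above is derived, with illustrative numbers:

// document_length = 500 chars over sentence_count = 10  =>  sentence_length_avg = 50
// max_length = 200                                      =>  size = 200 / 50 + 1 = 5
// i.e. roughly enough top sentences to fill max_length, plus one spare;
// permutation(...) and pick_sentences(...) then (presumably) restore document order and trim to max_length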
public static ByteArray createByteArray(String path)
{
byte[] bytes = IOUtil.readBytes(path);
if (bytes == null) return null;
return new ByteArray(bytes);
} | Read a byte array from a file
@param path the file path
@return the ByteArray, or null on failure |
public String nextString()
{
char[] buffer = new char[nextInt()];
for (int i = 0; i < buffer.length; ++i)
{
buffer[i] = nextChar();
}
return new String(buffer);
} | Read a String; note this is the double-byte version, preceded by an int that encodes its length
@return the string |
public String nextUTF()
{
int utflen = nextUnsignedShort();
byte[] bytearr = null;
char[] chararr = null;
bytearr = new byte[utflen];
chararr = new char[utflen];
int c, char2, char3;
int count = 0;
int chararr_count = 0;
for (int i = 0; i < utflen; ++i)
{
bytearr[i] = nextByte();
}
while (count < utflen)
{
c = (int) bytearr[count] & 0xff;
if (c > 127) break;
count++;
chararr[chararr_count++] = (char) c;
}
while (count < utflen)
{
c = (int) bytearr[count] & 0xff;
switch (c >> 4)
{
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
/* 0xxxxxxx*/
count++;
chararr[chararr_count++] = (char) c;
break;
case 12:
case 13:
/* 110x xxxx 10xx xxxx*/
count += 2;
if (count > utflen)
logger.severe(
"malformed input: partial character at end");
char2 = (int) bytearr[count - 1];
if ((char2 & 0xC0) != 0x80)
logger.severe(
"malformed input around byte " + count);
chararr[chararr_count++] = (char) (((c & 0x1F) << 6) |
(char2 & 0x3F));
break;
case 14:
/* 1110 xxxx 10xx xxxx 10xx xxxx */
count += 3;
if (count > utflen)
logger.severe(
"malformed input: partial character at end");
char2 = (int) bytearr[count - 2];
char3 = (int) bytearr[count - 1];
if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80))
logger.severe(
"malformed input around byte " + (count - 1));
chararr[chararr_count++] = (char) (((c & 0x0F) << 12) |
((char2 & 0x3F) << 6) |
((char3 & 0x3F) << 0));
break;
default:
/* 10xx xxxx, 1111 xxxx */
logger.severe(
"malformed input around byte " + count);
}
}
// The number of chars produced may be less than utflen
return new String(chararr, 0, chararr_count);
} | Read a UTF string
@return the decoded string |
public static IndexMaps createIndices(String conllPath, boolean labeled, boolean lowercased, String clusterFile) throws IOException
{
HashMap<String, Integer> wordMap = new HashMap<String, Integer>();
HashMap<Integer, Integer> labels = new HashMap<Integer, Integer>();
HashMap<String, Integer> clusterMap = new HashMap<String, Integer>();
HashMap<Integer, Integer> cluster4Map = new HashMap<Integer, Integer>();
HashMap<Integer, Integer> cluster6Map = new HashMap<Integer, Integer>();
String rootString = "ROOT";
wordMap.put("ROOT", 0);
labels.put(0, 0);
// all label ids must start at zero and be contiguous
BufferedReader reader = new BufferedReader(new FileReader(conllPath));
String line;
while ((line = reader.readLine()) != null)
{
String[] args = line.trim().split("\t");
if (args.length > 7)
{
String label = args[7];
int head = Integer.parseInt(args[6]);
if (head == 0)
rootString = label;
if (!labeled)
label = "~";
else if (label.equals("_"))
label = "-";
if (!wordMap.containsKey(label))
{
labels.put(wordMap.size(), labels.size());
wordMap.put(label, wordMap.size());
}
}
}
reader = new BufferedReader(new FileReader(conllPath));
while ((line = reader.readLine()) != null)
{
String[] cells = line.trim().split("\t");
if (cells.length > 7)
{
String pos = cells[3];
if (!wordMap.containsKey(pos))
{
wordMap.put(pos, wordMap.size());
}
}
}
if (clusterFile.length() > 0)
{
reader = new BufferedReader(new FileReader(clusterFile));
while ((line = reader.readLine()) != null)
{
String[] cells = line.trim().split("\t");
if (cells.length > 2)
{
String cluster = cells[0];
String word = cells[1];
String prefix4 = cluster.substring(0, Math.min(4, cluster.length()));
String prefix6 = cluster.substring(0, Math.min(6, cluster.length()));
int clusterId = wordMap.size();
if (!wordMap.containsKey(cluster))
{
clusterMap.put(word, wordMap.size());
wordMap.put(cluster, wordMap.size());
}
else
{
clusterId = wordMap.get(cluster);
clusterMap.put(word, clusterId);
}
int pref4Id = wordMap.size();
if (!wordMap.containsKey(prefix4))
{
wordMap.put(prefix4, wordMap.size());
}
else
{
pref4Id = wordMap.get(prefix4);
}
int pref6Id = wordMap.size();
if (!wordMap.containsKey(prefix6))
{
wordMap.put(prefix6, wordMap.size());
}
else
{
pref6Id = wordMap.get(prefix6);
}
cluster4Map.put(clusterId, pref4Id);
cluster6Map.put(clusterId, pref6Id);
}
}
}
reader = new BufferedReader(new FileReader(conllPath));
while ((line = reader.readLine()) != null)
{
String[] cells = line.trim().split("\t");
if (cells.length > 7)
{
String word = cells[1];
if (lowercased)
word = word.toLowerCase();
if (!wordMap.containsKey(word))
{
wordMap.put(word, wordMap.size());
}
}
}
return new IndexMaps(wordMap, labels, rootString, cluster4Map, cluster6Map, clusterMap);
} | Read a CoNLL file and create the indices
@param conllPath path to the CoNLL corpus
@param labeled whether to keep dependency labels
@param lowercased whether to lowercase words
@param clusterFile Brown cluster file; empty string to skip
@return the index maps
@throws IOException |
public ArrayList<Instance> readData(int limit, boolean keepNonProjective, boolean labeled, boolean rootFirst, boolean lowerCased, IndexMaps maps) throws IOException
{
HashMap<String, Integer> wordMap = maps.getWordId();
ArrayList<Instance> instanceList = new ArrayList<Instance>();
String line;
ArrayList<Integer> tokens = new ArrayList<Integer>();
ArrayList<Integer> tags = new ArrayList<Integer>();
ArrayList<Integer> cluster4Ids = new ArrayList<Integer>();
ArrayList<Integer> cluster6Ids = new ArrayList<Integer>();
ArrayList<Integer> clusterIds = new ArrayList<Integer>();
HashMap<Integer, Edge> goldDependencies = new HashMap<Integer, Edge>();
int sentenceCounter = 0;
while ((line = fileReader.readLine()) != null)
{
line = line.trim();
if (line.length() == 0) // blank line separating sentences
{
if (tokens.size() > 0)
{
sentenceCounter++;
if (!rootFirst)
{
for (Edge edge : goldDependencies.values())
{
if (edge.headIndex == 0)
edge.headIndex = tokens.size() + 1;
}
tokens.add(0);
tags.add(0);
cluster4Ids.add(0);
cluster6Ids.add(0);
clusterIds.add(0);
}
Sentence currentSentence = new Sentence(tokens, tags, cluster4Ids, cluster6Ids, clusterIds);
Instance instance = new Instance(currentSentence, goldDependencies);
if (keepNonProjective || !instance.isNonprojective())
instanceList.add(instance);
goldDependencies = new HashMap<Integer, Edge>();
tokens = new ArrayList<Integer>();
tags = new ArrayList<Integer>();
cluster4Ids = new ArrayList<Integer>();
cluster6Ids = new ArrayList<Integer>();
clusterIds = new ArrayList<Integer>();
}
else
{
goldDependencies = new HashMap<Integer, Edge>();
tokens = new ArrayList<Integer>();
tags = new ArrayList<Integer>();
cluster4Ids = new ArrayList<Integer>();
cluster6Ids = new ArrayList<Integer>();
clusterIds = new ArrayList<Integer>();
}
if (sentenceCounter >= limit)
{
System.out.println("buffer full..." + instanceList.size());
break;
}
}
else
{
String[] cells = line.split("\t");
if (cells.length < 8)
throw new IllegalArgumentException("invalid conll format");
int wordIndex = Integer.parseInt(cells[0]);
String word = cells[1].trim();
if (lowerCased)
word = word.toLowerCase();
String pos = cells[3].trim();
int wi = getId(word, wordMap);
int pi = getId(pos, wordMap);
tags.add(pi);
tokens.add(wi);
int headIndex = Integer.parseInt(cells[6]);
String relation = cells[7];
if (!labeled)
relation = "~";
else if (relation.equals("_"))
relation = "-";
if (headIndex == 0)
relation = "ROOT";
int ri = getId(relation, wordMap);
if (headIndex == -1)
ri = -1;
int[] ids = maps.clusterId(word);
clusterIds.add(ids[0]);
cluster4Ids.add(ids[1]);
cluster6Ids.add(ids[2]);
if (headIndex >= 0)
goldDependencies.put(wordIndex, new Edge(headIndex, ri));
}
}
if (tokens.size() > 0)
{
if (!rootFirst)
{
for (int gold : goldDependencies.keySet())
{
if (goldDependencies.get(gold).headIndex == 0)
goldDependencies.get(gold).headIndex = goldDependencies.size() + 1;
}
tokens.add(0);
tags.add(0);
cluster4Ids.add(0);
cluster6Ids.add(0);
clusterIds.add(0);
}
sentenceCounter++;
Sentence currentSentence = new Sentence(tokens, tags, cluster4Ids, cluster6Ids, clusterIds);
instanceList.add(new Instance(currentSentence, goldDependencies));
}
return instanceList;
} | Read sentences
@param limit maximum number of sentences
@param keepNonProjective whether to keep non-projective sentences
@param labeled
@param rootFirst whether to put the root at the front
@param lowerCased
@param maps feature id map
@return the list of instances read
@throws IOException |
public void connect(int from, int to, double weight)
{
edgesTo[to].add(new EdgeFrom(from, weight, vertexes[from].word + '@' + vertexes[to].word));
} | Connect two nodes
@param from the source node
@param to the target node
@param weight the cost |
public List<Vertex> parsePath(int[] path)
{
List<Vertex> vertexList = new LinkedList<Vertex>();
for (int i : path)
{
vertexList.add(vertexes[i]);
}
return vertexList;
} | Resolve the path corresponding to an array of node indices
@param path
@return |
public static String parseResult(List<Vertex> path)
{
if (path.size() < 2)
{
throw new RuntimeException("路径节点数小于2:" + path);
}
StringBuffer sb = new StringBuffer();
for (int i = 1; i < path.size() - 1; ++i)
{
Vertex v = path.get(i);
sb.append(v.getRealWord() + " ");
}
return sb.toString();
} | Convert a path into a space-delimited result string
@param path
@return |
public void learn(List<Sentence> sentenceList)
{
List<List<IWord>> s = new ArrayList<List<IWord>>(sentenceList.size());
for (Sentence sentence : sentenceList)
{
s.add(sentence.wordList);
}
compute(s);
} | Same as compute
@param sentenceList |
public void train(String corpus)
{
CorpusLoader.walk(corpus, new CorpusLoader.Handler()
{
@Override
public void handle(Document document)
{
List<List<Word>> simpleSentenceList = document.getSimpleSentenceList();
List<List<IWord>> compatibleList = new LinkedList<List<IWord>>();
for (List<Word> wordList : simpleSentenceList)
{
compatibleList.add(new LinkedList<IWord>(wordList));
}
CommonDictionaryMaker.this.compute(compatibleList);
}
});
} | Train
@param corpus the corpus path |
void set(int id, boolean bit)
{
if (bit)
{
_units.set(id / UNIT_SIZE, _units.get(id / UNIT_SIZE)
| 1 << (id % UNIT_SIZE));
}
} | Set the bit at a position (only sets; clearing is not supported)
@param id the bit position
@param bit the bit value |
void build()
{
_ranks = new int[_units.size()];
_numOnes = 0;
for (int i = 0; i < _units.size(); ++i)
{
_ranks[i] = _numOnes;
_numOnes += popCount(_units.get(i));
}
} | Build the rank index |
private static int popCount(int unit)
{
unit = ((unit & 0xAAAAAAAA) >>> 1) + (unit & 0x55555555);
unit = ((unit & 0xCCCCCCCC) >>> 2) + (unit & 0x33333333);
unit = ((unit >>> 4) + unit) & 0x0F0F0F0F;
unit += unit >>> 8;
unit += unit >>> 16;
return unit & 0xFF;
} | Number of 1 bits (population count)
@param unit
@return |
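A minimal sanity check for the SWAR popcount above (hypothetical test; the method is private, so this assumes same-class access):

// 0xF0F0F0F0 contains four 0xF nibbles, i.e. 16 one-bits
assert popCount(0xF0F0F0F0) == 16;
assert popCount(0xF0F0F0F0) == Integer.bitCount(0xF0F0F0F0); // agrees with the JDK intrinsic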
public Sentence makeSentence(String[] words, String[] posTags, boolean rootFirst, boolean lowerCased)
{
ArrayList<Integer> tokens = new ArrayList<Integer>();
ArrayList<Integer> tags = new ArrayList<Integer>();
ArrayList<Integer> bc4 = new ArrayList<Integer>();
ArrayList<Integer> bc6 = new ArrayList<Integer>();
ArrayList<Integer> bcf = new ArrayList<Integer>();
int i = 0;
for (String word : words)
{
if (word.length() == 0)
continue;
String lowerCaseWord = word.toLowerCase();
if (lowerCased)
word = lowerCaseWord;
int[] clusterIDs = clusterId(word);
bcf.add(clusterIDs[0]);
bc4.add(clusterIDs[1]);
bc6.add(clusterIDs[2]);
String pos = posTags[i];
int wi = -1;
if (wordId.containsKey(word))
wi = wordId.get(word);
int pi = -1;
if (wordId.containsKey(pos))
pi = wordId.get(pos);
tokens.add(wi);
tags.add(pi);
i++;
}
if (!rootFirst)
{
tokens.add(0);
tags.add(0);
bcf.add(0);
bc6.add(0);
bc4.add(0);
}
return new Sentence(tokens, tags, bc4, bc6, bcf);
} | Convert the strings in a sentence to ids
@param words
@param posTags
@param rootFirst
@param lowerCased
@return |
public int[] clusterId(String word)
{
int[] ids = new int[3];
ids[0] = -100;
ids[1] = -100;
ids[2] = -100;
if (brownFullClusters.containsKey(word))
ids[0] = brownFullClusters.get(word);
if (ids[0] > 0)
{
ids[1] = brown4Clusters.get(ids[0]);
ids[2] = brown6Clusters.get(ids[0]);
}
return ids;
} | Get the Brown cluster ids
@param word
@return |
protected void segment(final String sentence, final String normalized, final List<String> wordList, final List<CoreDictionary.Attribute> attributeList)
{
if (attributeList != null)
{
final int[] offset = new int[]{0};
CustomDictionary.parseLongestText(sentence, new AhoCorasickDoubleArrayTrie.IHit<CoreDictionary.Attribute>()
{
@Override
public void hit(int begin, int end, CoreDictionary.Attribute value)
{
if (begin != offset[0])
{
segmentAfterRule(sentence.substring(offset[0], begin), normalized.substring(offset[0], begin), wordList);
}
while (attributeList.size() < wordList.size())
attributeList.add(null);
wordList.add(sentence.substring(begin, end));
attributeList.add(value);
assert wordList.size() == attributeList.size() : "word list and attribute list have different lengths";
offset[0] = end;
}
});
if (offset[0] != sentence.length())
{
segmentAfterRule(sentence.substring(offset[0]), normalized.substring(offset[0]), wordList);
}
}
else
{
segmentAfterRule(sentence, normalized, wordList);
}
} | Segment
@param sentence the text
@param normalized the normalized text
@param wordList list receiving the words
@param attributeList list receiving POS attributes from the user dictionary; pass null to skip the user dictionary |
public List<String> segment(String sentence)
{
return segment(sentence, CharTable.convert(sentence));
} | Chinese word segmentation
@param sentence
@return |
public List<String> segment(final String sentence, final String normalized)
{
final List<String> wordList = new LinkedList<String>();
segment(sentence, normalized, wordList);
return wordList;
} | This method consults the user dictionary
@param sentence
@param normalized
@return |
protected boolean acceptCustomWord(int begin, int end, CoreDictionary.Attribute value)
{
return config.forceCustomDictionary || (end - begin >= 4 && !value.hasNatureStartsWith("nr") && !value.hasNatureStartsWith("ns") && !value.hasNatureStartsWith("nt"));
} | Called when segmentation finds a word in the user dictionary; this controls whether to accept it
@param begin start position
@param end end position
@param value POS attribute
@return true to accept
@deprecated since 1.6.7; in forced mode the longest match is used, otherwise words are merged over the segmentation result |
private void pushPiece(String sentence, String normalized, int start, int end, byte preType, List<String> wordList)
{
if (preType == CharType.CT_CHINESE)
{
segmenter.segment(sentence.substring(start, end), normalized.substring(start, end), wordList);
}
else
{
wordList.add(sentence.substring(start, end));
}
} | CT_CHINESE spans are handed to the statistical segmenter; everything else is treated as a single unit
@param sentence
@param normalized
@param start
@param end
@param preType
@param wordList |
protected void segmentAfterRule(String sentence, String normalized, List<String> wordList)
{
if (!enableRuleBasedSegment)
{
segmenter.segment(sentence, normalized, wordList);
return;
}
int start = 0;
int end = start;
byte preType = typeTable[normalized.charAt(end)];
byte curType;
while (++end < normalized.length())
{
curType = typeTable[normalized.charAt(end)];
if (curType != preType)
{
if (preType == CharType.CT_NUM)
{
// floating point number recognition
if (",,..".indexOf(normalized.charAt(end)) != -1)
{
if (end + 1 < normalized.length())
{
if (typeTable[normalized.charAt(end + 1)] == CharType.CT_NUM)
{
continue;
}
}
}
else if ("年月日时分秒".indexOf(normalized.charAt(end)) != -1)
{
preType = curType; // hand over to the statistical segmenter
continue;
}
}
pushPiece(sentence, normalized, start, end, preType, wordList);
start = end;
}
preType = curType;
}
if (end == normalized.length())
pushPiece(sentence, normalized, start, end, preType, wordList);
} | The ugly rule-based system
@param sentence
@param normalized
@param wordList |
private List<CoreDictionary.Attribute> segmentWithAttribute(String original, String normalized, List<String> wordList)
{
List<CoreDictionary.Attribute> attributeList;
if (config.useCustomDictionary)
{
if (config.forceCustomDictionary)
{
attributeList = new LinkedList<CoreDictionary.Attribute>();
segment(original, normalized, wordList, attributeList);
}
else
{
segmentAfterRule(original, normalized, wordList);
attributeList = combineWithCustomDictionary(wordList);
}
}
else
{
segmentAfterRule(original, normalized, wordList);
attributeList = null;
}
return attributeList;
} | Segment, returning the attributes found in the user dictionary
@param original
@param normalized
@param wordList
@return |
protected static List<CoreDictionary.Attribute> combineWithCustomDictionary(List<String> vertexList)
{
String[] wordNet = new String[vertexList.size()];
vertexList.toArray(wordNet);
CoreDictionary.Attribute[] attributeArray = new CoreDictionary.Attribute[wordNet.length];
// merge via the double-array trie (DAT)
DoubleArrayTrie<CoreDictionary.Attribute> dat = CustomDictionary.dat;
int length = wordNet.length;
for (int i = 0; i < length; ++i)
{
int state = 1;
state = dat.transition(wordNet[i], state);
if (state > 0)
{
int to = i + 1;
int end = to;
CoreDictionary.Attribute value = dat.output(state);
for (; to < length; ++to)
{
state = dat.transition(wordNet[to], state);
if (state < 0) break;
CoreDictionary.Attribute output = dat.output(state);
if (output != null)
{
value = output;
end = to + 1;
}
}
if (value != null)
{
combineWords(wordNet, i, end, attributeArray, value);
i = end - 1;
}
}
}
// merge via the BinTrie
if (CustomDictionary.trie != null)
{
for (int i = 0; i < length; ++i)
{
if (wordNet[i] == null) continue;
BaseNode<CoreDictionary.Attribute> state = CustomDictionary.trie.transition(wordNet[i], 0);
if (state != null)
{
int to = i + 1;
int end = to;
CoreDictionary.Attribute value = state.getValue();
for (; to < length; ++to)
{
if (wordNet[to] == null) continue;
state = state.transition(wordNet[to], 0);
if (state == null) break;
if (state.getValue() != null)
{
value = state.getValue();
end = to + 1;
}
}
if (value != null)
{
combineWords(wordNet, i, end, attributeArray, value);
i = end - 1;
}
}
}
}
vertexList.clear();
List<CoreDictionary.Attribute> attributeList = new LinkedList<CoreDictionary.Attribute>();
for (int i = 0; i < wordNet.length; i++)
{
if (wordNet[i] != null)
{
vertexList.add(wordNet[i]);
attributeList.add(attributeArray[i]);
}
}
return attributeList;
} | Merge the coarse segmentation result using the user dictionary
@param vertexList the coarse segmentation result
@return the merged result |
private static void combineWords(String[] wordNet, int start, int end, CoreDictionary.Attribute[] attributeArray, CoreDictionary.Attribute value)
{
if (start + 1 != end) // small optimization: a single word needs no merging, just apply the new attribute
{
StringBuilder sbTerm = new StringBuilder();
for (int j = start; j < end; ++j)
{
if (wordNet[j] == null) continue;
sbTerm.append(wordNet[j]);
wordNet[j] = null;
}
wordNet[start] = sbTerm.toString();
}
attributeArray[start] = value;
} | Merge consecutive words into one
@param wordNet the word lattice
@param start start index (inclusive)
@param end end index (exclusive)
@param value the new attribute |
public List<Hit<V>> parseText(String text)
{
int position = 1;
int currentState = 0;
List<Hit<V>> collectedEmits = new LinkedList<Hit<V>>();
for (int i = 0; i < text.length(); ++i)
{
currentState = getState(currentState, text.charAt(i));
storeEmits(position, currentState, collectedEmits);
++position;
}
return collectedEmits;
} | Match against the full text
@param text some text
@return a list of Hit objects |
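A hypothetical usage sketch (keys, values and the query text are illustrative; build(TreeMap) is assumed to be the builder used elsewhere in HanLP):

TreeMap<String, String> map = new TreeMap<String, String>();
map.put("he", "HE");
map.put("she", "SHE");
AhoCorasickDoubleArrayTrie<String> acdat = new AhoCorasickDoubleArrayTrie<String>();
acdat.build(map);
List<Hit<String>> hits = acdat.parseText("ushers");
// each Hit carries a begin offset, an end offset and the stored value ("she" and "he" both match)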
public void parseText(String text, IHit<V> processor)
{
int position = 1;
int currentState = 0;
for (int i = 0; i < text.length(); ++i)
{
currentState = getState(currentState, text.charAt(i));
int[] hitArray = output[currentState];
if (hitArray != null)
{
for (int hit : hitArray)
{
processor.hit(position - l[hit], position, v[hit]);
}
}
++position;
}
} | Process the text
@param text the text
@param processor the handler |
public void parseText(char[] text, IHit<V> processor)
{
int position = 1;
int currentState = 0;
for (char c : text)
{
currentState = getState(currentState, c);
int[] hitArray = output[currentState];
if (hitArray != null)
{
for (int hit : hitArray)
{
processor.hit(position - l[hit], position, v[hit]);
}
}
++position;
}
} | Process the text
@param text
@param processor |
public void save(DataOutputStream out) throws Exception
{
out.writeInt(size);
for (int i = 0; i < size; i++)
{
out.writeInt(base[i]);
out.writeInt(check[i]);
out.writeInt(fail[i]);
int output[] = this.output[i];
if (output == null)
{
out.writeInt(0);
}
else
{
out.writeInt(output.length);
for (int o : output)
{
out.writeInt(o);
}
}
}
out.writeInt(l.length);
for (int length : l)
{
out.writeInt(length);
}
} | Persist
@param out a DataOutputStream
@throws Exception possible IO exceptions, etc. |
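The byte layout written by this method (and read back by the ByteArray-based load below), as derived from the code rather than any documented spec:

// int size
// size x { int base, int check, int fail, int outputLen, outputLen x int output }
// int lLen, lLen x int l
// the values v[] are never serialized; load(..., V[] value) must be handed them again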
public void save(ObjectOutputStream out) throws IOException
{
out.writeObject(base);
out.writeObject(check);
out.writeObject(fail);
out.writeObject(output);
out.writeObject(l);
} | Persist
@param out an ObjectOutputStream
@throws IOException possible IO exception |
public void load(ObjectInputStream in, V[] value) throws IOException, ClassNotFoundException
{
base = (int[]) in.readObject();
check = (int[]) in.readObject();
fail = (int[]) in.readObject();
output = (int[][]) in.readObject();
l = (int[]) in.readObject();
v = value;
} | Load
@param in an ObjectInputStream
@param value the values (values are not persisted, so they must be supplied here)
@throws IOException
@throws ClassNotFoundException |
public boolean load(ByteArray byteArray, V[] value)
{
if (byteArray == null) return false;
size = byteArray.nextInt();
base = new int[size + 65535]; // leave extra headroom to avoid out-of-bounds access
check = new int[size + 65535];
fail = new int[size + 65535];
output = new int[size + 65535][];
int length;
for (int i = 0; i < size; ++i)
{
base[i] = byteArray.nextInt();
check[i] = byteArray.nextInt();
fail[i] = byteArray.nextInt();
length = byteArray.nextInt();
if (length == 0) continue;
output[i] = new int[length];
for (int j = 0; j < output[i].length; ++j)
{
output[i][j] = byteArray.nextInt();
}
}
length = byteArray.nextInt();
l = new int[length];
for (int i = 0; i < l.length; ++i)
{
l[i] = byteArray.nextInt();
}
v = value;
return true;
} | Load
@param byteArray a byte array
@param value the value array
@return whether loading succeeded |
public V get(String key)
{
int index = exactMatchSearch(key);
if (index >= 0)
{
return v[index];
}
return null;
} | Get a value
@param key the key
@return the value, or null if absent |
private int getState(int currentState, char character)
{
int newCurrentState = transitionWithRoot(currentState, character); // first try the success (goto) transition
while (newCurrentState == -1) // on failure, follow the failure links
{
currentState = fail[currentState];
newCurrentState = transitionWithRoot(currentState, character);
}
return newCurrentState;
} | Transition, with failure-link support
@param currentState
@param character
@return |
private void storeEmits(int position, int currentState, List<Hit<V>> collectedEmits)
{
int[] hitArray = output[currentState];
if (hitArray != null)
{
for (int hit : hitArray)
{
collectedEmits.add(new Hit<V>(position - l[hit], position, v[hit]));
}
}
} | Store the outputs
@param position
@param currentState
@param collectedEmits |
protected int transition(int current, char c)
{
int b = current;
int p;
p = b + c + 1;
if (b == check[p])
b = base[p];
else
return -1;
p = b;
return p;
} | State transition
@param current
@param c
@return |
protected int transitionWithRoot(int nodePos, char c)
{
int b = base[nodePos];
int p;
p = b + c + 1;
if (b != check[p])
{
if (nodePos == 0) return 0;
return -1;
}
return p;
} | Transition on c; the root returns itself when there is no match
@param nodePos
@param c
@return |
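Both transition methods encode the double-array convention; a comment sketch of it, as read from the code above rather than a formal spec:

// candidate child slot for character c:  p = base[s] + c + 1
// the slot really belongs to s only if:  check[p] == base[s]
// transitionWithRoot additionally lets the root (nodePos == 0) fall back to itself,
// which is what guarantees the failure walk in getState terminates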
private int fetch(State parent, List<Map.Entry<Integer, State>> siblings)
{
if (parent.isAcceptable())
{
State fakeNode = new State(-(parent.getDepth() + 1)); // this node is a child of parent and also carries parent's output
fakeNode.addEmit(parent.getLargestValueId());
siblings.add(new AbstractMap.SimpleEntry<Integer, State>(0, fakeNode));
}
for (Map.Entry<Character, State> entry : parent.getSuccess().entrySet())
{
siblings.add(new AbstractMap.SimpleEntry<Integer, State>(entry.getKey() + 1, entry.getValue()));
}
return siblings.size();
} | Fetch the directly connected child nodes
@param parent the parent node
@param siblings the (child) sibling nodes
@return the number of sibling nodes |
public Collection<Token> tokenize(String text)
{
Collection<Token> tokens = new ArrayList<Token>();
Collection<Emit> collectedEmits = parseText(text);
// the following is the key to longest-match tokenization
IntervalTree intervalTree = new IntervalTree((List<Intervalable>) (List<?>) collectedEmits);
intervalTree.removeOverlaps((List<Intervalable>) (List<?>) collectedEmits);
// overlap removal done
int lastCollectedPosition = -1;
for (Emit emit : collectedEmits)
{
if (emit.getStart() - lastCollectedPosition > 1)
{
tokens.add(createFragment(emit, text, lastCollectedPosition));
}
tokens.add(createMatch(emit, text));
lastCollectedPosition = emit.getEnd();
}
if (text.length() - lastCollectedPosition > 1)
{
tokens.add(createFragment(null, text, lastCollectedPosition));
}
return tokens;
} | A longest-match tokenizer
@param text the text to tokenize
@return |
@SuppressWarnings("unchecked")
public Collection<Emit> parseText(String text)
{
checkForConstructedFailureStates();
int position = 0;
State currentState = this.rootState;
List<Emit> collectedEmits = new ArrayList<Emit>();
for (int i = 0; i < text.length(); ++i)
{
currentState = getState(currentState, text.charAt(i));
storeEmits(position, currentState, collectedEmits);
++position;
}
if (!trieConfig.isAllowOverlaps())
{
IntervalTree intervalTree = new IntervalTree((List<Intervalable>) (List<?>) collectedEmits);
intervalTree.removeOverlaps((List<Intervalable>) (List<?>) collectedEmits);
}
if (trieConfig.remainLongest)
{
remainLongest(collectedEmits);
}
return collectedEmits;
} | Pattern matching
@param text the text to match against
@return the matched patterns |
private static void remainLongest(Collection<Emit> collectedEmits)
{
if (collectedEmits.size() < 2) return;
Map<Integer, Emit> emitMapStart = new TreeMap<Integer, Emit>();
for (Emit emit : collectedEmits)
{
Emit pre = emitMapStart.get(emit.getStart());
if (pre == null || pre.size() < emit.size())
{
emitMapStart.put(emit.getStart(), emit);
}
}
if (emitMapStart.size() < 2)
{
collectedEmits.clear();
collectedEmits.addAll(emitMapStart.values());
return;
}
Map<Integer, Emit> emitMapEnd = new TreeMap<Integer, Emit>();
for (Emit emit : emitMapStart.values())
{
Emit pre = emitMapEnd.get(emit.getEnd());
if (pre == null || pre.size() < emit.size())
{
emitMapEnd.put(emit.getEnd(), emit);
}
}
collectedEmits.clear();
collectedEmits.addAll(emitMapEnd.values());
} | Keep only the longest words
@param collectedEmits |
private static State getState(State currentState, Character character)
{
State newCurrentState = currentState.nextState(character); // first try the success (goto) transition
while (newCurrentState == null) // on failure, follow the failure links
{
currentState = currentState.failure();
newCurrentState = currentState.nextState(character);
}
return newCurrentState;
} | Jump to the next state
@param currentState the current state
@param character the accepted character
@return the resulting state |
private void constructFailureStates()
{
Queue<State> queue = new LinkedBlockingDeque<State>();
// step 1: set the failure of all depth-1 nodes to the root
for (State depthOneState : this.rootState.getStates())
{
depthOneState.setFailure(this.rootState);
queue.add(depthOneState);
}
this.failureStatesConstructed = true;
// step 2: build the failure table for nodes of depth > 1, via BFS
while (!queue.isEmpty())
{
State currentState = queue.remove();
for (Character transition : currentState.getTransitions())
{
State targetState = currentState.nextState(transition);
queue.add(targetState);
State traceFailureState = currentState.failure();
while (traceFailureState.nextState(transition) == null)
{
traceFailureState = traceFailureState.failure();
}
State newFailureState = traceFailureState.nextState(transition);
targetState.setFailure(newFailureState);
targetState.addEmit(newFailureState.emit());
}
}
} | Build the failure table |
private static void storeEmits(int position, State currentState, List<Emit> collectedEmits)
{
Collection<String> emits = currentState.emit();
if (emits != null && !emits.isEmpty())
{
for (String emit : emits)
{
collectedEmits.add(new Emit(position - emit.length() + 1, position, emit));
}
}
} | Store the match results
@param position the current position, i.e. the end position of the matched pattern
@param currentState the current state
@param collectedEmits where the matches are stored |
public boolean hasKeyword(String text)
{
checkForConstructedFailureStates();
State currentState = this.rootState;
for (int i = 0; i < text.length(); ++i)
{
State nextState = getState(currentState, text.charAt(i));
if (nextState != null && nextState != currentState && nextState.emit().size() != 0)
{
return true;
}
currentState = nextState;
}
return false;
} | Whether the text contains any pattern
@param text the text to match against
@return true if the text contains a pattern |
static boolean load(Map<String, String> storage, boolean reverse, String... pathArray)
{
StringDictionary dictionary = new StringDictionary("=");
for (String path : pathArray)
{
if (!dictionary.load(path)) return false;
}
if (reverse) dictionary = dictionary.reverse();
Set<Map.Entry<String, String>> entrySet = dictionary.entrySet();
for (Map.Entry<String, String> entry : entrySet)
{
storage.put(entry.getKey(), entry.getValue());
}
return true;
} | Load a dictionary
@param storage the storage map
@param reverse whether to reverse the key-value pairs
@param pathArray the paths
@return whether loading succeeded |
static boolean load(String path, AhoCorasickDoubleArrayTrie<String> trie)
{
return load(path, trie, false);
} | Load the contents of path into the trie
@param path
@param trie
@return |
static boolean load(String path, AhoCorasickDoubleArrayTrie<String> trie, boolean reverse)
{
String datPath = path;
if (reverse)
{
datPath += Predefine.REVERSE_EXT;
}
if (loadDat(datPath, trie)) return true;
// load from text and try to generate the DAT cache
TreeMap<String, String> map = new TreeMap<String, String>();
if (!load(map, reverse, path)) return false;
logger.info("正在构建AhoCorasickDoubleArrayTrie,来源:" + path);
trie.build(map);
logger.info("正在缓存双数组" + datPath);
saveDat(datPath, trie, map.entrySet());
return true;
} | Load a dictionary
@param path
@param trie
@param reverse whether to reverse it
@return |
private static TreeMap<Double ,Set<String>> sortScoreMap(TreeMap<String, Double> scoreMap)
{
TreeMap<Double, Set<String>> result = new TreeMap<Double, Set<String>>(Collections.reverseOrder());
for (Map.Entry<String, Double> entry : scoreMap.entrySet())
{
Set<String> sentenceSet = result.get(entry.getValue());
if (sentenceSet == null)
{
sentenceSet = new HashSet<String>();
result.put(entry.getValue(), sentenceSet);
}
sentenceSet.add(entry.getKey());
}
return result;
} | Sort the score map, folding sentences with equal scores into sets
@param scoreMap
@return |
private static Double max(Map<String, Double> map)
{
Double theMax = 0.0;
for (Double v : map.values())
{
theMax = Math.max(theMax, v);
}
return theMax;
} | Find the maximum of the map's values; assumes the values start from 0
@param map
@return |
public int size()
{
int length = 0;
for (Pinyin pinyin : pinyinArray)
{
if (pinyin != Pinyin.none5) ++length;
}
return length;
} | Number of pinyin (excluding none5 placeholders)
@return |
protected int addWordToVocab(String word)
{
vocab[vocabSize] = new VocabWord(word);
vocabSize++;
// Reallocate memory if needed
if (vocabSize + 2 >= vocabMaxSize)
{
vocabMaxSize += 1000;
VocabWord[] temp = new VocabWord[vocabMaxSize];
System.arraycopy(vocab, 0, temp, 0, vocabSize);
vocab = temp;
}
vocabIndexMap.put(word, vocabSize - 1);
return vocabSize - 1;
} | Adds a word to the vocabulary
@param word
@return |
int searchVocab(String word)
{
if (word == null) return -1;
Integer pos = vocabIndexMap.get(word);
return pos == null ? -1 : pos.intValue();
} | Returns position of a word in the vocabulary; if the word is not found, returns -1
@param word
@return |
void sortVocab()
{
Arrays.sort(vocab, 0, vocabSize);
// re-build vocabIndexMap
final int size = vocabSize;
trainWords = 0;
table = new int[size];
for (int i = 0; i < size; i++)
{
VocabWord word = vocab[i];
// Words occurring less than min_count times will be discarded from the vocab
if (word.cn < config.getMinCount())
{
table[vocabIndexMap.get(word.word)] = -4;
vocabSize--;
}
else
{
// The hash must be re-computed, since it is no longer valid after sorting
table[vocabIndexMap.get(word.word)] = i;
setVocabIndexMap(word, i);
}
}
// free memory
vocabIndexMap = null;
VocabWord[] nvocab = new VocabWord[vocabSize];
System.arraycopy(vocab, 0, nvocab, 0, vocabSize);
vocab = nvocab; // shrink to the surviving words; without this assignment the copy above was a dead store
} | Sorts the vocabulary by frequency using word counts |
void createBinaryTree()
{
int[] point = new int[VocabWord.MAX_CODE_LENGTH];
char[] code = new char[VocabWord.MAX_CODE_LENGTH];
int[] count = new int[vocabSize * 2 + 1];
char[] binary = new char[vocabSize * 2 + 1];
int[] parentNode = new int[vocabSize * 2 + 1];
for (int i = 0; i < vocabSize; i++)
count[i] = vocab[i].cn;
for (int i = vocabSize; i < vocabSize * 2; i++)
count[i] = Integer.MAX_VALUE;
int pos1 = vocabSize - 1;
int pos2 = vocabSize;
// Following algorithm constructs the Huffman tree by adding one node at a time
int min1i, min2i;
for (int i = 0; i < vocabSize - 1; i++)
{
// First, find two smallest nodes 'min1, min2'
if (pos1 >= 0)
{
if (count[pos1] < count[pos2])
{
min1i = pos1;
pos1--;
}
else
{
min1i = pos2;
pos2++;
}
}
else
{
min1i = pos2;
pos2++;
}
if (pos1 >= 0)
{
if (count[pos1] < count[pos2])
{
min2i = pos1;
pos1--;
}
else
{
min2i = pos2;
pos2++;
}
}
else
{
min2i = pos2;
pos2++;
}
count[vocabSize + i] = count[min1i] + count[min2i];
parentNode[min1i] = vocabSize + i;
parentNode[min2i] = vocabSize + i;
binary[min2i] = 1;
}
// Now assign binary code to each vocabulary word
for (int j = 0; j < vocabSize; j++)
{
int k = j;
int i = 0;
while (true)
{
code[i] = binary[k];
point[i] = k;
i++;
k = parentNode[k];
if (k == vocabSize * 2 - 2) break;
}
vocab[j].codelen = i;
vocab[j].point[0] = vocabSize - 2;
for (k = 0; k < i; k++)
{
vocab[j].code[i - k - 1] = code[k];
vocab[j].point[i - k] = point[k] - vocabSize;
}
}
} | Create binary Huffman tree using the word counts.
Frequent words will have short unique binary codes |
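A small worked example of the construction (counts are illustrative): with word counts [5, 3, 2, 1] the loop merges 1+2 into an internal node of count 3, then 3+3 into 6, then 5+6 into the root, so the code lengths come out as 1, 2, 3, 3:

// counts: 5    3    2    1
// codes:  0    11   101  100
// (one valid Huffman assignment; the exact bits depend on which child receives binary[x] = 1)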
public static List<Document> convert2DocumentList(String folderPath, boolean verbose)
{
long start = System.currentTimeMillis();
List<File> fileList = IOUtil.fileList(folderPath);
List<Document> documentList = new LinkedList<Document>();
int i = 0;
for (File file : fileList)
{
if (verbose) System.out.print(file);
Document document = convert2Document(file);
documentList.add(document);
if (verbose) System.out.println(" " + ++i + " / " + fileList.size());
}
if (verbose)
{
System.out.println(documentList.size());
System.out.printf("花费时间%d ms\n", System.currentTimeMillis() - start);
}
return documentList;
} | Read a whole directory of People's Daily format corpora
@param folderPath the path
@param verbose
@return |
public int determineMedian(List<Intervalable> intervals)
{
int start = -1;
int end = -1;
for (Intervalable interval : intervals)
{
int currentStart = interval.getStart();
int currentEnd = interval.getEnd();
if (start == -1 || currentStart < start)
{
start = currentStart;
}
if (end == -1 || currentEnd > end)
{
end = currentEnd;
}
}
return (start + end) / 2;
} | Compute the median point
@param intervals the interval collection
@return the median coordinate |
public List<Intervalable> findOverlaps(Intervalable interval)
{
List<Intervalable> overlaps = new ArrayList<Intervalable>();
if (this.point < interval.getStart())
{
// search the right subtree
addToOverlaps(interval, overlaps, findOverlappingRanges(this.right, interval));
addToOverlaps(interval, overlaps, checkForOverlapsToTheRight(interval));
}
else if (this.point > interval.getEnd())
{
// search the left subtree
addToOverlaps(interval, overlaps, findOverlappingRanges(this.left, interval));
addToOverlaps(interval, overlaps, checkForOverlapsToTheLeft(interval));
}
else
{
// otherwise the interval straddles this node's point
addToOverlaps(interval, overlaps, this.intervals);
addToOverlaps(interval, overlaps, findOverlappingRanges(this.left, interval));
addToOverlaps(interval, overlaps, findOverlappingRanges(this.right, interval));
}
return overlaps;
} | Find the intervals that overlap with interval
@param interval
@return |
protected void addToOverlaps(Intervalable interval, List<Intervalable> overlaps, List<Intervalable> newOverlaps)
{
for (Intervalable currentInterval : newOverlaps)
{
if (!currentInterval.equals(interval))
{
overlaps.add(currentInterval);
}
}
} | Add to the overlap list
@param interval the interval being overlapped
@param overlaps the overlap list
@param newOverlaps the intervals to be added |
protected List<Intervalable> checkForOverlaps(Intervalable interval, Direction direction)
{
List<Intervalable> overlaps = new ArrayList<Intervalable>();
for (Intervalable currentInterval : this.intervals)
{
switch (direction)
{
case LEFT:
if (currentInterval.getStart() <= interval.getEnd())
{
overlaps.add(currentInterval);
}
break;
case RIGHT:
if (currentInterval.getEnd() >= interval.getStart())
{
overlaps.add(currentInterval);
}
break;
}
}
return overlaps;
} | Find overlaps
@param interval the interval to overlap with
@param direction whether the overlaps lie to the left or right of interval
@return |
protected static List<Intervalable> findOverlappingRanges(IntervalNode node, Intervalable interval)
{
if (node != null)
{
return node.findOverlaps(interval);
}
return Collections.emptyList();
} | A wrapper around IntervalNode.findOverlaps(Intervalable) that guards against NPE
@see com.hankcs.hanlp.algorithm.ahocorasick.interval.IntervalNode#findOverlaps(Intervalable)
@param node
@param interval
@return |
public boolean learn(Instance instance)
{
if (instance == null) return false;
model.update(instance);
return true;
} | Online learning
@param instance
@return |
public static List<NS> viterbiCompute(List<EnumItem<NS>> roleTagList)
{
return Viterbi.computeEnum(roleTagList, PlaceDictionary.transformMatrixDictionary);
} | Viterbi decoding of the optimal labels
@param roleTagList
@return |
public BaseNode<V> transition(char path)
{
BaseNode<V> cur = this;
cur = cur.getChild(path);
if (cur == null || cur.status == Status.UNDEFINED_0) return null;
return cur;
} | State transition
@param path
@return |
public void extend(int ordinaryMax)
{
this.ordinaryMax = ordinaryMax;
double[][] n_transititon_probability = new double[ordinaryMax][ordinaryMax];
for (int i = 0; i < transititon_probability.length; i++)
{
System.arraycopy(transititon_probability[i], 0, n_transititon_probability[i], 0, transititon_probability.length);
}
transititon_probability = n_transititon_probability;
int[] n_total = new int[ordinaryMax];
System.arraycopy(total, 0, n_total, 0, total.length);
total = n_total;
double[] n_start_probability = new double[ordinaryMax];
System.arraycopy(start_probability, 0, n_start_probability, 0, start_probability.length);
start_probability = n_start_probability;
int[][] n_matrix = new int[ordinaryMax][ordinaryMax];
for (int i = 0; i < matrix.length; i++)
{
System.arraycopy(matrix[i], 0, n_matrix[i], 0, matrix.length);
}
matrix = n_matrix;
} | Extend the internal matrices; only used as a compatibility measure after new enum instances are added via reflection |
public long distance(String a, String b)
{
SynonymItem itemA = get(a);
if (itemA == null) return Long.MAX_VALUE / 3;
SynonymItem itemB = get(b);
if (itemB == null) return Long.MAX_VALUE / 3;
return itemA.distance(itemB);
} | Semantic distance
@param a
@param b
@return |
public void addPair(String first, String second)
{
Map<String, Integer> firstMatrix = transferMatrix.get(first);
if (firstMatrix == null)
{
firstMatrix = new TreeMap<String, Integer>();
transferMatrix.put(first, firstMatrix);
}
Integer frequency = firstMatrix.get(second);
if (frequency == null) frequency = 0;
firstMatrix.put(second, frequency + 1);
} | Add one transition example; counts are accumulated internally
@param first
@param second |
public static final Nature fromString(String name)
{
Integer id = idMap.get(name);
if (id == null)
return null;
return values[id];
} | Safely convert a string POS tag to the Enum type; returns null if the tag is undefined
@param name the string POS tag
@return the Enum POS tag |
public static final Nature create(String name)
{
Nature nature = fromString(name);
if (nature == null)
return new Nature(name);
return nature;
} | Create a custom POS tag; if a matching tag already exists, return the existing one
@param name the string POS tag
@return the Enum POS tag |
@Override
public int[] toIdList(int codePoint)
{
int count;
if (codePoint < 0x80)
count = 1;
else if (codePoint < 0x800)
count = 2;
else if (codePoint < 0x10000)
count = 3;
else if (codePoint < 0x200000)
count = 4;
else if (codePoint < 0x4000000)
count = 5;
else if (codePoint <= 0x7fffffff)
count = 6;
else
return EMPTYLIST;
int[] r = new int[count];
switch (count)
{ /* note: code falls through cases! */
case 6:
r[5] = (char) (0x80 | (codePoint & 0x3f));
codePoint = codePoint >> 6;
codePoint |= 0x4000000;
case 5:
r[4] = (char) (0x80 | (codePoint & 0x3f));
codePoint = codePoint >> 6;
codePoint |= 0x200000;
case 4:
r[3] = (char) (0x80 | (codePoint & 0x3f));
codePoint = codePoint >> 6;
codePoint |= 0x10000;
case 3:
r[2] = (char) (0x80 | (codePoint & 0x3f));
codePoint = codePoint >> 6;
codePoint |= 0x800;
case 2:
r[1] = (char) (0x80 | (codePoint & 0x3f));
codePoint = codePoint >> 6;
codePoint |= 0xc0;
case 1:
r[0] = (char) codePoint;
}
return r;
} | Code ported from the iconv lib: utf8.h, utf8_codepointtomb |
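A worked example of the fall-through encoder above: U+4E2D ("中") lies in [0x800, 0x10000), so count == 3 and the cases 3..1 emit its UTF-8 bytes from last to first:

// toIdList(0x4E2D) => { 0xE4, 0xB8, 0xAD }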
public static TriaFrequency create(String first, char delimiter, String second, String third)
{
TriaFrequency triaFrequency = new TriaFrequency(first + delimiter + second + Occurrence.RIGHT + third);
triaFrequency.first = first;
triaFrequency.second = second;
triaFrequency.third = third;
triaFrequency.delimiter = delimiter;
return triaFrequency;
} | Construct a trigram continuation, forward direction
@param first
@param second
@param third
@param delimiter usually RIGHT!
@return |
public void train(String templFile, String trainFile, String modelFile,
int maxitr, int freq, double eta, double C, int threadNum, int shrinkingSize,
Encoder.Algorithm algorithm) throws IOException
{
Encoder encoder = new Encoder();
if (!encoder.learn(templFile, trainFile, modelFile,
true, maxitr, freq, eta, C, threadNum, shrinkingSize, algorithm))
{
throw new IOException("fail to learn model");
}
convert(modelFile);
} | Train
@param templFile the template file
@param trainFile the training file
@param modelFile the model file
@param maxitr maximum number of iterations
@param freq minimum feature frequency
@param eta convergence threshold
@param C cost-factor
@param threadNum number of threads
@param shrinkingSize
@param algorithm the training algorithm |
public void dumpTemplate(String templatePath) throws IOException
{
BufferedWriter bw = IOUtil.newBufferedWriter(templatePath);
String template = getTemplate();
bw.write(template);
bw.close();
} | Dump the feature template
@param templatePath
@throws IOException |
public String getTemplate()
{
String template = getDefaultFeatureTemplate();
if (model != null && model.getFeatureTemplateArray() != null)
{
StringBuilder sbTemplate = new StringBuilder();
for (FeatureTemplate featureTemplate : model.getFeatureTemplateArray())
{
sbTemplate.append(featureTemplate.getTemplate()).append('\n');
}
template = sbTemplate.toString(); // use the model's templates; previously the built string was discarded
}
return template;
} | Get the feature template
@return |
public void add(Object id, String text)
{
List<Term> termList = preprocess(text);
add(id, termList);
} | Add a document
@param id the document id
@param text the document content |
public int add(String text)
{
int id = tfMap.size();
add(id, text);
return id;
} | Add a document, assigning an id automatically
@param text
@return the assigned id |
public static CRFModel loadTxt(String path, CRFModel instance)
{
CRFModel CRFModel = instance;
// try the bin cache first
if (CRFModel.load(ByteArray.createByteArray(path + Predefine.BIN_EXT))) return CRFModel;
IOUtil.LineIterator lineIterator = new IOUtil.LineIterator(path);
if (!lineIterator.hasNext()) return null;
logger.info(lineIterator.next()); // version
logger.info(lineIterator.next()); // cost-factor
int maxid = Integer.parseInt(lineIterator.next().substring("maxid:".length()).trim());
logger.info(lineIterator.next()); // xsize
lineIterator.next(); // blank
String line;
int id = 0;
CRFModel.tag2id = new HashMap<String, Integer>();
while ((line = lineIterator.next()).length() != 0)
{
CRFModel.tag2id.put(line, id);
++id;
}
CRFModel.id2tag = new String[CRFModel.tag2id.size()];
final int size = CRFModel.id2tag.length;
for (Map.Entry<String, Integer> entry : CRFModel.tag2id.entrySet())
{
CRFModel.id2tag[entry.getValue()] = entry.getKey();
}
TreeMap<String, FeatureFunction> featureFunctionMap = new TreeMap<String, FeatureFunction>(); // 构建trie树的时候用
TreeMap<Integer, FeatureFunction> featureFunctionList = new TreeMap<Integer, FeatureFunction>(); // 读取权值的时候用
CRFModel.featureTemplateList = new LinkedList<FeatureTemplate>();
while ((line = lineIterator.next()).length() != 0)
{
if (!"B".equals(line))
{
FeatureTemplate featureTemplate = FeatureTemplate.create(line);
CRFModel.featureTemplateList.add(featureTemplate);
}
else
{
CRFModel.matrix = new double[size][size];
}
}
int b = -1; // position of the transition matrix weights
if (CRFModel.matrix != null)
{
String[] args = lineIterator.next().split(" ", 2); // 0 B
b = Integer.valueOf(args[0]);
featureFunctionList.put(b, null);
}
while ((line = lineIterator.next()).length() != 0)
{
String[] args = line.split(" ", 2);
char[] charArray = args[1].toCharArray();
FeatureFunction featureFunction = new FeatureFunction(charArray, size);
featureFunctionMap.put(args[1], featureFunction);
featureFunctionList.put(Integer.parseInt(args[0]), featureFunction);
}
for (Map.Entry<Integer, FeatureFunction> entry : featureFunctionList.entrySet())
{
int fid = entry.getKey();
FeatureFunction featureFunction = entry.getValue();
if (fid == b)
{
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
{
CRFModel.matrix[i][j] = Double.parseDouble(lineIterator.next());
}
}
}
else
{
for (int i = 0; i < size; i++)
{
featureFunction.w[i] = Double.parseDouble(lineIterator.next());
}
}
}
if (lineIterator.hasNext())
{
logger.warning("文本读取有残留,可能会出问题!" + path);
}
lineIterator.close();
logger.info("开始构建trie树");
CRFModel.featureFunctionTrie.build(featureFunctionMap);
// cache as bin
try
{
logger.info("开始缓存" + path + Predefine.BIN_EXT);
DataOutputStream out = new DataOutputStream(IOUtil.newOutputStream(path + Predefine.BIN_EXT));
CRFModel.save(out);
out.close();
}
catch (Exception e)
{
logger.warning("在缓存" + path + Predefine.BIN_EXT + "时发生错误" + TextUtility.exceptionToString(e));
}
CRFModel.onLoadTxtFinished();
return CRFModel;
} | Load a CRF++ model in txt form
@param path the model path
@param instance a model instance (callers may construct a different CRFModel to hold the loaded result)
@return the model |
public void tag(Table table)
{
int size = table.size();
if (size == 0) return;
int tagSize = id2tag.length;
double[][] net = new double[size][tagSize];
for (int i = 0; i < size; ++i)
{
LinkedList<double[]> scoreList = computeScoreList(table, i);
for (int tag = 0; tag < tagSize; ++tag)
{
net[i][tag] = computeScore(scoreList, tag);
}
}
if (size == 1)
{
double maxScore = -1e10;
int bestTag = 0;
for (int tag = 0; tag < net[0].length; ++tag)
{
if (net[0][tag] > maxScore)
{
maxScore = net[0][tag];
bestTag = tag;
}
}
table.setLast(0, id2tag[bestTag]);
return;
}
int[][] from = new int[size][tagSize];
double[][] maxScoreAt = new double[2][tagSize]; // rolling array (only two rows needed)
System.arraycopy(net[0], 0, maxScoreAt[0], 0, tagSize); // initially preI = 0, maxScoreAt[preI][pre] = net[0][pre]
int curI = 0;
for (int i = 1; i < size; ++i)
{
curI = i & 1;
int preI = 1 - curI;
for (int now = 0; now < tagSize; ++now)
{
double maxScore = -1e10;
for (int pre = 0; pre < tagSize; ++pre)
{
double score = maxScoreAt[preI][pre] + matrix[pre][now] + net[i][now];
if (score > maxScore)
{
maxScore = score;
from[i][now] = pre;
maxScoreAt[curI][now] = maxScore;
}
}
net[i][now] = maxScore;
}
}
// backtrack the best path
double maxScore = -1e10;
int maxTag = 0;
for (int tag = 0; tag < tagSize; ++tag)
{
if (maxScoreAt[curI][tag] > maxScore)
{
maxScore = maxScoreAt[curI][tag];
maxTag = tag;
}
}
table.setLast(size - 1, id2tag[maxTag]);
maxTag = from[size - 1][maxTag];
for (int i = size - 2; i > 0; --i)
{
table.setLast(i, id2tag[maxTag]);
maxTag = from[i][maxTag];
}
table.setLast(0, id2tag[maxTag]);
} | Viterbi tagging (forward scoring followed by backward backtracking)
@param table |
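The main loop implements the standard Viterbi recurrence in the code's own notation (net holds per-position emission scores, matrix the tag-to-tag transition weights):

$$\mathrm{maxScoreAt}[i][t] = \max_{p}\big(\mathrm{maxScoreAt}[i-1][p] + \mathrm{matrix}[p][t]\big) + \mathrm{net}[i][t]$$

with from[i][t] recording the argmax p; the final loop then backtracks from the best last tag through from to label the whole table.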
protected LinkedList<double[]> computeScoreList(Table table, int current)
{
LinkedList<double[]> scoreList = new LinkedList<double[]>();
for (FeatureTemplate featureTemplate : featureTemplateList)
{
char[] o = featureTemplate.generateParameter(table, current);
FeatureFunction featureFunction = featureFunctionTrie.get(o);
if (featureFunction == null) continue;
scoreList.add(featureFunction.w);
}
return scoreList;
} | Compute the outputs of the feature functions
@param table
@param current
@return |
protected static double computeScore(LinkedList<double[]> scoreList, int tag)
{
double score = 0;
for (double[] w : scoreList)
{
score += w[tag];
}
return score;
} | Score a list of feature functions for a given tag
@param scoreList
@param tag
@return |
public static CRFModel load(String path)
{
CRFModel model = loadBin(path + BIN_EXT);
if (model != null) return model;
return loadTxt(path, new CRFModel(new DoubleArrayTrie<FeatureFunction>()));
} | 加载CRF++模型<br>
如果存在缓存的话,优先读取缓存,否则读取txt,并且建立缓存
@param path txt的路径,即使不存在.txt,只存在.bin,也应传入txt的路径,方法内部会自动加.bin后缀
@return |
public static CRFModel loadBin(String path)
{
ByteArray byteArray = ByteArray.createByteArray(path);
if (byteArray == null) return null;
CRFModel model = new CRFModel();
if (model.load(byteArray)) return model;
return null;
} | Load a CRF++ model in bin form<br>
Note this bin form is not CRF++'s own binary model, but HanLP's private format converted from the CRF++ text model
@param path
@return |
public static Word create(String param)
{
if (param == null) return null;
int cutIndex = param.lastIndexOf('/');
if (cutIndex <= 0 || cutIndex == param.length() - 1)
{
logger.warning("使用 " + param + "创建单个单词失败");
return null;
}
return new Word(param.substring(0, cutIndex), param.substring(cutIndex + 1));
} | Construct a word from a parameter string
@param param e.g. 人民网/nz
@return a word |
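A hypothetical usage sketch (the accessors are assumptions about the corpus word API, not confirmed by the snippet above):

Word w = Word.create("人民网/nz");
// w.getValue() == "人民网", w.getLabel() == "nz"; malformed input without a '/' yields null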
public static IWord create(String param)
{
if (param == null) return null;
if (param.startsWith("[") && !param.startsWith("[/"))
{
return CompoundWord.create(param);
}
else
{
return Word.create(param);
}
} | Produce the corresponding word from a parameter string
@param param
@return |
public String[][] getEdgeArray()
{
String[][] edge = new String[word.length + 1][word.length + 1];
for (CoNLLWord coNLLWord : word)
{
edge[coNLLWord.ID][coNLLWord.HEAD.ID] = coNLLWord.DEPREL;
}
return edge;
} | Get the edge array; edge[i][j] holds the dependency relation of the edge between word i and head j, or null if there is none
@return |
public CoNLLWord[] getWordArrayWithRoot()
{
CoNLLWord[] wordArray = new CoNLLWord[word.length + 1];
wordArray[0] = CoNLLWord.ROOT;
System.arraycopy(word, 0, wordArray, 1, word.length);
return wordArray;
} | Get the word array including the root node
@return |
public List<CoNLLWord> findChildren(CoNLLWord word)
{
List<CoNLLWord> result = new LinkedList<CoNLLWord>();
for (CoNLLWord other : this)
{
if (other.HEAD == word)
result.add(other);
}
return result;
} | Find all child nodes
@param word
@return |
public List<CoNLLWord> findChildren(CoNLLWord word, String relation)
{
List<CoNLLWord> result = new LinkedList<CoNLLWord>();
for (CoNLLWord other : this)
{
if (other.HEAD == word && other.DEPREL.equals(relation))
result.add(other);
}
return result;
} | Find child nodes with a specific dependency relation
@param word
@param relation
@return |
public void parseConllFileNoParallel(String inputFile, String outputFile, boolean rootFirst, int beamWidth, boolean labeled, boolean lowerCased, int numOfThreads, boolean partial, String scorePath) throws IOException, ExecutionException, InterruptedException
{
CoNLLReader reader = new CoNLLReader(inputFile);
boolean addScore = false;
if (scorePath.trim().length() > 0)
addScore = true;
ArrayList<Float> scoreList = new ArrayList<Float>();
long start = System.currentTimeMillis();
int allArcs = 0;
int size = 0;
BufferedWriter writer = new BufferedWriter(new FileWriter(outputFile + ".tmp"));
int dataCount = 0;
while (true)
{
ArrayList<Instance> data = reader.readData(15000, true, labeled, rootFirst, lowerCased, maps);
size += data.size();
if (data.size() == 0)
break;
for (Instance instance : data)
{
dataCount++;
if (dataCount % 100 == 0)
System.err.print(dataCount + " ... ");
Configuration bestParse;
if (partial)
bestParse = parsePartial(instance, instance.getSentence(), rootFirst, beamWidth, numOfThreads);
else bestParse = parse(instance.getSentence(), rootFirst, beamWidth, numOfThreads);
int[] words = instance.getSentence().getWords();
allArcs += words.length - 1;
if (addScore)
scoreList.add(bestParse.score / bestParse.sentence.size());
writeParsedSentence(writer, rootFirst, bestParse, words);
}
}
// System.err.print("\n");
long end = System.currentTimeMillis();
float each = (1.0f * (end - start)) / size;
float eacharc = (1.0f * (end - start)) / allArcs;
writer.flush();
writer.close();
// DecimalFormat format = new DecimalFormat("##.00");
//
// System.err.print(format.format(eacharc) + " ms for each arc!\n");
// System.err.print(format.format(each) + " ms for each sentence!\n\n");
BufferedReader gReader = new BufferedReader(new FileReader(inputFile));
BufferedReader pReader = new BufferedReader(new FileReader(outputFile + ".tmp"));
BufferedWriter pwriter = new BufferedWriter(new FileWriter(outputFile));
String line;
while ((line = pReader.readLine()) != null)
{
String gLine = gReader.readLine();
if (line.trim().length() > 0)
{
while (gLine.trim().length() == 0)
gLine = gReader.readLine();
String[] ps = line.split("\t");
String[] gs = gLine.split("\t");
gs[6] = ps[0];
gs[7] = ps[1];
StringBuilder output = new StringBuilder();
for (int i = 0; i < gs.length; i++)
{
output.append(gs[i]).append("\t");
}
pwriter.write(output.toString().trim() + "\n");
}
else
{
pwriter.write("\n");
}
}
pwriter.flush();
pwriter.close();
if (addScore)
{
BufferedWriter scoreWriter = new BufferedWriter(new FileWriter(scorePath));
for (int i = 0; i < scoreList.size(); i++)
scoreWriter.write(scoreList.get(i) + "\n");
scoreWriter.flush();
scoreWriter.close();
}
IOUtil.deleteFile(outputFile + ".tmp");
} | Needs CoNLL 2006 format
@param inputFile
@param outputFile
@param rootFirst
@param beamWidth
@throws Exception |
private static LinearModel trainNaivePerceptron(Instance[] instanceList, FeatureMap featureMap, int maxIteration)
{
LinearModel model = new LinearModel(featureMap, new float[featureMap.size()]);
for (int it = 0; it < maxIteration; ++it)
{
Utility.shuffleArray(instanceList);
for (Instance instance : instanceList)
{
int y = model.decode(instance.x);
if (y != instance.y) // mistake-driven update
model.update(instance.x, instance.y);
}
}
return model;
} | Naive perceptron training algorithm
@param instanceList the training instances
@param featureMap the feature functions
@param maxIteration number of training iterations |
private static LinearModel trainAveragedPerceptron(Instance[] instanceList, FeatureMap featureMap, int maxIteration)
{
float[] parameter = new float[featureMap.size()];
double[] sum = new double[featureMap.size()];
int[] time = new int[featureMap.size()];
AveragedPerceptron model = new AveragedPerceptron(featureMap, parameter);
int t = 0;
for (int it = 0; it < maxIteration; ++it)
{
Utility.shuffleArray(instanceList);
for (Instance instance : instanceList)
{
++t;
int y = model.decode(instance.x);
if (y != instance.y) // mistake-driven update
model.update(instance.x, instance.y, sum, time, t);
}
}
model.average(sum, time, t);
return model;
} | Averaged perceptron training algorithm
@param instanceList the training instances
@param featureMap the feature functions
@param maxIteration number of training iterations |
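A comment sketch of the lazy-averaging bookkeeping this trainer relies on; the update below is an assumption about AveragedPerceptron's internals inferred from the sum/time/t arguments, not a copy of them:

// for each feature f touched at step t:
//   sum[f]  += (t - time[f]) * parameter[f]; // back-fill the weight since its last change
//   time[f]  = t;
//   parameter[f] += delta;                   // the usual perceptron update
// average(sum, time, t) back-fills every feature once more and divides by t,
// so the returned weights are the average over all t steps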
public BinaryClassificationFMeasure train(String corpus, int maxIteration, boolean averagePerceptron)
{
FeatureMap featureMap = new LockableFeatureMap(new TagSet(TaskType.CLASSIFICATION));
featureMap.mutable = true; // the feature map may grow during training
Instance[] instanceList = readInstance(corpus, featureMap);
model = averagePerceptron ? trainAveragedPerceptron(instanceList, featureMap, maxIteration)
: trainNaivePerceptron(instanceList, featureMap, maxIteration);
featureMap.mutable = false; // features become read-only after training
return evaluate(instanceList);
} | Train
@param corpus the corpus
@param maxIteration maximum number of iterations
@param averagePerceptron whether to use the averaged perceptron algorithm
@return the model's score (F-measure) on the training set |
public String predict(String text)
{
int y = model.decode(extractFeature(text, model.featureMap));
if (y == -1)
y = 0;
return model.tagSet().stringOf(y);
} | Predict
@param text
@return the predicted label |
public BinaryClassificationFMeasure evaluate(String corpus)
{
Instance[] instanceList = readInstance(corpus, model.featureMap);
return evaluate(instanceList);
} | Evaluate
@param corpus
@return |