code (string, 67–466k) | docstring (string, 1–13.2k) |
---|---|
public BinaryClassificationFMeasure evaluate(Instance[] instanceList)
{
int TP = 0, FP = 0, FN = 0;
for (Instance instance : instanceList)
{
int y = model.decode(instance.x);
if (y == 1)
{
if (instance.y == 1)
++TP;
else
++FP;
}
else if (instance.y == 1)
++FN;
}
float p = TP / (float) (TP + FP) * 100;
float r = TP / (float) (TP + FN) * 100;
return new BinaryClassificationFMeasure(p, r, 2 * p * r / (p + r));
} | Evaluate
@param instanceList
@return |
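As a quick sanity check of the formulas above, with made-up counts TP = 8, FP = 2, FN = 4:

$$P = \frac{8}{8+2}\times 100 = 80,\quad R = \frac{8}{8+4}\times 100 \approx 66.7,\quad F_1 = \frac{2PR}{P+R} \approx 72.7$$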
private Instance[] readInstance(String corpus, FeatureMap featureMap)
{
IOUtil.LineIterator lineIterator = new IOUtil.LineIterator(corpus);
List<Instance> instanceList = new LinkedList<Instance>();
for (String line : lineIterator)
{
String[] cells = line.split(",");
String text = cells[0], label = cells[1];
List<Integer> x = extractFeature(text, featureMap);
int y = featureMap.tagSet.add(label);
if (y == 0)
y = -1; // the perceptron convention uses labels ±1
else if (y > 1)
throw new IllegalArgumentException("类别数大于2,目前只支持二分类。");
instanceList.add(new Instance(x, y));
}
return instanceList.toArray(new Instance[0]);
} | Read instances from a corpus
@param corpus the corpus
@param featureMap feature map
@return dataset |
protected static void addFeature(String feature, FeatureMap featureMap, List<Integer> featureList)
{
int featureId = featureMap.idOf(feature);
if (featureId != -1)
featureList.add(featureId);
} | Insert a feature into the feature vector
@param feature the feature
@param featureMap feature map
@param featureList feature vector |
public boolean add(E e)
{
if (queue.size() < maxSize)
{ // below the maximum capacity, add directly
queue.add(e);
return true;
}
else
{ // the queue is full
E peek = queue.peek();
if (queue.comparator().compare(e, peek) > 0)
{ // compare the new element with the heap top (the smallest element kept so far) and keep the larger one
queue.poll();
queue.add(e);
return true;
}
}
return false;
} | Add an element
@param e the element
@return whether the element was added |
public MaxHeap<E> addAll(Collection<E> collection)
{
for (E e : collection)
{
add(e);
}
return this;
} | Add many elements
@param collection |
public List<E> toList()
{
ArrayList<E> list = new ArrayList<E>(queue.size());
while (!queue.isEmpty())
{
list.add(0, queue.poll());
}
return list;
} | Convert to a sorted list (descending); destructive, the heap is emptied afterwards
@return |
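A minimal usage sketch of this top-N heap, assuming the `MaxHeap(int, Comparator)` constructor used elsewhere in this collection and the usual `java.util` imports:

MaxHeap<Integer> heap = new MaxHeap<Integer>(3, new Comparator<Integer>()
{
    @Override
    public int compare(Integer o1, Integer o2)
    {
        return o1.compareTo(o2);
    }
});
heap.addAll(Arrays.asList(1, 9, 2, 8, 3, 7)); // only the 3 largest values survive
System.out.println(heap.toList());            // prints [9, 8, 7]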
void normalize()
{
double nrm = norm();
for (Map.Entry<Integer, Double> d : entrySet())
{
d.setValue(d.getValue() / nrm);
}
} | Normalize a vector. |
void multiply_constant(double x)
{
for (Map.Entry<Integer, Double> entry : entrySet())
{
entry.setValue(entry.getValue() * x);
}
} | Multiply each value of a vector by a constant value. |
void add_vector(SparseVector vec)
{
for (Map.Entry<Integer, Double> entry : vec.entrySet())
{
Double v = get(entry.getKey());
if (v == null)
v = 0.;
put(entry.getKey(), v + entry.getValue());
}
} | Add other vector. |
static double inner_product(SparseVector vec1, SparseVector vec2)
{
Iterator<Map.Entry<Integer, Double>> it;
SparseVector other;
if (vec1.size() < vec2.size())
{
it = vec1.entrySet().iterator();
other = vec2;
}
else
{
it = vec2.entrySet().iterator();
other = vec1;
}
double prod = 0;
while (it.hasNext())
{
Map.Entry<Integer, Double> entry = it.next();
Double value = other.get(entry.getKey()); // the other vector may not contain this dimension
if (value != null)
prod += entry.getValue() * value;
}
return prod;
} | Calculate the inner product value between vectors. |
double cosine(SparseVector vec1, SparseVector vec2)
{
double norm1 = vec1.norm();
double norm2 = vec2.norm();
double result = 0.0f;
if (norm1 == 0 && norm2 == 0)
{
return result;
}
else
{
double prod = inner_product(vec1, vec2);
result = prod / (norm1 * norm2);
return Double.isNaN(result) ? 0.0f : result;
}
} | Calculate the cosine value between vectors. |
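For reference, the quantity computed by these two methods is the standard cosine similarity, with the convention that it is 0 whenever a norm is 0:

$$\cos(v_1, v_2) = \frac{\sum_i v_{1,i}\, v_{2,i}}{\lVert v_1 \rVert\, \lVert v_2 \rVert}$$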
public static List<Term> segment(char[] text)
{
List<Term> resultList = SEGMENT.seg(text);
ListIterator<Term> listIterator = resultList.listIterator();
while (listIterator.hasNext())
{
if (!CoreStopWordDictionary.shouldInclude(listIterator.next()))
{
listIterator.remove();
}
}
return resultList;
} | Word segmentation
@param text the text
@return segmentation result |
public static List<List<Term>> seg2sentence(String text)
{
List<List<Term>> sentenceList = SEGMENT.seg2sentence(text);
for (List<Term> sentence : sentenceList)
{
ListIterator<Term> listIterator = sentence.listIterator();
while (listIterator.hasNext())
{
if (!CoreStopWordDictionary.shouldInclude(listIterator.next()))
{
listIterator.remove();
}
}
}
return sentenceList;
} | Segment into sentences
@param text
@return |
public static List<List<Term>> seg2sentence(String text, boolean shortest)
{
return SEGMENT.seg2sentence(text, shortest);
} | Segment and split into sentences
@param text the text to segment
@param shortest whether to split into the finest clauses (treating commas as delimiters too)
@return a list of sentences, each sentence being a list of words |
public static List<List<Term>> seg2sentence(String text, Filter... filterArrayChain)
{
List<List<Term>> sentenceList = SEGMENT.seg2sentence(text);
for (List<Term> sentence : sentenceList)
{
ListIterator<Term> listIterator = sentence.listIterator();
while (listIterator.hasNext())
{
if (filterArrayChain != null)
{
Term term = listIterator.next();
for (Filter filter : filterArrayChain)
{
if (!filter.shouldInclude(term))
{
listIterator.remove();
break;
}
}
}
}
}
return sentenceList;
} | Segment into sentences
@param text
@param filterArrayChain custom filter chain
@return |
public static List<List<Term>> seg2sentence(String text)
{
List<List<Term>> resultList = new LinkedList<List<Term>>();
{
for (String sentence : SentencesUtil.toSentenceList(text))
{
resultList.add(segment(sentence));
}
}
return resultList;
} | Segment into sentences
@param text the text
@return sentence list |
@Override
public String classify(String text) throws IllegalArgumentException, IllegalStateException
{
Map<String, Double> scoreMap = predict(text);
return CollectionUtility.max(scoreMap);
} | Predict the class of a text with a trained classifier
@param text
@return
@throws IllegalArgumentException
@throws IllegalStateException |
void reduceVocab()
{
table = new int[vocabSize];
int j = 0;
for (int i = 0; i < vocabSize; i++)
{
if (vocab[i].cn > minReduce)
{
vocab[j].cn = vocab[i].cn;
vocab[j].word = vocab[i].word;
table[vocabIndexMap.get(vocab[j].word)] = j;
j++;
}
else
{
table[vocabIndexMap.get(vocab[i].word)] = -4; // mark the dropped word (index i, not j)
}
}
// adjust the index in the cache
try
{
cache.close();
File fixingFile = new File(cacheFile.getAbsolutePath() + ".fixing");
cache = new DataOutputStream(new FileOutputStream(fixingFile));
DataInputStream oldCache = new DataInputStream(new FileInputStream(cacheFile));
while (oldCache.available() >= 4)
{
int oldId = oldCache.readInt();
if (oldId < 0)
{
cache.writeInt(oldId);
continue;
}
int id = table[oldId];
if (id == -4) continue;
cache.writeInt(id);
}
oldCache.close();
cache.close();
if (!fixingFile.renameTo(cacheFile))
{
throw new RuntimeException(String.format("moving %s to %s failed", fixingFile.getAbsolutePath(), cacheFile.getName()));
}
cache = new DataOutputStream(new FileOutputStream(cacheFile));
}
catch (IOException e)
{
throw new RuntimeException("failed to adjust cache file", e);
}
table = null;
vocabSize = j;
vocabIndexMap.clear();
for (int i = 0; i < vocabSize; i++)
{
vocabIndexMap.put(vocab[i].word, i);
}
minReduce++;
} | Reduces the vocabulary by removing infrequent tokens |
String readWord(BufferedReader raf) throws IOException
{
while (true)
{
// check the buffer first
if (wbp < wordsBuffer.length)
{
return wordsBuffer[wbp++];
}
String line = raf.readLine();
if (line == null)
{ // end of corpus
eoc = true;
return null;
}
line = line.trim();
if (line.length() == 0)
{
continue;
}
cache.writeInt(-3); // mark end of sentence
wordsBuffer = line.split("\\s+");
wbp = 0;
eoc = false;
}
} | Reads a single word from a file, assuming space + tab + EOL to be word boundaries
@param raf
@return null if EOF
@throws IOException |
static boolean loadDat(String path)
{
try
{
ByteArray byteArray = ByteArray.createByteArray(path + Predefine.BIN_EXT);
if (byteArray == null) return false;
int size = byteArray.nextInt();
CoreDictionary.Attribute[] attributes = new CoreDictionary.Attribute[size];
final Nature[] natureIndexArray = Nature.values();
for (int i = 0; i < size; ++i)
{
// the first int is the total frequency, the second is the number of POS tags
int currentTotalFrequency = byteArray.nextInt();
int length = byteArray.nextInt();
attributes[i] = new CoreDictionary.Attribute(length);
attributes[i].totalFrequency = currentTotalFrequency;
for (int j = 0; j < length; ++j)
{
attributes[i].nature[j] = natureIndexArray[byteArray.nextInt()];
attributes[i].frequency[j] = byteArray.nextInt();
}
}
if (!trie.load(byteArray, attributes) || byteArray.hasMore()) return false;
}
catch (Exception e)
{
logger.warning("读取失败,问题发生在" + e);
return false;
}
return true;
} | Load the double-array trie from disk
@param path
@return |
public static int getTermFrequency(String term)
{
Attribute attribute = get(term);
if (attribute == null) return 0;
return attribute.totalFrequency;
} | Get the frequency of a term
@param term
@return |
public void addTerm(String key)
{
TermFrequency value = trieSingle.get(key);
if (value == null)
{
value = new TermFrequency(key);
trieSingle.put(key, value);
}
else
{
value.increase();
}
++totalTerm;
} | Count term frequency
@param key the term to add |
public int getTermFrequency(String term)
{
TermFrequency termFrequency = trieSingle.get(term);
if (termFrequency == null) return 0;
return termFrequency.getValue();
} | Get the frequency of a term
@param term
@return |
public double computeLeftEntropy(PairFrequency pair)
{
Set<Map.Entry<String, TriaFrequency>> entrySet = trieTria.prefixSearch(pair.getKey() + LEFT);
return computeEntropy(entrySet);
} | Compute the left entropy
@param pair
@return |
public double computeRightEntropy(PairFrequency pair)
{
Set<Map.Entry<String, TriaFrequency>> entrySet = trieTria.prefixSearch(pair.getKey() + RIGHT);
return computeEntropy(entrySet);
} | Compute the right entropy
@param pair
@return |
public void compute()
{
entrySetPair = triePair.entrySet();
double total_mi = 0;
double total_le = 0;
double total_re = 0;
for (Map.Entry<String, PairFrequency> entry : entrySetPair)
{
PairFrequency value = entry.getValue();
value.mi = computeMutualInformation(value);
value.le = computeLeftEntropy(value);
value.re = computeRightEntropy(value);
total_mi += value.mi;
total_le += value.le;
total_re += value.re;
}
for (Map.Entry<String, PairFrequency> entry : entrySetPair)
{
PairFrequency value = entry.getValue();
value.score = value.mi / total_mi + value.le / total_le + value.re / total_re; // normalize
value.score *= entrySetPair.size();
}
} | Run the computation once all data has been fed in |
public void segment(String text, List<String> output)
{
String normalized = CharTable.convert(text);
segment(text, normalized, output);
} | Chinese word segmentation
@param text
@param output |
public String[] partOfSpeechTag(List<String> wordList)
{
if (posTagger == null)
{
throw new IllegalStateException("未提供词性标注模型");
}
return tag(wordList);
} | Part-of-speech tagging
@param wordList
@return |
public String[] namedEntityRecognize(String[] wordArray, String[] posArray)
{
if (neRecognizer == null)
{
throw new IllegalStateException("未提供命名实体识别模型");
}
return recognize(wordArray, posArray);
} | Named entity recognition
@param wordArray
@param posArray
@return |
public boolean learn(String segmentedTaggedSentence)
{
Sentence sentence = Sentence.create(segmentedTaggedSentence);
return learn(sentence);
} | Online learning
@param segmentedTaggedSentence a sentence in People's Daily 2014 corpus format, already segmented and tagged with POS and named entities
@return whether learning succeeded (failure means the sentence format is invalid) |
public boolean learn(Sentence sentence)
{
CharTable.normalize(sentence);
if (!getPerceptronSegmenter().learn(sentence)) return false;
if (posTagger != null && !getPerceptronPOSTagger().learn(sentence)) return false;
if (neRecognizer != null && !getPerceptionNERecognizer().learn(sentence)) return false;
return true;
} | Online learning
@param sentence a sentence in People's Daily 2014 corpus format, already segmented and tagged with POS and named entities
@return whether learning succeeded (failure means the sentence format is invalid) |
public String get(int x, int y)
{
if (x < 0) return HEAD + x;
if (x >= v.length) return HEAD + "+" + (x - v.length + 1);
return v[x][y];
} | Get an element of the table
@param x
@param y
@return |
public boolean load(String path)
{
trie = new DoubleArrayTrie<V>();
long start = System.currentTimeMillis();
if (loadDat(ByteArray.createByteArray(path + BIN_EXT)))
{
return true;
}
TreeMap<String, V> map = new TreeMap<String, V>();
try
{
BufferedReader br = new BufferedReader(new InputStreamReader(IOUtil.newInputStream(path), "UTF-8"));
String line;
while ((line = br.readLine()) != null)
{
String[] paramArray = line.split("\\s");
map.put(paramArray[0], createValue(paramArray));
}
br.close();
}
catch (Exception e)
{
logger.warning("读取" + path + "失败" + e);
return false;
}
onLoaded(map);
Set<Map.Entry<String, V>> entrySet = map.entrySet();
List<String> keyList = new ArrayList<String>(entrySet.size());
List<V> valueList = new ArrayList<V>(entrySet.size());
for (Map.Entry<String, V> entry : entrySet)
{
keyList.add(entry.getKey());
valueList.add(entry.getValue());
}
int resultCode = trie.build(keyList, valueList);
if (resultCode != 0)
{
logger.warning("trie建立失败");
return false;
}
logger.info(path + "加载成功,耗时" + (System.currentTimeMillis() - start) + "ms");
saveDat(path + BIN_EXT, valueList);
return true;
} | Load from a txt path
@param path
@return |
protected boolean loadDat(ByteArray byteArray)
{
V[] valueArray = loadValueArray(byteArray);
if (valueArray == null)
{
return false;
}
return trie.load(byteArray.getBytes(), byteArray.getOffset(), valueArray);
} | Load from a dat (binary) byte array
@param byteArray
@return |
protected boolean saveDat(String path, List<V> valueArray)
{
try
{
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(IOUtil.newOutputStream(path)));
out.writeInt(valueArray.size());
for (V item : valueArray)
{
saveValue(item, out);
}
trie.save(out);
out.close();
}
catch (Exception e)
{
logger.warning("保存失败" + TextUtility.exceptionToString(e));
return false;
}
return true;
} | Save the dat (binary) form to a path
@param path
@param valueArray
@return |
public static boolean saveObjectTo(Object o, String path)
{
try
{
ObjectOutputStream oos = new ObjectOutputStream(IOUtil.newOutputStream(path));
oos.writeObject(o);
oos.close();
}
catch (IOException e)
{
logger.warning("在保存对象" + o + "到" + path + "时发生异常" + e);
return false;
}
return true;
} | Serialize an object
@param o
@param path
@return |
public static Object readObjectFrom(String path)
{
ObjectInputStream ois = null;
try
{
ois = new ObjectInputStream(IOUtil.newInputStream(path));
Object o = ois.readObject();
ois.close();
return o;
}
catch (Exception e)
{
logger.warning("在从" + path + "读取对象时发生异常" + e);
}
return null;
} | Deserialize an object
@param path
@return |
public static String readTxt(String path)
{
if (path == null) return null;
try
{
InputStream in = IOAdapter == null ? new FileInputStream(path) :
IOAdapter.open(path);
byte[] fileContent = new byte[in.available()];
int read = readBytesFromOtherInputStream(in, fileContent);
in.close();
// handle the UTF-8 BOM
if (read >= 3 && fileContent[0] == -17 && fileContent[1] == -69 && fileContent[2] == -65)
return new String(fileContent, 3, fileContent.length - 3, Charset.forName("UTF-8"));
return new String(fileContent, Charset.forName("UTF-8"));
}
catch (FileNotFoundException e)
{
logger.warning("找不到" + path + e);
return null;
}
catch (IOException e)
{
logger.warning("读取" + path + "发生IO异常" + e);
return null;
}
} | Read a plain-text file in one pass
@param path
@return |
public static boolean saveTxt(String path, String content)
{
try
{
FileChannel fc = new FileOutputStream(path).getChannel();
fc.write(ByteBuffer.wrap(content.getBytes()));
fc.close();
}
catch (Exception e)
{
logger.throwing("IOUtil", "saveTxt", e);
logger.warning("IOUtil saveTxt 到" + path + "失败" + e.toString());
return false;
}
return true;
} | Quick save
@param path
@param content
@return |
public static byte[] readBytes(String path)
{
try
{
if (IOAdapter == null) return readBytesFromFileInputStream(new FileInputStream(path));
InputStream is = IOAdapter.open(path);
if (is instanceof FileInputStream)
return readBytesFromFileInputStream((FileInputStream) is);
else
return readBytesFromOtherInputStream(is);
}
catch (Exception e)
{
logger.warning("读取" + path + "时发生异常" + e);
}
return null;
} | Read an entire file into a byte array
@param path
@return |
public static byte[] readBytesFromOtherInputStream(InputStream is) throws IOException
{
ByteArrayOutputStream data = new ByteArrayOutputStream();
int readBytes;
byte[] buffer = new byte[Math.max(is.available(), 4096)]; // buffer of at least 4 KB
while ((readBytes = is.read(buffer, 0, buffer.length)) != -1)
{
data.write(buffer, 0, readBytes);
}
data.flush();
return data.toByteArray();
} | Read all data from an InputStream (other than a FileInputStream) into a byte array
@param is
@return
@throws IOException |
public static int readBytesFromOtherInputStream(InputStream is, byte[] targetArray) throws IOException
{
assert targetArray != null;
if (targetArray.length == 0) return 0;
int len;
int off = 0;
while (off < targetArray.length && (len = is.read(targetArray, off, targetArray.length - off)) != -1)
{
off += len;
}
return off;
} | Read bytes from an InputStream into a target array
@param is the stream
@param targetArray output
@return the number of bytes actually read; 0 means end of file was reached
@throws IOException |
public static LinkedList<String> readLineListWithLessMemory(String path)
{
LinkedList<String> result = new LinkedList<String>();
String line = null;
boolean first = true;
try
{
BufferedReader bw = new BufferedReader(new InputStreamReader(IOUtil.newInputStream(path), "UTF-8"));
while ((line = bw.readLine()) != null)
{
if (first)
{
first = false;
if (!line.isEmpty() && line.charAt(0) == '\uFEFF')
line = line.substring(1);
}
result.add(line);
}
bw.close();
}
catch (Exception e)
{
logger.warning("加载" + path + "失败," + e);
}
return result;
} | Read a large file in a memory-saving way
@param path
@return |
public static String dirname(String path)
{
int index = path.lastIndexOf('/');
if (index == -1) return path;
return path.substring(0, index + 1);
} | Get the path of the directory containing the file
@param path
@return |
public static String removeUTF8BOM(String line)
{
if (line != null && line.startsWith("\uFEFF")) // UTF-8 byte order mark (EF BB BF)
{
line = line.substring(1);
}
return line;
} | Remove the UTF-8 BOM from the first line of a file<br>
This is a Java bug that will not be fixed officially. See https://stackoverflow.com/questions/4897876/reading-utf-8-bom-marker
@param line the first line of the file
@return the line with the BOM removed |
public static List<File> fileList(String path)
{
List<File> fileList = new LinkedList<File>();
File folder = new File(path);
if (folder.isDirectory())
enumerate(folder, fileList);
else
fileList.add(folder); // also accept a path that points to a single file
return fileList;
} | Recursively collect all files under a directory
@param path root directory
@return file list |
private static void enumerate(File folder, List<File> fileList)
{
File[] fileArray = folder.listFiles();
if (fileArray != null)
{
for (File file : fileArray)
{
if (file.isFile() && !file.getName().startsWith(".")) // skip hidden files
{
fileList.add(file);
}
else
{
enumerate(file, fileList);
}
}
}
} | Recursively walk a directory
@param folder the directory
@param fileList collects the files |
public static BufferedWriter newBufferedWriter(String path) throws IOException
{
return new BufferedWriter(new OutputStreamWriter(IOUtil.newOutputStream(path), "UTF-8"));
} | Create a BufferedWriter
@param path
@return
@throws FileNotFoundException
@throws UnsupportedEncodingException |
public static BufferedReader newBufferedReader(String path) throws IOException
{
return new BufferedReader(new InputStreamReader(IOUtil.newInputStream(path), "UTF-8"));
} | Create a BufferedReader
@param path
@return
@throws FileNotFoundException
@throws UnsupportedEncodingException |
public static InputStream newInputStream(String path) throws IOException
{
if (IOAdapter == null) return new FileInputStream(path);
return IOAdapter.open(path);
} | Create an input stream (through the IO adapter, if one is set)
@param path
@return
@throws IOException |
public static OutputStream newOutputStream(String path) throws IOException
{
if (IOAdapter == null) return new FileOutputStream(path);
return IOAdapter.create(path);
} | Create an output stream (through the IO adapter, if one is set)
@param path
@return
@throws IOException |
public static String getSuffix(String name, String delimiter)
{
return name.substring(name.lastIndexOf(delimiter) + 1);
} | Get the suffix after the last occurrence of the delimiter
@param name
@param delimiter
@return |
public static void writeLine(BufferedWriter bw, String... params) throws IOException
{
for (int i = 0; i < params.length - 1; i++)
{
bw.write(params[i]);
bw.write('\t');
}
bw.write(params[params.length - 1]);
} | Write an array, tab-separated
@param bw
@param params
@throws IOException |
public static TreeMap<String, CoreDictionary.Attribute> loadDictionary(String... pathArray) throws IOException
{
TreeMap<String, CoreDictionary.Attribute> map = new TreeMap<String, CoreDictionary.Attribute>();
for (String path : pathArray)
{
File file = new File(path);
String fileName = file.getName();
int natureIndex = fileName.lastIndexOf(' ');
Nature defaultNature = Nature.n;
if (natureIndex > 0)
{
String natureString = fileName.substring(natureIndex + 1);
path = file.getParent() + File.separator + fileName.substring(0, natureIndex);
if (natureString.length() > 0 && !natureString.endsWith(".txt") && !natureString.endsWith(".csv"))
{
defaultNature = Nature.create(natureString);
}
}
BufferedReader br = new BufferedReader(new InputStreamReader(IOUtil.newInputStream(path), "UTF-8"));
loadDictionary(br, map, path.endsWith(".csv"), defaultNature);
}
return map;
} | Load dictionaries; every dictionary must follow the HanLP core dictionary format
@param pathArray dictionary paths, any number of them. Each path may carry a default POS tag after a space, e.g. “全国地名大全.txt ns”
@return a map holding the entries
@throws IOException thrown when loading fails |
public static void loadDictionary(BufferedReader br, TreeMap<String, CoreDictionary.Attribute> storage, boolean isCSV, Nature defaultNature) throws IOException
{
String splitter = "\\s";
if (isCSV)
{
splitter = ",";
}
String line;
boolean firstLine = true;
while ((line = br.readLine()) != null)
{
if (firstLine)
{
line = IOUtil.removeUTF8BOM(line);
firstLine = false;
}
String param[] = line.split(splitter);
int natureCount = (param.length - 1) / 2;
CoreDictionary.Attribute attribute;
if (natureCount == 0)
{
attribute = new CoreDictionary.Attribute(defaultNature);
}
else
{
attribute = new CoreDictionary.Attribute(natureCount);
for (int i = 0; i < natureCount; ++i)
{
attribute.nature[i] = LexiconUtility.convertStringToNature(param[1 + 2 * i]);
attribute.frequency[i] = Integer.parseInt(param[2 + 2 * i]);
attribute.totalFrequency += attribute.frequency[i];
}
}
storage.put(param[0], attribute);
}
br.close();
} | Load the entries from a BufferedReader into a dictionary
@param br source
@param storage destination map
@throws IOException thrown when loading fails |
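For illustration, a hedged sketch of the line format this loader expects (the entries below are made up): each whitespace-separated line (comma-separated for CSV) is a word followed by zero or more POS-tag/frequency pairs; a line with no pairs gets the default POS tag.

自然语言处理 n 10 nz 3
深度学习 nz 15
测试词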
public static boolean isFileExisted(String path)
{
File file = new File(path);
return file.isFile() && file.exists();
} | Whether a local file exists
@param path
@return |
public Edge getEdge(Node from, Node to)
{
// first try word + word
Attribute attribute = get(from.compiledWord, to.compiledWord);
if (attribute == null) attribute = get(from.compiledWord, WordNatureWeightModelMaker.wrapTag(to.label));
if (attribute == null) attribute = get(WordNatureWeightModelMaker.wrapTag(from.label), to.compiledWord);
if (attribute == null) attribute = get(WordNatureWeightModelMaker.wrapTag(from.label), WordNatureWeightModelMaker.wrapTag(to.label));
if (attribute == null)
{
attribute = Attribute.NULL;
}
if (HanLP.Config.DEBUG)
{
System.out.println(from + " 到 " + to + " : " + attribute);
}
return new Edge(from.id, to.id, attribute.dependencyRelation[0], attribute.p[0]);
} | Score the edge between two nodes
@param from
@param to
@return |
public int add(K key)
{
int[] f = get(key);
if (f == null)
{
f = new int[]{1};
put(key, f);
}
else ++f[0];
return f[0];
} | Increase the frequency of a word
@param key
@return |
public List<Map.Entry<String, Float>> analogy(String A, String B, String C)
{
return analogy(A, B, C, 10);
} | Return the words most similar to A - B + C, e.g. 中国 - 北京 + 东京 = 日本. The arguments are passed in the order 中国 北京 东京
@param A the word to add
@param B the word to subtract
@param C the word to add
@return the words semantically closest to (A - B + C), with their similarities |
public List<Map.Entry<String, Float>> analogy(String A, String B, String C, int size)
{
Vector a = storage.get(A);
Vector b = storage.get(B);
Vector c = storage.get(C);
if (a == null || b == null || c == null)
{
return Collections.emptyList();
}
List<Map.Entry<String, Float>> resultList = nearest(a.minus(b).add(c), size + 3);
ListIterator<Map.Entry<String, Float>> listIterator = resultList.listIterator();
while (listIterator.hasNext())
{
String key = listIterator.next().getKey();
if (key.equals(A) || key.equals(B) || key.equals(C))
{
listIterator.remove();
}
}
if (resultList.size() > size)
{
resultList = resultList.subList(0, size);
}
return resultList;
} | Return the words most similar to A - B + C, e.g. 中国 - 北京 + 东京 = 日本. The arguments are passed in the order 中国 北京 东京
@param A the word to add
@param B the word to subtract
@param C the word to add
@param size top N
@return the words semantically closest to (A - B + C), with their similarities |
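A minimal usage sketch; `model` stands for an already loaded word-vector model exposing these methods (the variable name and printed result are illustrative only):

List<Map.Entry<String, Float>> result = model.analogy("中国", "北京", "东京", 5);
for (Map.Entry<String, Float> entry : result)
{
    System.out.println(entry.getKey() + "\t" + entry.getValue()); // e.g. 日本 followed by its similarity
}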
private static String[][] resizeArray(String[][] array, int size)
{
if (array.length == size) return array;
String[][] nArray = new String[size][];
System.arraycopy(array, 0, nArray, 0, size);
return nArray;
} | Shrink the array; atomic segmentation may make the table shorter than the original
@param array
@param size
@return |
public static List<String> to863(List<Term> termList)
{
List<String> posTagList = new ArrayList<String>(termList.size());
for (Term term : termList)
{
String posTag = posConverter.get(term.nature.toString());
if (posTag == null)
posTag = term.nature.toString();
posTagList.add(posTag);
}
return posTagList;
} | Convert to the 863 POS tag set<br>
The 863 POS tag set; the meaning of each tag is listed below:
<p>
Tag | Description | Example | Tag | Description | Example
---|---|---|---|---|---
a | adjective | 美丽 | ni | organization name | 保险公司
b | other noun-modifier | 大型, 西式 | nl | location noun | 城郊
c | conjunction | 和, 虽然 | ns | geographical name | 北京
d | adverb | 很 | nt | temporal noun | 近日, 明代
e | exclamation | 哎 | nz | other proper noun | 诺贝尔奖
g | morpheme | 茨, 甥 | o | onomatopoeia | 哗啦
h | prefix | 阿, 伪 | p | preposition | 在, 把
i | idiom | 百花齐放 | q | quantity | 个
j | abbreviation | 公检法 | r | pronoun | 我们
k | suffix | 界, 率 | u | auxiliary | 的, 地
m | number | 一, 第一 | v | verb | 跑, 学习
n | general noun | 苹果 | wp | punctuation | ,。!
nd | direction noun | 右侧 | ws | foreign words | CPU
nh | person name | 杜甫, 汤姆 | x | non-lexeme | 萄, 翱
@param termList
@return |
public static float evaluate(POSTagger tagger, String corpus)
{
int correct = 0, total = 0;
IOUtil.LineIterator lineIterator = new IOUtil.LineIterator(corpus);
for (String line : lineIterator)
{
Sentence sentence = Sentence.create(line);
if (sentence == null) continue;
String[][] wordTagArray = sentence.toWordTagArray();
String[] prediction = tagger.tag(wordTagArray[0]);
assert prediction.length == wordTagArray[1].length;
total += prediction.length;
for (int i = 0; i < prediction.length; i++)
{
if (prediction[i].equals(wordTagArray[1][i]))
++correct;
}
}
if (total == 0) return 0;
return correct / (float) total * 100;
} | Evaluate the accuracy of a POS tagger
@param tagger the POS tagger
@param corpus the test set
@return accuracy as a percentage |
public static PairFrequency create(String first, char delimiter, String second)
{
PairFrequency pairFrequency = new PairFrequency(first + delimiter + second);
pairFrequency.first = first;
pairFrequency.delimiter = delimiter;
pairFrequency.second = second;
return pairFrequency;
} | Construct a PairFrequency
@param first
@param delimiter
@param second
@return |
final public Vector vector(K key)
{
Vector vector = storage.get(key);
if (vector == null) return null;
return vector;
} | Get the vector of a key (the key is not preprocessed)
@param key the key
@return the vector |
public float similarity(K what, K with)
{
Vector vectorWhat = storage.get(what);
if (vectorWhat == null)
{
return -1f;
}
Vector vectorWith = storage.get(with);
if (vectorWith == null)
{
return -1f;
}
return vectorWhat.cosineForUnitVector(vectorWith);
} | Cosine similarity
@param what one word
@param with another word
@return cosine similarity |
public List<Map.Entry<K, Float>> nearest(K key, int size)
{
Vector vector = storage.get(key);
if (vector == null)
{
return Collections.emptyList();
}
return nearest(key, vector, size);
} | Query the elements most similar to key
@param key the key
@param size top N
@return a list of entries whose keys are similar words and values are similarities, sorted by similarity in descending order |
public List<Map.Entry<K, Float>> nearest(Vector vector, int size)
{
MaxHeap<Map.Entry<K, Float>> maxHeap = new MaxHeap<Map.Entry<K, Float>>(size, new Comparator<Map.Entry<K, Float>>()
{
@Override
public int compare(Map.Entry<K, Float> o1, Map.Entry<K, Float> o2)
{
return o1.getValue().compareTo(o2.getValue());
}
});
for (Map.Entry<K, Vector> entry : storage.entrySet())
{
maxHeap.add(new AbstractMap.SimpleEntry<K, Float>(entry.getKey(), entry.getValue().cosineForUnitVector(vector)));
}
return maxHeap.toList();
} | Get the words most similar to a vector
@param vector the vector
@param size top N
@return a list of entries whose keys are similar words and values are similarities, sorted by similarity in descending order |
public List<Map.Entry<K, Float>> nearest(Vector vector)
{
return nearest(vector, 10);
} | Get the words most similar to a vector (10 by default)
@param vector the vector
@return a list of entries whose keys are similar words and values are similarities, sorted by similarity in descending order |
public List<Map.Entry<K, Float>> nearest(K key)
{
return nearest(key, 10);
} | Query the words most similar to a word
@param key the word
@return a list of entries whose keys are similar words and values are similarities, sorted by similarity in descending order |
final List<Map.Entry<K, Float>> queryNearest(String query, int size)
{
if (query == null || query.length() == 0)
{
return Collections.emptyList();
}
try
{
return nearest(query(query), size);
}
catch (Exception e)
{
return Collections.emptyList();
}
} | Run a most-similar query (subclasses decide how to parse the query in their query method, then call this method to execute it)
@param query the query string (i.e. the content of an object)
@param size how many top objects to return
@return |
public int dimension()
{
if (storage == null || storage.isEmpty())
{
return 0;
}
return storage.values().iterator().next().size();
} | Dimension of the word vectors in this model
@return |
public final List<Pair<String, Double>> predict(String[] context)
{
List<Pair<String, Double>> result = new ArrayList<Pair<String, Double>>(outcomeNames.length);
double[] p = eval(context);
for (int i = 0; i < p.length; ++i)
{
result.add(new Pair<String, Double>(outcomeNames[i], p[i]));
}
return result;
} | Predict the distribution
@param context
@return |
public final Pair<String, Double> predictBest(String[] context)
{
List<Pair<String, Double>> resultList = predict(context);
double bestP = -1.0;
Pair<String, Double> bestPair = null;
for (Pair<String, Double> pair : resultList)
{
if (pair.getSecond() > bestP)
{
bestP = pair.getSecond();
bestPair = pair;
}
}
return bestPair;
} | Predict the class with the highest probability
@param context
@return |
public final List<Pair<String, Double>> predict(Collection<String> context)
{
return predict(context.toArray(new String[0]));
} | Predict the distribution
@param context
@return |
public final double[] eval(String[] context, double[] outsums)
{
assert context != null;
int[] scontexts = new int[context.length];
for (int i = 0; i < context.length; i++)
{
Integer ci = pmap.get(context[i]);
scontexts[i] = ci == null ? -1 : ci;
}
prior.logPrior(outsums);
return eval(scontexts, outsums, evalParams);
} | Predict the distribution
@param context the context
@param outsums the prior distribution
@return probability array |
public static double[] eval(int[] context, double[] prior, EvalParameters model)
{
Context[] params = model.getParams();
int numfeats[] = new int[model.getNumOutcomes()];
int[] activeOutcomes;
double[] activeParameters;
double value = 1;
for (int ci = 0; ci < context.length; ci++)
{
if (context[ci] >= 0)
{
Context predParams = params[context[ci]];
activeOutcomes = predParams.getOutcomes();
activeParameters = predParams.getParameters();
for (int ai = 0; ai < activeOutcomes.length; ai++)
{
int oid = activeOutcomes[ai];
numfeats[oid]++;
prior[oid] += activeParameters[ai] * value;
}
}
}
double normal = 0.0;
for (int oid = 0; oid < model.getNumOutcomes(); oid++)
{
if (model.getCorrectionParam() != 0)
{
prior[oid] = Math
.exp(prior[oid]
* model.getConstantInverse()
+ ((1.0 - ((double) numfeats[oid] / model
.getCorrectionConstant())) * model.getCorrectionParam()));
}
else
{
prior[oid] = Math.exp(prior[oid] * model.getConstantInverse());
}
normal += prior[oid];
}
for (int oid = 0; oid < model.getNumOutcomes(); oid++)
{
prior[oid] /= normal;
}
return prior;
} | Predict
@param context the context
@param prior prior probabilities
@param model model parameters (feature functions)
@return |
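Read as a formula, the loop above computes the following (a sketch; the symbols are mine, not from the source: $C$ is the correction constant, $1/C$ the constant inverse, $\gamma$ the correction parameter, $\lambda_{f,o}$ the weight of feature $f$ for outcome $o$, $n_o$ the number of active features for outcome $o$; the log-prior added beforehand is omitted, and the correction term disappears when $\gamma = 0$, as in the code):

$$p(o \mid \text{context}) = \frac{\exp\!\Big(\tfrac{1}{C}\sum_{f \in \text{context}} \lambda_{f,o} + \big(1 - \tfrac{n_o}{C}\big)\gamma\Big)}{\sum_{o'} \exp\!\Big(\tfrac{1}{C}\sum_{f \in \text{context}} \lambda_{f,o'} + \big(1 - \tfrac{n_{o'}}{C}\big)\gamma\Big)}$$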
public static MaxEntModel create(String path)
{
MaxEntModel m = new MaxEntModel();
try
{
BufferedReader br = new BufferedReader(new InputStreamReader(IOUtil.newInputStream(path), "UTF-8"));
DataOutputStream out = new DataOutputStream(IOUtil.newOutputStream(path + Predefine.BIN_EXT));
br.readLine(); // type
m.correctionConstant = Integer.parseInt(br.readLine()); // correctionConstant
out.writeInt(m.correctionConstant);
m.correctionParam = Double.parseDouble(br.readLine()); // getCorrectionParameter
out.writeDouble(m.correctionParam);
// label
int numOutcomes = Integer.parseInt(br.readLine());
out.writeInt(numOutcomes);
String[] outcomeLabels = new String[numOutcomes];
m.outcomeNames = outcomeLabels;
for (int i = 0; i < numOutcomes; i++)
{
outcomeLabels[i] = br.readLine();
TextUtility.writeString(outcomeLabels[i], out);
}
// pattern
int numOCTypes = Integer.parseInt(br.readLine());
out.writeInt(numOCTypes);
int[][] outcomePatterns = new int[numOCTypes][];
for (int i = 0; i < numOCTypes; i++)
{
StringTokenizer tok = new StringTokenizer(br.readLine(), " ");
int[] infoInts = new int[tok.countTokens()];
out.writeInt(infoInts.length);
for (int j = 0; tok.hasMoreTokens(); j++)
{
infoInts[j] = Integer.parseInt(tok.nextToken());
out.writeInt(infoInts[j]);
}
outcomePatterns[i] = infoInts;
}
// feature
int NUM_PREDS = Integer.parseInt(br.readLine());
out.writeInt(NUM_PREDS);
String[] predLabels = new String[NUM_PREDS];
m.pmap = new DoubleArrayTrie<Integer>();
TreeMap<String, Integer> tmpMap = new TreeMap<String, Integer>();
for (int i = 0; i < NUM_PREDS; i++)
{
predLabels[i] = br.readLine();
assert !tmpMap.containsKey(predLabels[i]) : "重复的键: " + predLabels[i] + " 请使用 -Dfile.encoding=UTF-8 训练";
TextUtility.writeString(predLabels[i], out);
tmpMap.put(predLabels[i], i);
}
m.pmap.build(tmpMap);
for (Map.Entry<String, Integer> entry : tmpMap.entrySet())
{
out.writeInt(entry.getValue());
}
m.pmap.save(out);
// params
Context[] params = new Context[NUM_PREDS];
int pid = 0;
for (int i = 0; i < outcomePatterns.length; i++)
{
int[] outcomePattern = new int[outcomePatterns[i].length - 1];
for (int k = 1; k < outcomePatterns[i].length; k++)
{
outcomePattern[k - 1] = outcomePatterns[i][k];
}
for (int j = 0; j < outcomePatterns[i][0]; j++)
{
double[] contextParameters = new double[outcomePatterns[i].length - 1];
for (int k = 1; k < outcomePatterns[i].length; k++)
{
contextParameters[k - 1] = Double.parseDouble(br.readLine());
out.writeDouble(contextParameters[k - 1]);
}
params[pid] = new Context(outcomePattern, contextParameters);
pid++;
}
}
// prior
m.prior = new UniformPrior();
m.prior.setLabels(outcomeLabels);
// eval
m.evalParams = new EvalParameters(params, m.correctionParam, m.correctionConstant, outcomeLabels.length);
out.close();
}
catch (Exception e)
{
logger.severe("从" + path + "加载最大熵模型失败!" + TextUtility.exceptionToString(e));
return null;
}
return m;
} | Load from a file, caching it as a binary file at the same time
@param path
@return |
public static MaxEntModel create(ByteArray byteArray)
{
MaxEntModel m = new MaxEntModel();
m.correctionConstant = byteArray.nextInt(); // correctionConstant
m.correctionParam = byteArray.nextDouble(); // getCorrectionParameter
// label
int numOutcomes = byteArray.nextInt();
String[] outcomeLabels = new String[numOutcomes];
m.outcomeNames = outcomeLabels;
for (int i = 0; i < numOutcomes; i++) outcomeLabels[i] = byteArray.nextString();
// pattern
int numOCTypes = byteArray.nextInt();
int[][] outcomePatterns = new int[numOCTypes][];
for (int i = 0; i < numOCTypes; i++)
{
int length = byteArray.nextInt();
int[] infoInts = new int[length];
for (int j = 0; j < length; j++)
{
infoInts[j] = byteArray.nextInt();
}
outcomePatterns[i] = infoInts;
}
// feature
int NUM_PREDS = byteArray.nextInt();
String[] predLabels = new String[NUM_PREDS];
m.pmap = new DoubleArrayTrie<Integer>();
for (int i = 0; i < NUM_PREDS; i++)
{
predLabels[i] = byteArray.nextString();
}
Integer[] v = new Integer[NUM_PREDS];
for (int i = 0; i < v.length; i++)
{
v[i] = byteArray.nextInt();
}
m.pmap.load(byteArray, v);
// params
Context[] params = new Context[NUM_PREDS];
int pid = 0;
for (int i = 0; i < outcomePatterns.length; i++)
{
int[] outcomePattern = new int[outcomePatterns[i].length - 1];
for (int k = 1; k < outcomePatterns[i].length; k++)
{
outcomePattern[k - 1] = outcomePatterns[i][k];
}
for (int j = 0; j < outcomePatterns[i][0]; j++)
{
double[] contextParameters = new double[outcomePatterns[i].length - 1];
for (int k = 1; k < outcomePatterns[i].length; k++)
{
contextParameters[k - 1] = byteArray.nextDouble();
}
params[pid] = new Context(outcomePattern, contextParameters);
pid++;
}
}
// prior
m.prior = new UniformPrior();
m.prior.setLabels(outcomeLabels);
// eval
m.evalParams = new EvalParameters(params, m.correctionParam, m.correctionConstant, outcomeLabels.length);
return m;
} | Fast load from a byte stream
@param byteArray
@return |
public static MaxEntModel load(String txtPath)
{
ByteArray byteArray = ByteArray.createByteArray(txtPath + Predefine.BIN_EXT);
if (byteArray != null) return create(byteArray);
return create(txtPath);
} | Load a maximum entropy model<br>
If a cache exists it is read first; otherwise the txt file is read and a cache is built
@param txtPath path to the txt file; even when only the .bin exists, pass the txt path, and the .bin suffix is appended internally
@return |
void set_composite_vector()
{
composite_.clear();
for (Document<K> document : documents_)
{
composite_.add_vector(document.feature());
}
} | Add the vectors of all documents to a composite vector. |
void clear()
{
documents_.clear();
composite_.clear();
if (centroid_ != null)
centroid_.clear();
if (sectioned_clusters_ != null)
sectioned_clusters_.clear();
sectioned_gain_ = 0.0;
} | Clear status. |
SparseVector centroid_vector()
{
if (documents_.size() > 0 && composite_.size() == 0)
set_composite_vector();
centroid_ = (SparseVector) composite_vector().clone();
centroid_.normalize();
return centroid_;
} | Get the pointer of a centroid vector.
@return the pointer of a centroid vector |
void add_document(Document doc)
{
doc.feature().normalize();
documents_.add(doc);
composite_.add_vector(doc.feature());
} | Add a document.
@param doc the pointer of a document object |
void remove_document(int index)
{
ListIterator<Document<K>> listIterator = documents_.listIterator(index);
Document<K> document = listIterator.next();
listIterator.set(null);
composite_.sub_vector(document.feature());
} | Remove a document from this cluster.
@param index the index of vector container of documents |
void remove_document(Document doc)
{
int index = 0;
for (Document<K> document : documents_)
{
if (document.equals(doc))
{
remove_document(index); // delegate to the index-based removal above
return;
}
++index;
}
} | Remove a document from this cluster.
@param doc the pointer of a document object |
void refresh()
{
ListIterator<Document<K>> listIterator = documents_.listIterator();
while (listIterator.hasNext())
{
if (listIterator.next() == null)
listIterator.remove();
}
} | Delete removed documents from the internal container. |
void set_sectioned_gain()
{
double gain = 0.0f;
if (sectioned_gain_ == 0 && sectioned_clusters_.size() > 1)
{
for (Cluster<K> cluster : sectioned_clusters_)
{
gain += cluster.composite_vector().norm();
}
gain -= composite_.norm();
}
sectioned_gain_ = gain;
} | Set the gain obtained when the cluster is sectioned. |
void choose_smartly(int ndocs, List<Document> docs)
{
int siz = size();
double[] closest = new double[siz];
if (siz < ndocs)
ndocs = siz;
int index, count = 0;
index = random.nextInt(siz); // initial center
docs.add(documents_.get(index));
++count;
double potential = 0.0;
for (int i = 0; i < documents_.size(); i++)
{
double dist = 1.0 - SparseVector.inner_product(documents_.get(i).feature(), documents_.get(index).feature());
potential += dist;
closest[i] = dist;
}
// choose each center
while (count < ndocs)
{
double randval = random.nextDouble() * potential;
for (index = 0; index < documents_.size(); index++)
{
double dist = closest[index];
if (randval <= dist)
break;
randval -= dist;
}
if (index == documents_.size())
index--;
docs.add(documents_.get(index));
++count;
double new_potential = 0.0;
for (int i = 0; i < documents_.size(); i++)
{
double dist = 1.0 - SparseVector.inner_product(documents_.get(i).feature(), documents_.get(index).feature());
double min = closest[i];
if (dist < min)
{
closest[i] = dist;
min = dist;
}
new_potential += min;
}
potential = new_potential;
}
} | Choose the initial centroids
@param ndocs number of centroids
@param docs the list the chosen documents are appended to |
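The seeding above is k-means++-like: the first center is picked uniformly at random, and each further center is drawn with probability proportional to its distance to the nearest center chosen so far, where the distance is one minus the inner product of the (normalized) feature vectors. As a sketch of the sampling rule:

$$P(\text{pick } d_i) = \frac{D(d_i)}{\sum_j D(d_j)}, \qquad D(d_i) = 1 - \langle d_i,\, c_{\text{nearest}} \rangle$$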
void section(int nclusters)
{
if (size() < nclusters)
return;
sectioned_clusters_ = new ArrayList<Cluster<K>>(nclusters);
List<Document> centroids = new ArrayList<Document>(nclusters);
// choose_randomly(nclusters, centroids);
choose_smartly(nclusters, centroids);
for (int i = 0; i < centroids.size(); i++)
{
Cluster<K> cluster = new Cluster<K>();
sectioned_clusters_.add(cluster);
}
for (Document<K> d : documents_)
{
double max_similarity = -1.0;
int max_index = 0;
for (int j = 0; j < centroids.size(); j++)
{
double similarity = SparseVector.inner_product(d.feature(), centroids.get(j).feature());
if (max_similarity < similarity)
{
max_similarity = similarity;
max_index = j;
}
}
sectioned_clusters_.get(max_index).add_document(d);
}
} | Split this cluster into nclusters clusters
@param nclusters |
public static String extractGivenName(String name)
{
if (name.length() <= 2)
return "_" + name.substring(name.length() - 1);
else
return name.substring(name.length() - 2);
} | Strip the surname and extract the given name from a Chinese person name
@param name the full name
@return the given name |
public static void parsePattern(List<NT> ntList, List<Vertex> vertexList, final WordNet wordNetOptimum, final WordNet wordNetAll)
{
// ListIterator<Vertex> listIterator = vertexList.listIterator();
StringBuilder sbPattern = new StringBuilder(ntList.size());
for (NT nt : ntList)
{
sbPattern.append(nt.toString());
}
String pattern = sbPattern.toString();
final Vertex[] wordArray = vertexList.toArray(new Vertex[0]);
trie.parseText(pattern, new AhoCorasickDoubleArrayTrie.IHit<String>()
{
@Override
public void hit(int begin, int end, String keyword)
{
StringBuilder sbName = new StringBuilder();
for (int i = begin; i < end; ++i)
{
sbName.append(wordArray[i].realWord);
}
String name = sbName.toString();
// adjust for some bad cases
if (isBadCase(name)) return;
// officially count it as an organization name
if (HanLP.Config.DEBUG)
{
System.out.printf("识别出机构名:%s %s\n", name, keyword);
}
int offset = 0;
for (int i = 0; i < begin; ++i)
{
offset += wordArray[i].realWord.length();
}
wordNetOptimum.insert(offset, new Vertex(Predefine.TAG_GROUP, name, ATTRIBUTE, WORD_ID), wordNetAll);
}
});
} | Pattern matching
@param ntList the determined tag sequence
@param vertexList the original sequence without role tags
@param wordNetOptimum the word net to be optimized
@param wordNetAll |
public static void normalizeExp(Map<String, Double> predictionScores)
{
Set<Map.Entry<String, Double>> entrySet = predictionScores.entrySet();
double max = Double.NEGATIVE_INFINITY;
for (Map.Entry<String, Double> entry : entrySet)
{
max = Math.max(max, entry.getValue());
}
double sum = 0.0;
// subtract the maximum to prevent floating-point overflow
for (Map.Entry<String, Double> entry : entrySet)
{
Double value = Math.exp(entry.getValue() - max);
entry.setValue(value);
sum += value;
}
if (sum != 0.0)
{
for (Map.Entry<String, Double> entry : entrySet)
{
predictionScores.put(entry.getKey(), entry.getValue() / sum);
}
}
} | Normalize a set of log-domain scores using the log-sum-exp trick
@param predictionScores |
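In formula form, the routine above is the numerically stable softmax over scores $s_1, \dots, s_n$:

$$\tilde{s}_k = \frac{e^{\,s_k - m}}{\sum_i e^{\,s_i - m}}, \qquad m = \max_j s_j$$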
public static double calculateWeight(Vertex from, Vertex to)
{
int frequency = from.getAttribute().totalFrequency;
if (frequency == 0)
{
frequency = 1; // prevent division by zero
}
// int nTwoWordsFreq = BiGramDictionary.getBiFrequency(from.word, to.word);
int nTwoWordsFreq = CoreBiGramTableDictionary.getBiFrequency(from.wordID, to.wordID);
double value = -Math.log(dSmoothingPara * frequency / (MAX_FREQUENCY) + (1 - dSmoothingPara) * ((1 - dTemp) * nTwoWordsFreq / frequency + dTemp));
if (value < 0.0)
{
value = -value;
}
// logger.info(String.format("%5s frequency:%6d, %s nTwoWordsFreq:%3d, weight:%.2f", from.word, frequency, from.word + "@" + to.word, nTwoWordsFreq, value));
return value;
} | Cost of the transition from one word to the next
@param from the preceding word
@param to the following word
@return the score |
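In formula form (a sketch; $\lambda$ = dSmoothingPara, $\mu$ = dTemp, $N$ = MAX_FREQUENCY, $f(A)$ the unigram frequency of the previous word, $f(A, B)$ the bigram frequency):

$$w(A \to B) = -\log\!\Big(\lambda\,\frac{f(A)}{N} + (1-\lambda)\Big((1-\mu)\,\frac{f(A,B)}{f(A)} + \mu\Big)\Big)$$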
public static <TERM> Map<TERM, Double> tf(Collection<TERM> document, TfType type)
{
Map<TERM, Double> tf = new HashMap<TERM, Double>();
for (TERM term : document)
{
Double f = tf.get(term);
if (f == null) f = 0.0;
tf.put(term, f + 1);
}
if (type != TfType.NATURAL)
{
for (TERM term : tf.keySet())
{
switch (type)
{
case LOGARITHM:
tf.put(term, 1 + Math.log(tf.get(term)));
break;
case BOOLEAN:
tf.put(term, tf.get(term) == 0.0 ? 0.0 : 1.0);
break;
}
}
}
return tf;
} | Term frequency of a single document
@param document bag of words
@param type how term frequency is computed
@param <TERM> term type
@return a Map of term frequencies |
public static <TERM> Map<TERM, Double> tf(Collection<TERM> document)
{
return tf(document, TfType.NATURAL);
} | Term frequency of a single document
@param document bag of words
@param <TERM> term type
@return a Map of term frequencies |
public static <TERM> Iterable<Map<TERM, Double>> tfs(Iterable<Collection<TERM>> documents, TfType type)
{
List<Map<TERM, Double>> tfs = new ArrayList<Map<TERM, Double>>();
for (Collection<TERM> document : documents)
{
tfs.add(tf(document, type));
}
return tfs;
} | Term frequencies of multiple documents
@param documents multiple documents, each a bag of words
@param type how term frequency is computed
@param <TERM> term type
@return a list of term-frequency Maps |
public static <TERM> Iterable<Map<TERM, Double>> tfs(Iterable<Collection<TERM>> documents)
{
return tfs(documents, TfType.NATURAL);
} | Term frequencies of multiple documents
@param documents multiple documents, each a bag of words
@param <TERM> term type
@return a list of term-frequency Maps |
public static <TERM> Map<TERM, Double> idf(Iterable<Iterable<TERM>> documentVocabularies,
boolean smooth, boolean addOne)
{
Map<TERM, Integer> df = new HashMap<TERM, Integer>();
int d = smooth ? 1 : 0;
int a = addOne ? 1 : 0;
int n = d;
for (Iterable<TERM> documentVocabulary : documentVocabularies)
{
n += 1;
for (TERM term : documentVocabulary)
{
Integer t = df.get(term);
if (t == null) t = d;
df.put(term, t + 1);
}
}
Map<TERM, Double> idf = new HashMap<TERM, Double>();
for (Map.Entry<TERM, Integer> e : df.entrySet())
{
TERM term = e.getKey();
double f = e.getValue();
idf.put(term, Math.log(n / f) + a);
}
return idf;
} | Inverse document frequency over a collection of documents
@param documentVocabularies the document vocabularies
@param smooth smoothing flag; behaves as if there were one extra document containing every term
@param addOne add-one smoothing for tf-idf
@param <TERM> term type
@return a Map from term to inverse document frequency |
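Read off the code above, the value stored for each term is (a sketch; $N$ is the number of documents, $\mathrm{df}(t)$ the number of documents containing $t$, $s = 1$ if smooth else $0$, $a = 1$ if addOne else $0$):

$$\mathrm{idf}(t) = \log\frac{N + s}{\mathrm{df}(t) + s} + a$$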
public static <TERM> Map<TERM, Double> idf(Iterable<Iterable<TERM>> documentVocabularies)
{
return idf(documentVocabularies, true, true);
} | Smoothed inverse document frequency over a collection of documents
@param documentVocabularies the document vocabularies
@param <TERM> term type
@return a Map from term to inverse document frequency |