| file_name (string, 6-86 chars) | file_path (string, 45-249 chars) | content (string, 47-6.26M chars) | file_size (int64, 47-6.26M) | language (1 class: Java) | extension (1 class: .java) | repo_name (767 values) | repo_stars (int64, 8-14.4k) | repo_forks (int64, 0-1.17k) | repo_open_issues (int64, 0-788) | repo_created_at (767 values) | repo_pushed_at (767 values) |
|---|---|---|---|---|---|---|---|---|---|---|---|
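Each row of the dump pairs one Java source file with its repository metadata. A minimal sketch of the row shape as a plain Java record, assuming the column names and ranges from the header above; the record itself is illustrative, not part of any published client:

import java.util.Objects;

// One dataset row, mirroring the schema in the table header.
public record CodeFileRow(
        String fileName,      // file_name: 6-86 chars
        String filePath,      // file_path: 45-249 chars
        String content,       // content: the full Java source, up to ~6.26M chars
        long fileSize,        // file_size: int64, 47 to 6.26M
        String language,      // language: always "Java" in this dump
        String extension,     // extension: always ".java"
        String repoName,      // repo_name: one of 767 distinct repositories
        long repoStars,       // repo_stars: 8 to 14.4k
        long repoForks,       // repo_forks: 0 to 1.17k
        long repoOpenIssues,  // repo_open_issues: 0 to 788
        String repoCreatedAt, // repo_created_at: timestamp string, e.g. "2015-08-07T00:09:19Z"
        String repoPushedAt   // repo_pushed_at: timestamp string
) {
    public CodeFileRow {
        Objects.requireNonNull(content, "content");
    }
}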
Achievements.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/core/constants/Achievements.java | package net.cosmosmc.mcze.core.constants;
import lombok.AllArgsConstructor;
import lombok.Getter;
@Getter
@AllArgsConstructor
public enum Achievements {
UNDEAD_SLAYER("Undead Slayer", "Kill a Zombie"),
BRAIN_EATER("Brain Eater", "Kill a Human"),
INFECTION("Infection", "Get Infected by a Zombie"),
FIRST_GAME_PLAYED("First Game", "Play 1 Game"),
LONG_TIME_PLAYER("Long Time Player", "Play 100 games");
private String name;
private String description;
} | 485 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
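The achievement flags behind this enum are persisted as a fixed-length string of 'f'/'t' characters indexed by enum ordinal (see ProfileLoader and ProfileSaver further down in this dump). A minimal sketch of that round-trip, assuming the Apache Commons StringUtils those classes use:

import org.apache.commons.lang.StringUtils;

// Start with every achievement unearned: one 'f' per enum constant.
char[] flags = StringUtils.repeat("f", Achievements.values().length).toCharArray();

// Earn one achievement by flipping its ordinal slot to 't'.
flags[Achievements.UNDEAD_SLAYER.ordinal()] = 't';

// Query and persist.
boolean earned = flags[Achievements.UNDEAD_SLAYER.ordinal()] == 't';
String stored = new String(flags); // e.g. "tffff" for the five constants above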
KitAction.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/core/constants/KitAction.java | package net.cosmosmc.mcze.core.constants;
import org.bukkit.entity.Player;
import org.bukkit.event.player.PlayerInteractEvent;
import org.bukkit.inventory.ItemStack;
public interface KitAction {
void giveKit(Player player);
void interact(PlayerInteractEvent event, Player player, ItemStack itemStack);
} | 316 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
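KitAction is the contract every kit implements: giveKit hands out the kit's items and interact handles clicks while holding them. A minimal sketch of an implementation against the Bukkit API; the item and dash effect are illustrative assumptions, not one of the plugin's real kits (MilkMan, Leaper, etc.):

import net.cosmosmc.mcze.core.constants.KitAction;
import org.bukkit.Material;
import org.bukkit.entity.Player;
import org.bukkit.event.player.PlayerInteractEvent;
import org.bukkit.inventory.ItemStack;

public class ExampleKit implements KitAction {

    @Override
    public void giveKit(Player player) {
        // Hand the player this kit's signature item.
        player.getInventory().addItem(new ItemStack(Material.BLAZE_ROD));
    }

    @Override
    public void interact(PlayerInteractEvent event, Player player, ItemStack itemStack) {
        // React only to clicks while holding the kit item.
        if (itemStack != null && itemStack.getType() == Material.BLAZE_ROD) {
            // Illustrative effect: a short forward dash.
            player.setVelocity(player.getLocation().getDirection().multiply(1.5));
        }
    }
}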
GameState.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/core/constants/GameState.java | package net.cosmosmc.mcze.core.constants;
public enum GameState {
WAITING, STARTING, RUNNING, NUKEROOM, RESETTING
} | 120 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
InfectReason.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/core/constants/InfectReason.java | package net.cosmosmc.mcze.core.constants;
public enum InfectReason {
ZOMBIE_BITE, JOINING_LATE, DEATH
} | 108 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
KitType.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/core/constants/KitType.java | package net.cosmosmc.mcze.core.constants;
import lombok.AllArgsConstructor;
import lombok.Getter;
import net.cosmosmc.mcze.core.kits.*;
@Getter
@AllArgsConstructor
public enum KitType {
MILKMAN("MilkMan", new MilkMan()),
LEAPER("Leaper", new Leaper()),
DECOY("Decoy", new Decoy()),
FORTIFY("Fortify", new Fortify()),
ARCHER("Archer", new Archer()),
BOOMSTICK("BoomStick", new BoomStick());
private String name;
private KitAction kitAction;
} | 479 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
Ztele.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/commands/Ztele.java | package net.cosmosmc.mcze.commands;
import lombok.AllArgsConstructor;
import net.cosmosmc.mcze.ZombieEscape;
import net.cosmosmc.mcze.core.GameArena;
import net.cosmosmc.mcze.core.constants.Messages;
import org.bukkit.command.Command;
import org.bukkit.command.CommandExecutor;
import org.bukkit.command.CommandSender;
import org.bukkit.entity.Player;
@AllArgsConstructor
public class Ztele implements CommandExecutor {
private ZombieEscape plugin;
@Override
public boolean onCommand(CommandSender commandSender, Command command, String label, String[] args) {
if (!(commandSender instanceof Player)) {
return false;
}
Player player = (Player) commandSender;
GameArena gameArena = plugin.getGameArena();
if (!gameArena.isGameRunning()) {
Messages.GAME_NOT_RUNNING.send(player);
return false;
}
if (gameArena.isHuman(player)) {
Messages.ZOMBIE_ONLY_COMMAND.send(player);
return false;
}
gameArena.teleportCheckpoint(player);
return false;
}
} | 1,107 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
Game.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/commands/Game.java | package net.cosmosmc.mcze.commands;
import net.cosmosmc.mcze.core.constants.Messages;
import net.cosmosmc.mcze.utils.GameFile;
import net.cosmosmc.mcze.utils.Levenshtein;
import net.cosmosmc.mcze.utils.Utils;
import org.bukkit.Location;
import org.bukkit.Material;
import org.bukkit.block.Block;
import org.bukkit.command.Command;
import org.bukkit.command.CommandExecutor;
import org.bukkit.command.CommandSender;
import org.bukkit.entity.Player;
public class Game implements CommandExecutor {
/**
* Represents a Game File to edit. We apply changes
* here to assist map making and live debugging.
* NOTE: Not final because this can change.
*/
private GameFile editedFile;
/**
* This is the in-game /game command, used to create, remove, and
* modify the Game File above.
* NOTE: We run file I/O on the MAIN thread for convenience and to keep
* the code concise.
*/
@Override
public boolean onCommand(CommandSender commandSender, Command command, String label, String[] args) {
if (!(commandSender instanceof Player)) {
return false;
}
Player player = (Player) commandSender;
if (args.length == 1) {
switch (args[0].toLowerCase()) {
case "help":
Messages.USAGE_GAME.send(player);
case "addspawn":
int spawn = editedFile.createListLocation(player, null, "Spawns");
Messages.CREATED_SPAWN.send(player, spawn);
break;
case "checkpoint":
int checkpoint = editedFile.createListLocation(player, null, "Checkpoints");
Messages.CREATED_CHECKPOINT.send(player, checkpoint);
break;
case "nukeroom":
editedFile.getConfig().set("Nukeroom", editedFile.serializeLocation(player.getLocation()));
editedFile.saveFile();
Messages.CREATED_NUKEROOM.send(player);
break;
default:
sendUnknownCommand(player);
sendCorrection(player, args[0].toLowerCase(), new String[] { "addspawn", "checkpoint", "nukeroom" });
}
} else if (args.length == 2) {
switch (args[0].toLowerCase()) {
case "load":
load(player, args[1]);
break;
default:
sendUnknownCommand(player);
sendCorrection(player, args[0].toLowerCase(), new String[] { "load" });
}
} else if (args.length == 3) {
switch (args[0].toLowerCase()) {
case "door":
doorSubCommand(player, args);
break;
default:
sendUnknownCommand(player);
sendCorrection(player, args[0].toLowerCase(), new String[] { "door" });
}
} else if (args.length == 4) {
switch (args[0].toLowerCase()) {
case "door":
doorSubCommand(player, args);
break;
default:
sendUnknownCommand(player);
sendCorrection(player, args[0].toLowerCase(), new String[] { "door" });
}
} else {
sendUnknownCommand(player);
}
return false;
}
/**
* Notifies the provided player that the input did not match a known
* /game subcommand.
*
* @param player the player to send the message to
*/
private void sendUnknownCommand(Player player) {
Messages.UNKNOWN_COMMAND.send(player);
}
/**
* Suggests the closest matching subcommand, if any, using Levenshtein
* distance between the input and the valid options.
*
* @param player the player to send the suggestion to
* @param input the subcommand the player typed
* @param options the valid subcommands to match against
*/
private void sendCorrection(Player player, String input, String[] options) {
String closest = Levenshtein.getClosestString(input, options);
if (closest.isEmpty()) {
return;
}
Messages.CORRECTION.send(player, closest);
}
/**
* Loads/Creates a new Game File for editing.
* NOTE: The file is in the YAML format, and
* should match the name of the arena.
*
* @param player the player who ran /game
* @param input the arena file name, without the .yml extension
*/
private void load(Player player, String input) {
input += ".yml";
editedFile = new GameFile("plugins/ZombieEscape/", input);
Messages.LOADING_FILE.send(player, input);
}
/**
* Check if the current Game File is null or not. Will
* send an error message if the file isn't loaded.
*
* @param player the player to send the message to
* @return if the edited file is null
*/
private boolean isFileNull(Player player) {
if (editedFile == null) {
Messages.GAME_FILE_NULL.send(player);
return true;
}
return false;
}
/**
* Processes all subcommands of /game door. Argument counts range
* from 3 to 4; the edge subcommand requires all 4.
*
* @param player the player to run the subcommand
* @param args the /game command arguments
*/
private void doorSubCommand(Player player, String[] args) {
switch (args[1].toLowerCase()) {
case "add":
if (!isFileNull(player)) {
addDoor(player, args[2]);
}
break;
case "timer":
if (!isFileNull(player)) {
timer(player, args);
}
break;
case "view":
if (!isFileNull(player)) {
viewFile(player, args[2]);
}
break;
case "delete":
if (!isFileNull(player)) {
deleteDoor(player, args[2]);
}
break;
case "edge":
if (!isFileNull(player)) {
doorEdge(player, args[2], args[3]);
}
break;
}
}
/**
* Creates a door with a given time in seconds.
*
* @param player the player who is setting the arena up
* @param input the time, in seconds, the door will take to open
*/
private void addDoor(Player player, String input) {
Block block = player.getEyeLocation().getBlock();
Material material = block.getType();
if (material != Material.SIGN_POST && material != Material.WALL_SIGN) {
Messages.BLOCK_NOT_SIGN.send(player);
return;
}
int seconds = Utils.getNumber(player, input);
if (seconds < 0) {
Messages.BAD_SECONDS.send(player);
return;
}
int signID = editedFile.createListLocation(player, block.getLocation(), "Doors");
editedFile.getConfig().set("Doors." + signID + ".Timer", seconds);
editedFile.saveFile();
Messages.CREATED_SIGN.send(player, signID, seconds);
}
/**
* Sets a timer for a given door.
*
* @param player the player who is setting the arena up
* @param args the command arguments: the door id and the new time in seconds
*/
private void timer(Player player, String[] args) {
int id = Utils.getNumber(player, args[2]);
int seconds = Utils.getNumber(player, args[3]);
if (id < 0 || seconds < 0) {
Messages.POSITIVE_VALUES.send(player);
return;
}
editedFile.getConfig().set("Signs." + id + ".Timer", seconds);
editedFile.saveFile();
}
/**
* Views current information for a provided Door.
*
* @param player the player to view the information
* @param input the id of the door
*/
private void viewFile(Player player, String input) {
// TODO: Load file and view it
}
/**
* Removes a door from the game file.
*
* @param player the player who is setting the arena up
* @param input the id of the door
*/
private void deleteDoor(Player player, String input) {
// TODO: Delete door
}
/**
* Creates a door edge. This is a location that will be used
* to remove/fill in the blocks.
*
* @param player the player who is setting the arena up
* @param inputId the id of the door to modify
* @param corner the corner, 1 or 2
*/
private void doorEdge(Player player, String inputId, String corner) {
int id = Utils.getNumber(player, inputId);
int input = Utils.getNumber(player, corner);
if (id < 0) {
Messages.POSITIVE_VALUES.send(player);
return;
}
if (input < 1 || input > 2) {
Messages.DOOR_EDGE_BAD.send(player);
return;
}
Location lookingAt = player.getEyeLocation().getBlock().getLocation();
editedFile.getConfig().set("Doors." + id + ".Edge" + input, editedFile.serializeLocation(lookingAt));
editedFile.saveFile();
Messages.ADDED_CORNER.send(player, corner);
}
} | 9,029 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
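sendCorrection above delegates to Levenshtein.getClosestString, whose source is not included in this dump. A minimal sketch of such a helper, assuming classic edit distance and an empty string when nothing is within a small distance (the plugin's actual threshold is unknown):

public final class Levenshtein {

    // Returns the option closest to the input, or "" if none is close enough.
    public static String getClosestString(String input, String[] options) {
        String best = "";
        int bestDistance = Integer.MAX_VALUE;
        for (String option : options) {
            int d = distance(input, option);
            if (d < bestDistance) {
                bestDistance = d;
                best = option;
            }
        }
        // Treat wildly different inputs as "no suggestion" (threshold assumed).
        return bestDistance <= 3 ? best : "";
    }

    // Classic two-row dynamic-programming edit distance.
    private static int distance(String a, String b) {
        int[] prev = new int[b.length() + 1];
        int[] curr = new int[b.length() + 1];
        for (int j = 0; j <= b.length(); j++) {
            prev[j] = j;
        }
        for (int i = 1; i <= a.length(); i++) {
            curr[0] = i;
            for (int j = 1; j <= b.length(); j++) {
                int cost = a.charAt(i - 1) == b.charAt(j - 1) ? 0 : 1;
                curr[j] = Math.min(Math.min(curr[j - 1] + 1, prev[j] + 1), prev[j - 1] + cost);
            }
            int[] tmp = prev;
            prev = curr;
            curr = tmp;
        }
        return prev[b.length()];
    }
}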
SetLobbySpawn.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/commands/SetLobbySpawn.java | package net.cosmosmc.mcze.commands;
import lombok.AllArgsConstructor;
import net.cosmosmc.mcze.ZombieEscape;
import net.cosmosmc.mcze.core.constants.Messages;
import net.cosmosmc.mcze.utils.Configuration;
import org.bukkit.command.Command;
import org.bukkit.command.CommandExecutor;
import org.bukkit.command.CommandSender;
import org.bukkit.entity.Player;
@AllArgsConstructor
public class SetLobbySpawn implements CommandExecutor {
private ZombieEscape plugin;
@Override
public boolean onCommand(CommandSender commandSender, Command command, String label, String[] args) {
if (!(commandSender instanceof Player)) {
Messages.PLAYER_ONLY_COMMAND.send(commandSender);
return false;
}
Configuration configuration = plugin.getConfiguration();
configuration.setSpawn((Player) commandSender);
return false;
}
} | 890 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
ProfileLoader.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/profiles/ProfileLoader.java | package net.cosmosmc.mcze.profiles;
import lombok.AllArgsConstructor;
import net.cosmosmc.mcze.ZombieEscape;
import net.cosmosmc.mcze.core.constants.Achievements;
import net.cosmosmc.mcze.core.constants.KitType;
import net.cosmosmc.mcze.api.events.ProfileLoadedEvent;
import org.apache.commons.lang.StringUtils;
import org.bukkit.Bukkit;
import org.bukkit.scheduler.BukkitRunnable;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
@AllArgsConstructor
public class ProfileLoader extends BukkitRunnable {
private Profile profile;
private ZombieEscape plugin;
private static final String INSERT = "INSERT INTO ze_players VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?) ON DUPLICATE KEY UPDATE name=?";
private static final String SELECT = "SELECT zombie_kills,human_kills,points,wins,achievements,human_kit,zombie_kit FROM ze_players WHERE uuid=?";
@Override
public void run() {
Connection connection = null;
try {
connection = plugin.getHikari().getConnection();
PreparedStatement preparedStatement = connection.prepareStatement(INSERT);
preparedStatement.setString(1, profile.getUuid().toString());
preparedStatement.setString(2, profile.getName());
preparedStatement.setInt(3, 0);
preparedStatement.setInt(4, 0);
preparedStatement.setInt(5, 0);
preparedStatement.setInt(6, 0);
preparedStatement.setString(7, StringUtils.repeat("f", Achievements.values().length));
preparedStatement.setString(8, "FORTIFY");
preparedStatement.setString(9, "LEAPER");
preparedStatement.setString(10, profile.getName());
preparedStatement.execute();
preparedStatement.close(); // release the INSERT statement before reusing the variable
preparedStatement = connection.prepareStatement(SELECT);
preparedStatement.setString(1, profile.getUuid().toString());
ResultSet resultSet = preparedStatement.executeQuery();
if (resultSet.next()) {
profile.setHumanKills(resultSet.getInt("human_kills"));
profile.setZombieKills(resultSet.getInt("zombie_kills"));
profile.setPoints(resultSet.getInt("points"));
profile.setWins(resultSet.getInt("wins"));
profile.setAchievements(getAchievements(resultSet));
profile.setHumanKit(KitType.valueOf(resultSet.getString("human_kit")));
profile.setZombieKit(KitType.valueOf(resultSet.getString("zombie_kit")));
profile.setLoaded(true);
}
resultSet.close();
preparedStatement.close();
} catch (SQLException e) {
e.printStackTrace();
} finally {
if (connection != null) {
try {
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
if (profile.isLoaded()) {
new BukkitRunnable() {
@Override
public void run() {
Bukkit.getPluginManager().callEvent(new ProfileLoadedEvent(profile));
}
}.runTask(plugin);
}
}
private char[] getAchievements(ResultSet result) throws SQLException {
char[] achieved = result.getString("achievements").toCharArray();
if (achieved.length == Achievements.values().length) {
return achieved;
}
char[] adjusted = StringUtils.repeat("f", Achievements.values().length).toCharArray();
System.arraycopy(achieved, 0, adjusted, 0, achieved.length);
return adjusted;
}
} | 3,734 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
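A minimal usage sketch, assuming the profile is created when the player joins and plugin.getHikari() is already initialized. runTaskAsynchronously keeps the JDBC round-trip off the main server thread; ProfileLoader then fires ProfileLoadedEvent back on the main thread via runTask:

// Typically called from a join listener (assumption; not shown in this dump).
Profile profile = new Profile(player);
new ProfileLoader(profile, plugin).runTaskAsynchronously(plugin);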
Profile.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/profiles/Profile.java | package net.cosmosmc.mcze.profiles;
import lombok.Getter;
import lombok.Setter;
import net.cosmosmc.mcze.core.constants.Achievements;
import net.cosmosmc.mcze.core.constants.KitType;
import org.bukkit.entity.Player;
import java.util.UUID;
@Getter
@Setter
public class Profile {
private UUID uuid;
private String name;
private int zombieKills;
private int humanKills;
private int points;
private int wins;
private int gamesPlayed;
private boolean loaded;
private char[] achievements;
private KitType humanKit;
private KitType zombieKit;
public Profile(Player player) {
this.uuid = player.getUniqueId();
this.name = player.getName();
}
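/**
* Marks the given achievement as earned. Achievements are held as a
* char array indexed by enum ordinal ('t' = earned, 'f' = not earned),
* matching the string that ProfileLoader reads and ProfileSaver persists.
*/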
public void awardAchievement(Achievements achievement) {
achievements[achievement.ordinal()] = 't';
}
} | 833 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
ProfileSaver.java | /FileExtraction/Java_unseen/sgtcaze_ZombieEscape/src/main/java/net/cosmosmc/mcze/profiles/ProfileSaver.java | package net.cosmosmc.mcze.profiles;
import lombok.AllArgsConstructor;
import net.cosmosmc.mcze.ZombieEscape;
import org.bukkit.scheduler.BukkitRunnable;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
@AllArgsConstructor
public class ProfileSaver extends BukkitRunnable {
private Profile profile;
private ZombieEscape plugin;
private static final String SAVE = "UPDATE ze_players SET zombie_kills=?, human_kills=?, points=?, wins=?, achievements=?, human_kit=?, zombie_kit=? WHERE uuid=?";
@Override
public void run() {
Connection connection = null;
try {
connection = plugin.getHikari().getConnection();
PreparedStatement preparedStatement = connection.prepareStatement(SAVE);
preparedStatement.setInt(1, profile.getZombieKills());
preparedStatement.setInt(2, profile.getHumanKills());
preparedStatement.setInt(3, profile.getPoints());
preparedStatement.setInt(4, profile.getWins());
preparedStatement.setString(5, new String(profile.getAchievements()));
preparedStatement.setString(6, profile.getHumanKit().name());
preparedStatement.setString(7, profile.getZombieKit().name());
preparedStatement.setString(8, profile.getUuid().toString());
preparedStatement.execute();
preparedStatement.close();
} catch (SQLException e) {
e.printStackTrace();
} finally {
if (connection != null) {
try {
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
}
} | 1,754 | Java | .java | sgtcaze/ZombieEscape | 28 | 29 | 0 | 2015-08-07T00:09:19Z | 2015-12-16T01:18:54Z |
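A minimal sketch of saving on quit, assuming a PlayerQuitEvent listener; the getProfile accessor is hypothetical, since this dump does not show how the plugin tracks loaded profiles:

import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.player.PlayerQuitEvent;

public class ProfileQuitListener implements Listener {

    private final ZombieEscape plugin;

    public ProfileQuitListener(ZombieEscape plugin) {
        this.plugin = plugin;
    }

    @EventHandler
    public void onQuit(PlayerQuitEvent event) {
        // getProfile is a hypothetical accessor, labeled as such.
        Profile profile = plugin.getProfile(event.getPlayer());
        if (profile != null && profile.isLoaded()) {
            // Persist asynchronously so the quit handler never blocks on JDBC.
            new ProfileSaver(profile, plugin).runTaskAsynchronously(plugin);
        }
    }
}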
Organization_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Organization_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Organization_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Organization.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Organization");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Organization_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 1,028 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Location_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Location_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Location_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Location.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Location");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Location_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 1,012 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Parse_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Parse_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Parse_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Parse.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Parse");
/** @generated */
final Feature casFeat_parseType;
/** @generated */
final int casFeatCode_parseType;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getParseType(int addr) {
if (featOkTst && casFeat_parseType == null)
jcas.throwFeatMissing("parseType", "opennlp.uima.Parse");
return ll_cas.ll_getStringValue(addr, casFeatCode_parseType);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setParseType(int addr, String v) {
if (featOkTst && casFeat_parseType == null)
jcas.throwFeatMissing("parseType", "opennlp.uima.Parse");
ll_cas.ll_setStringValue(addr, casFeatCode_parseType, v);}
/** @generated */
final Feature casFeat_children;
/** @generated */
final int casFeatCode_children;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getChildren(int addr) {
if (featOkTst && casFeat_children == null)
jcas.throwFeatMissing("children", "opennlp.uima.Parse");
return ll_cas.ll_getRefValue(addr, casFeatCode_children);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setChildren(int addr, int v) {
if (featOkTst && casFeat_children == null)
jcas.throwFeatMissing("children", "opennlp.uima.Parse");
ll_cas.ll_setRefValue(addr, casFeatCode_children, v);}
/** @generated
* @param addr low level Feature Structure reference
* @param i index of item in the array
* @return value at index i in the array
*/
public int getChildren(int addr, int i) {
if (featOkTst && casFeat_children == null)
jcas.throwFeatMissing("children", "opennlp.uima.Parse");
if (lowLevelTypeChecks)
return ll_cas.ll_getRefArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_children), i, true);
jcas.checkArrayBounds(ll_cas.ll_getRefValue(addr, casFeatCode_children), i);
return ll_cas.ll_getRefArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_children), i);
}
/** @generated
* @param addr low level Feature Structure reference
* @param i index of item in the array
* @param v value to set
*/
public void setChildren(int addr, int i, int v) {
if (featOkTst && casFeat_children == null)
jcas.throwFeatMissing("children", "opennlp.uima.Parse");
if (lowLevelTypeChecks)
ll_cas.ll_setRefArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_children), i, v, true);
jcas.checkArrayBounds(ll_cas.ll_getRefValue(addr, casFeatCode_children), i);
ll_cas.ll_setRefArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_children), i, v);
}
/** @generated */
final Feature casFeat_prob;
/** @generated */
final int casFeatCode_prob;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public double getProb(int addr) {
if (featOkTst && casFeat_prob == null)
jcas.throwFeatMissing("prob", "opennlp.uima.Parse");
return ll_cas.ll_getDoubleValue(addr, casFeatCode_prob);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setProb(int addr, double v) {
if (featOkTst && casFeat_prob == null)
jcas.throwFeatMissing("prob", "opennlp.uima.Parse");
ll_cas.ll_setDoubleValue(addr, casFeatCode_prob, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Parse_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_parseType = jcas.getRequiredFeatureDE(casType, "parseType", "uima.cas.String", featOkTst);
casFeatCode_parseType = (null == casFeat_parseType) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_parseType).getCode();
casFeat_children = jcas.getRequiredFeatureDE(casType, "children", "uima.cas.FSArray", featOkTst);
casFeatCode_children = (null == casFeat_children) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_children).getCode();
casFeat_prob = jcas.getRequiredFeatureDE(casType, "prob", "uima.cas.Double", featOkTst);
casFeatCode_prob = (null == casFeat_prob) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_prob).getCode();
}
}
| 5,333 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Sentence_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Sentence_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Sentence_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Sentence.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Sentence");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Sentence_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 1,012 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Time.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Time.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Time extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Time.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Time() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Time(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Time(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Time(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
| 1,955 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Person.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Person.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Person extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Person.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Person() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Person(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Person(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Person(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
| 1,967 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Location.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Location.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Location extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Location.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Location() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Location(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Location(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Location(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
| 1,979 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Token_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Token_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Token_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Token.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Token");
/** @generated */
final Feature casFeat_pos;
/** @generated */
final int casFeatCode_pos;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getPos(int addr) {
if (featOkTst && casFeat_pos == null)
jcas.throwFeatMissing("pos", "opennlp.uima.Token");
return ll_cas.ll_getStringValue(addr, casFeatCode_pos);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setPos(int addr, String v) {
if (featOkTst && casFeat_pos == null)
jcas.throwFeatMissing("pos", "opennlp.uima.Token");
ll_cas.ll_setStringValue(addr, casFeatCode_pos, v);}
/** @generated */
final Feature casFeat_tokenId;
/** @generated */
final int casFeatCode_tokenId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getTokenId(int addr) {
if (featOkTst && casFeat_tokenId == null)
jcas.throwFeatMissing("tokenId", "opennlp.uima.Token");
return ll_cas.ll_getIntValue(addr, casFeatCode_tokenId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTokenId(int addr, int v) {
if (featOkTst && casFeat_tokenId == null)
jcas.throwFeatMissing("tokenId", "opennlp.uima.Token");
ll_cas.ll_setIntValue(addr, casFeatCode_tokenId, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Token_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_pos = jcas.getRequiredFeatureDE(casType, "pos", "uima.cas.String", featOkTst);
casFeatCode_pos = (null == casFeat_pos) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_pos).getCode();
casFeat_tokenId = jcas.getRequiredFeatureDE(casType, "tokenId", "uima.cas.Integer", featOkTst);
casFeatCode_tokenId = (null == casFeat_tokenId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_tokenId).getCode();
}
}
| 3,024 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Person_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Person_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Person_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Person.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Person");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Person_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 1,004 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Money_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Money_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Money_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Money.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Money");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Money_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 1,000 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Date.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Date.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Date extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Date.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Date() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Date(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Date(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Date(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
| 1,955 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Time_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Time_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Time_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Time.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Time");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Time_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 996 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Token.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Token.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Token extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Token.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Token() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Token(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Token(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Token(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: pos
/** getter for pos - gets Part of speech
* @generated
* @return value of the feature
*/
public String getPos() {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_pos == null)
jcasType.jcas.throwFeatMissing("pos", "opennlp.uima.Token");
return jcasType.ll_cas.ll_getStringValue(addr, ((Token_Type)jcasType).casFeatCode_pos);}
/** setter for pos - sets Part of speech
* @generated
* @param v value to set into the feature
*/
public void setPos(String v) {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_pos == null)
jcasType.jcas.throwFeatMissing("pos", "opennlp.uima.Token");
jcasType.ll_cas.ll_setStringValue(addr, ((Token_Type)jcasType).casFeatCode_pos, v);}
//*--------------*
//* Feature: tokenId
/** getter for tokenId - gets
* @generated
* @return value of the feature
*/
public int getTokenId() {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_tokenId == null)
jcasType.jcas.throwFeatMissing("tokenId", "opennlp.uima.Token");
return jcasType.ll_cas.ll_getIntValue(addr, ((Token_Type)jcasType).casFeatCode_tokenId);}
/** setter for tokenId - sets
* @generated
* @param v value to set into the feature
*/
public void setTokenId(int v) {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_tokenId == null)
jcasType.jcas.throwFeatMissing("tokenId", "opennlp.uima.Token");
jcasType.ll_cas.ll_setIntValue(addr, ((Token_Type)jcasType).casFeatCode_tokenId, v);}
}
| 3,561 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Money.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Money.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Money extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Money.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Money() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Money(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Money(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Money(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
| 1,961 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Date_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Date_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Date_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Date.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Date");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Date_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 996 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Percentage_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Percentage_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Percentage_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Percentage.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Percentage");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Percentage_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 1,020 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Chunk.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Chunk.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Chunk extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Chunk.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Chunk() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Chunk(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Chunk(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Chunk(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: chunkType
/** getter for chunkType - gets
* @generated
* @return value of the feature
*/
public String getChunkType() {
if (Chunk_Type.featOkTst && ((Chunk_Type)jcasType).casFeat_chunkType == null)
jcasType.jcas.throwFeatMissing("chunkType", "opennlp.uima.Chunk");
return jcasType.ll_cas.ll_getStringValue(addr, ((Chunk_Type)jcasType).casFeatCode_chunkType);}
/** setter for chunkType - sets
* @generated
* @param v value to set into the feature
*/
public void setChunkType(String v) {
if (Chunk_Type.featOkTst && ((Chunk_Type)jcasType).casFeat_chunkType == null)
jcasType.jcas.throwFeatMissing("chunkType", "opennlp.uima.Chunk");
jcasType.ll_cas.ll_setStringValue(addr, ((Chunk_Type)jcasType).casFeatCode_chunkType, v);}
}
| 2,797 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Chunk_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Chunk_Type.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Chunk_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Chunk.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("opennlp.uima.Chunk");
/** @generated */
final Feature casFeat_chunkType;
/** @generated */
final int casFeatCode_chunkType;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getChunkType(int addr) {
if (featOkTst && casFeat_chunkType == null)
jcas.throwFeatMissing("chunkType", "opennlp.uima.Chunk");
return ll_cas.ll_getStringValue(addr, casFeatCode_chunkType);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setChunkType(int addr, String v) {
if (featOkTst && casFeat_chunkType == null)
jcas.throwFeatMissing("chunkType", "opennlp.uima.Chunk");
ll_cas.ll_setStringValue(addr, casFeatCode_chunkType, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Chunk_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_chunkType = jcas.getRequiredFeatureDE(casType, "chunkType", "uima.cas.String", featOkTst);
casFeatCode_chunkType = (null == casFeat_chunkType) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_chunkType).getCode();
}
}
| 2,118 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Parse.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Parse.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.cas.FSArray;
import org.apache.uima.jcas.cas.TOP;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Parse extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Parse.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Parse() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Parse(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Parse(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Parse(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: parseType
/** getter for parseType - gets Type of the parse node
* @generated
* @return value of the feature
*/
public String getParseType() {
if (Parse_Type.featOkTst && ((Parse_Type)jcasType).casFeat_parseType == null)
jcasType.jcas.throwFeatMissing("parseType", "opennlp.uima.Parse");
return jcasType.ll_cas.ll_getStringValue(addr, ((Parse_Type)jcasType).casFeatCode_parseType);}
/** setter for parseType - sets Type of the parse node
* @generated
* @param v value to set into the feature
*/
public void setParseType(String v) {
if (Parse_Type.featOkTst && ((Parse_Type)jcasType).casFeat_parseType == null)
jcasType.jcas.throwFeatMissing("parseType", "opennlp.uima.Parse");
jcasType.ll_cas.ll_setStringValue(addr, ((Parse_Type)jcasType).casFeatCode_parseType, v);}
//*--------------*
//* Feature: children
/** getter for children - gets Leaf nodes
* @generated
* @return value of the feature
*/
public FSArray getChildren() {
if (Parse_Type.featOkTst && ((Parse_Type)jcasType).casFeat_children == null)
jcasType.jcas.throwFeatMissing("children", "opennlp.uima.Parse");
return (FSArray)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((Parse_Type)jcasType).casFeatCode_children)));}
  /** setter for children - sets the child parse nodes
* @generated
* @param v value to set into the feature
*/
public void setChildren(FSArray v) {
if (Parse_Type.featOkTst && ((Parse_Type)jcasType).casFeat_children == null)
jcasType.jcas.throwFeatMissing("children", "opennlp.uima.Parse");
jcasType.ll_cas.ll_setRefValue(addr, ((Parse_Type)jcasType).casFeatCode_children, jcasType.ll_cas.ll_getFSRef(v));}
  /** indexed getter for children - gets an indexed value - child parse nodes
* @generated
* @param i index in the array to get
* @return value of the element at index i
*/
public TOP getChildren(int i) {
if (Parse_Type.featOkTst && ((Parse_Type)jcasType).casFeat_children == null)
jcasType.jcas.throwFeatMissing("children", "opennlp.uima.Parse");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((Parse_Type)jcasType).casFeatCode_children), i);
return (TOP)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((Parse_Type)jcasType).casFeatCode_children), i)));}
  /** indexed setter for children - sets an indexed value - child parse nodes
* @generated
* @param i index in the array to set
* @param v value to set into the array
*/
public void setChildren(int i, TOP v) {
if (Parse_Type.featOkTst && ((Parse_Type)jcasType).casFeat_children == null)
jcasType.jcas.throwFeatMissing("children", "opennlp.uima.Parse");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((Parse_Type)jcasType).casFeatCode_children), i);
jcasType.ll_cas.ll_setRefArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((Parse_Type)jcasType).casFeatCode_children), i, jcasType.ll_cas.ll_getFSRef(v));}
//*--------------*
//* Feature: prob
  /** getter for prob - gets the probability score of the parse node
* @generated
* @return value of the feature
*/
public double getProb() {
if (Parse_Type.featOkTst && ((Parse_Type)jcasType).casFeat_prob == null)
jcasType.jcas.throwFeatMissing("prob", "opennlp.uima.Parse");
return jcasType.ll_cas.ll_getDoubleValue(addr, ((Parse_Type)jcasType).casFeatCode_prob);}
  /** setter for prob - sets the probability score of the parse node
* @generated
* @param v value to set into the feature
*/
public void setProb(double v) {
if (Parse_Type.featOkTst && ((Parse_Type)jcasType).casFeat_prob == null)
jcasType.jcas.throwFeatMissing("prob", "opennlp.uima.Parse");
jcasType.ll_cas.ll_setDoubleValue(addr, ((Parse_Type)jcasType).casFeatCode_prob, v);}
}
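
/*
 * Illustrative usage sketch (editor's addition, not JCasGen output). It assumes
 * a JCas obtained from a running UIMA pipeline; the offsets and the "NP" label
 * are hypothetical example values. It only exercises the generated constructors
 * and feature accessors defined above.
 */
class ParseUsageSketch {
  static void demo(JCas jcas) {
    // Create a parse node over a hypothetical span and label it.
    Parse np = new Parse(jcas, 0, 10);
    np.setParseType("NP");   // constituent label (assumed example value)
    np.setProb(0.87);        // probability score for this node (assumed)
    // Attach two hypothetical child nodes via the generated FSArray feature.
    Parse left = new Parse(jcas, 0, 4);
    Parse right = new Parse(jcas, 5, 10);
    FSArray children = new FSArray(jcas, 2);
    np.setChildren(children);     // the array must be set before indexed writes
    np.setChildren(0, left);      // indexed setter writes into the FSArray
    np.setChildren(1, right);
    np.addToIndexes();            // make the annotation retrievable from the CAS
  }
}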
| 5,955 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Organization.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Organization.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Organization extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Organization.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Organization() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Organization(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Organization(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Organization(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
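
/*
 * Illustrative usage sketch (editor's addition, not JCasGen output). The same
 * pattern applies to the sibling span types below (Percentage, Sentence):
 * construct with a JCas and character offsets, then index the annotation.
 * The JCas and the offsets here are assumed to come from a UIMA pipeline.
 */
class OrganizationUsageSketch {
  static void demo(JCas jcas) {
    Organization org = new Organization(jcas, 12, 27); // hypothetical span
    org.addToIndexes();
    String surface = org.getCoveredText(); // document text covered by the span
    System.out.println("ORG: " + surface);
  }
}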
| 2,003 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Percentage.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Percentage.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Percentage extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Percentage.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Percentage() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Percentage(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Percentage(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Percentage(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
| 1,991 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Sentence.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/opennlp/uima/Sentence.java |
/* First created by JCasGen Thu Nov 23 14:25:03 CET 2017 */
package opennlp.uima;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Sentence extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Sentence.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Sentence() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Sentence(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Sentence(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Sentence(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
| 1,979 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Event.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Event.java |
/* First created by JCasGen Thu Jan 04 14:37:05 CET 2018 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Event extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Event.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Event() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Event(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Event(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Event(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: filename
/** getter for filename - gets
* @generated
* @return value of the feature
*/
public String getFilename() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Event");
return jcasType.ll_cas.ll_getStringValue(addr, ((Event_Type)jcasType).casFeatCode_filename);}
/** setter for filename - sets
* @generated
* @param v value to set into the feature
*/
public void setFilename(String v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setStringValue(addr, ((Event_Type)jcasType).casFeatCode_filename, v);}
//*--------------*
//* Feature: sentId
/** getter for sentId - gets
* @generated
* @return value of the feature
*/
public int getSentId() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_sentId == null)
jcasType.jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Event");
return jcasType.ll_cas.ll_getIntValue(addr, ((Event_Type)jcasType).casFeatCode_sentId);}
/** setter for sentId - sets
* @generated
* @param v value to set into the feature
*/
public void setSentId(int v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_sentId == null)
jcasType.jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setIntValue(addr, ((Event_Type)jcasType).casFeatCode_sentId, v);}
//*--------------*
//* Feature: tokId
/** getter for tokId - gets
* @generated
* @return value of the feature
*/
public int getTokId() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_tokId == null)
jcasType.jcas.throwFeatMissing("tokId", "de.unihd.dbs.uima.types.heideltime.Event");
return jcasType.ll_cas.ll_getIntValue(addr, ((Event_Type)jcasType).casFeatCode_tokId);}
/** setter for tokId - sets
* @generated
* @param v value to set into the feature
*/
public void setTokId(int v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_tokId == null)
jcasType.jcas.throwFeatMissing("tokId", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setIntValue(addr, ((Event_Type)jcasType).casFeatCode_tokId, v);}
//*--------------*
//* Feature: eventId
/** getter for eventId - gets
* @generated
* @return value of the feature
*/
public String getEventId() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_eventId == null)
jcasType.jcas.throwFeatMissing("eventId", "de.unihd.dbs.uima.types.heideltime.Event");
return jcasType.ll_cas.ll_getStringValue(addr, ((Event_Type)jcasType).casFeatCode_eventId);}
/** setter for eventId - sets
* @generated
* @param v value to set into the feature
*/
public void setEventId(String v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_eventId == null)
jcasType.jcas.throwFeatMissing("eventId", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setStringValue(addr, ((Event_Type)jcasType).casFeatCode_eventId, v);}
//*--------------*
//* Feature: eventInstanceId
/** getter for eventInstanceId - gets
* @generated
* @return value of the feature
*/
public int getEventInstanceId() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_eventInstanceId == null)
jcasType.jcas.throwFeatMissing("eventInstanceId", "de.unihd.dbs.uima.types.heideltime.Event");
return jcasType.ll_cas.ll_getIntValue(addr, ((Event_Type)jcasType).casFeatCode_eventInstanceId);}
/** setter for eventInstanceId - sets
* @generated
* @param v value to set into the feature
*/
public void setEventInstanceId(int v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_eventInstanceId == null)
jcasType.jcas.throwFeatMissing("eventInstanceId", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setIntValue(addr, ((Event_Type)jcasType).casFeatCode_eventInstanceId, v);}
//*--------------*
//* Feature: aspect
/** getter for aspect - gets
* @generated
* @return value of the feature
*/
public String getAspect() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_aspect == null)
jcasType.jcas.throwFeatMissing("aspect", "de.unihd.dbs.uima.types.heideltime.Event");
return jcasType.ll_cas.ll_getStringValue(addr, ((Event_Type)jcasType).casFeatCode_aspect);}
/** setter for aspect - sets
* @generated
* @param v value to set into the feature
*/
public void setAspect(String v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_aspect == null)
jcasType.jcas.throwFeatMissing("aspect", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setStringValue(addr, ((Event_Type)jcasType).casFeatCode_aspect, v);}
//*--------------*
//* Feature: modality
/** getter for modality - gets
* @generated
* @return value of the feature
*/
public String getModality() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_modality == null)
jcasType.jcas.throwFeatMissing("modality", "de.unihd.dbs.uima.types.heideltime.Event");
return jcasType.ll_cas.ll_getStringValue(addr, ((Event_Type)jcasType).casFeatCode_modality);}
/** setter for modality - sets
* @generated
* @param v value to set into the feature
*/
public void setModality(String v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_modality == null)
jcasType.jcas.throwFeatMissing("modality", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setStringValue(addr, ((Event_Type)jcasType).casFeatCode_modality, v);}
//*--------------*
//* Feature: polarity
/** getter for polarity - gets
* @generated
* @return value of the feature
*/
public String getPolarity() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_polarity == null)
jcasType.jcas.throwFeatMissing("polarity", "de.unihd.dbs.uima.types.heideltime.Event");
return jcasType.ll_cas.ll_getStringValue(addr, ((Event_Type)jcasType).casFeatCode_polarity);}
/** setter for polarity - sets
* @generated
* @param v value to set into the feature
*/
public void setPolarity(String v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_polarity == null)
jcasType.jcas.throwFeatMissing("polarity", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setStringValue(addr, ((Event_Type)jcasType).casFeatCode_polarity, v);}
//*--------------*
//* Feature: tense
/** getter for tense - gets
* @generated
* @return value of the feature
*/
public String getTense() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_tense == null)
jcasType.jcas.throwFeatMissing("tense", "de.unihd.dbs.uima.types.heideltime.Event");
return jcasType.ll_cas.ll_getStringValue(addr, ((Event_Type)jcasType).casFeatCode_tense);}
/** setter for tense - sets
* @generated
* @param v value to set into the feature
*/
public void setTense(String v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_tense == null)
jcasType.jcas.throwFeatMissing("tense", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setStringValue(addr, ((Event_Type)jcasType).casFeatCode_tense, v);}
//*--------------*
//* Feature: token
/** getter for token - gets
* @generated
* @return value of the feature
*/
public Token getToken() {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_token == null)
jcasType.jcas.throwFeatMissing("token", "de.unihd.dbs.uima.types.heideltime.Event");
return (Token)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((Event_Type)jcasType).casFeatCode_token)));}
/** setter for token - sets
* @generated
* @param v value to set into the feature
*/
public void setToken(Token v) {
if (Event_Type.featOkTst && ((Event_Type)jcasType).casFeat_token == null)
jcasType.jcas.throwFeatMissing("token", "de.unihd.dbs.uima.types.heideltime.Event");
jcasType.ll_cas.ll_setRefValue(addr, ((Event_Type)jcasType).casFeatCode_token, jcasType.ll_cas.ll_getFSRef(v));}
}
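
/*
 * Illustrative usage sketch (editor's addition, not JCasGen output). All ids,
 * offsets and TimeML-style feature values are assumed example data; the JCas
 * and the trigger Token are expected to come from earlier pipeline steps.
 */
class EventUsageSketch {
  static void demo(JCas jcas, Token trigger) {
    Event e = new Event(jcas, trigger.getBegin(), trigger.getEnd());
    e.setEventId("e1");          // TimeML-style event id (assumed)
    e.setEventInstanceId(1);
    e.setTense("PAST");
    e.setAspect("NONE");
    e.setPolarity("POS");
    e.setModality("NONE");
    e.setToken(trigger);         // link back to the trigger token
    e.addToIndexes();
  }
}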
| 10,622 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Dct_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Dct_Type.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Dct_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Dct.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("de.unihd.dbs.uima.types.heideltime.Dct");
/** @generated */
final Feature casFeat_filename;
/** @generated */
final int casFeatCode_filename;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getFilename(int addr) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Dct");
return ll_cas.ll_getStringValue(addr, casFeatCode_filename);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setFilename(int addr, String v) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Dct");
ll_cas.ll_setStringValue(addr, casFeatCode_filename, v);}
/** @generated */
final Feature casFeat_value;
/** @generated */
final int casFeatCode_value;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getValue(int addr) {
if (featOkTst && casFeat_value == null)
jcas.throwFeatMissing("value", "de.unihd.dbs.uima.types.heideltime.Dct");
return ll_cas.ll_getStringValue(addr, casFeatCode_value);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setValue(int addr, String v) {
if (featOkTst && casFeat_value == null)
jcas.throwFeatMissing("value", "de.unihd.dbs.uima.types.heideltime.Dct");
ll_cas.ll_setStringValue(addr, casFeatCode_value, v);}
/** @generated */
final Feature casFeat_timexId;
/** @generated */
final int casFeatCode_timexId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexId(int addr) {
if (featOkTst && casFeat_timexId == null)
jcas.throwFeatMissing("timexId", "de.unihd.dbs.uima.types.heideltime.Dct");
return ll_cas.ll_getStringValue(addr, casFeatCode_timexId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexId(int addr, String v) {
if (featOkTst && casFeat_timexId == null)
jcas.throwFeatMissing("timexId", "de.unihd.dbs.uima.types.heideltime.Dct");
ll_cas.ll_setStringValue(addr, casFeatCode_timexId, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Dct_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_filename = jcas.getRequiredFeatureDE(casType, "filename", "uima.cas.String", featOkTst);
casFeatCode_filename = (null == casFeat_filename) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_filename).getCode();
casFeat_value = jcas.getRequiredFeatureDE(casType, "value", "uima.cas.String", featOkTst);
casFeatCode_value = (null == casFeat_value) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_value).getCode();
casFeat_timexId = jcas.getRequiredFeatureDE(casType, "timexId", "uima.cas.String", featOkTst);
casFeatCode_timexId = (null == casFeat_timexId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_timexId).getCode();
}
}
| 4,243 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
IntervalCandidateSentence.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/IntervalCandidateSentence.java |
/* First created by JCasGen Thu Jan 04 14:37:05 CET 2018 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class IntervalCandidateSentence extends Sentence {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(IntervalCandidateSentence.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected IntervalCandidateSentence() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public IntervalCandidateSentence(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public IntervalCandidateSentence(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public IntervalCandidateSentence(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
| 2,056 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Sentence_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Sentence_Type.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Sentence_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Sentence.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("de.unihd.dbs.uima.types.heideltime.Sentence");
/** @generated */
final Feature casFeat_filename;
/** @generated */
final int casFeatCode_filename;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getFilename(int addr) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Sentence");
return ll_cas.ll_getStringValue(addr, casFeatCode_filename);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setFilename(int addr, String v) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Sentence");
ll_cas.ll_setStringValue(addr, casFeatCode_filename, v);}
/** @generated */
final Feature casFeat_sentenceId;
/** @generated */
final int casFeatCode_sentenceId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getSentenceId(int addr) {
if (featOkTst && casFeat_sentenceId == null)
jcas.throwFeatMissing("sentenceId", "de.unihd.dbs.uima.types.heideltime.Sentence");
return ll_cas.ll_getIntValue(addr, casFeatCode_sentenceId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setSentenceId(int addr, int v) {
if (featOkTst && casFeat_sentenceId == null)
jcas.throwFeatMissing("sentenceId", "de.unihd.dbs.uima.types.heideltime.Sentence");
ll_cas.ll_setIntValue(addr, casFeatCode_sentenceId, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Sentence_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_filename = jcas.getRequiredFeatureDE(casType, "filename", "uima.cas.String", featOkTst);
casFeatCode_filename = (null == casFeat_filename) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_filename).getCode();
casFeat_sentenceId = jcas.getRequiredFeatureDE(casType, "sentenceId", "uima.cas.Integer", featOkTst);
casFeatCode_sentenceId = (null == casFeat_sentenceId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_sentenceId).getCode();
}
}
| 3,300 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Event_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Event_Type.java |
/* First created by JCasGen Thu Jan 04 14:37:05 CET 2018 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Event_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Event.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("de.unihd.dbs.uima.types.heideltime.Event");
/** @generated */
final Feature casFeat_filename;
/** @generated */
final int casFeatCode_filename;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getFilename(int addr) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getStringValue(addr, casFeatCode_filename);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setFilename(int addr, String v) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setStringValue(addr, casFeatCode_filename, v);}
/** @generated */
final Feature casFeat_sentId;
/** @generated */
final int casFeatCode_sentId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getSentId(int addr) {
if (featOkTst && casFeat_sentId == null)
jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getIntValue(addr, casFeatCode_sentId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setSentId(int addr, int v) {
if (featOkTst && casFeat_sentId == null)
jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setIntValue(addr, casFeatCode_sentId, v);}
/** @generated */
final Feature casFeat_tokId;
/** @generated */
final int casFeatCode_tokId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getTokId(int addr) {
if (featOkTst && casFeat_tokId == null)
jcas.throwFeatMissing("tokId", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getIntValue(addr, casFeatCode_tokId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTokId(int addr, int v) {
if (featOkTst && casFeat_tokId == null)
jcas.throwFeatMissing("tokId", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setIntValue(addr, casFeatCode_tokId, v);}
/** @generated */
final Feature casFeat_eventId;
/** @generated */
final int casFeatCode_eventId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getEventId(int addr) {
if (featOkTst && casFeat_eventId == null)
jcas.throwFeatMissing("eventId", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getStringValue(addr, casFeatCode_eventId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setEventId(int addr, String v) {
if (featOkTst && casFeat_eventId == null)
jcas.throwFeatMissing("eventId", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setStringValue(addr, casFeatCode_eventId, v);}
/** @generated */
final Feature casFeat_eventInstanceId;
/** @generated */
final int casFeatCode_eventInstanceId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getEventInstanceId(int addr) {
if (featOkTst && casFeat_eventInstanceId == null)
jcas.throwFeatMissing("eventInstanceId", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getIntValue(addr, casFeatCode_eventInstanceId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setEventInstanceId(int addr, int v) {
if (featOkTst && casFeat_eventInstanceId == null)
jcas.throwFeatMissing("eventInstanceId", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setIntValue(addr, casFeatCode_eventInstanceId, v);}
/** @generated */
final Feature casFeat_aspect;
/** @generated */
final int casFeatCode_aspect;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getAspect(int addr) {
if (featOkTst && casFeat_aspect == null)
jcas.throwFeatMissing("aspect", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getStringValue(addr, casFeatCode_aspect);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setAspect(int addr, String v) {
if (featOkTst && casFeat_aspect == null)
jcas.throwFeatMissing("aspect", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setStringValue(addr, casFeatCode_aspect, v);}
/** @generated */
final Feature casFeat_modality;
/** @generated */
final int casFeatCode_modality;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getModality(int addr) {
if (featOkTst && casFeat_modality == null)
jcas.throwFeatMissing("modality", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getStringValue(addr, casFeatCode_modality);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setModality(int addr, String v) {
if (featOkTst && casFeat_modality == null)
jcas.throwFeatMissing("modality", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setStringValue(addr, casFeatCode_modality, v);}
/** @generated */
final Feature casFeat_polarity;
/** @generated */
final int casFeatCode_polarity;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getPolarity(int addr) {
if (featOkTst && casFeat_polarity == null)
jcas.throwFeatMissing("polarity", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getStringValue(addr, casFeatCode_polarity);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setPolarity(int addr, String v) {
if (featOkTst && casFeat_polarity == null)
jcas.throwFeatMissing("polarity", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setStringValue(addr, casFeatCode_polarity, v);}
/** @generated */
final Feature casFeat_tense;
/** @generated */
final int casFeatCode_tense;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTense(int addr) {
if (featOkTst && casFeat_tense == null)
jcas.throwFeatMissing("tense", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getStringValue(addr, casFeatCode_tense);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTense(int addr, String v) {
if (featOkTst && casFeat_tense == null)
jcas.throwFeatMissing("tense", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setStringValue(addr, casFeatCode_tense, v);}
/** @generated */
final Feature casFeat_token;
/** @generated */
final int casFeatCode_token;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getToken(int addr) {
if (featOkTst && casFeat_token == null)
jcas.throwFeatMissing("token", "de.unihd.dbs.uima.types.heideltime.Event");
return ll_cas.ll_getRefValue(addr, casFeatCode_token);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setToken(int addr, int v) {
if (featOkTst && casFeat_token == null)
jcas.throwFeatMissing("token", "de.unihd.dbs.uima.types.heideltime.Event");
ll_cas.ll_setRefValue(addr, casFeatCode_token, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Event_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_filename = jcas.getRequiredFeatureDE(casType, "filename", "uima.cas.String", featOkTst);
casFeatCode_filename = (null == casFeat_filename) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_filename).getCode();
casFeat_sentId = jcas.getRequiredFeatureDE(casType, "sentId", "uima.cas.Integer", featOkTst);
casFeatCode_sentId = (null == casFeat_sentId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_sentId).getCode();
casFeat_tokId = jcas.getRequiredFeatureDE(casType, "tokId", "uima.cas.Integer", featOkTst);
casFeatCode_tokId = (null == casFeat_tokId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_tokId).getCode();
casFeat_eventId = jcas.getRequiredFeatureDE(casType, "eventId", "uima.cas.String", featOkTst);
casFeatCode_eventId = (null == casFeat_eventId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_eventId).getCode();
casFeat_eventInstanceId = jcas.getRequiredFeatureDE(casType, "eventInstanceId", "uima.cas.Integer", featOkTst);
casFeatCode_eventInstanceId = (null == casFeat_eventInstanceId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_eventInstanceId).getCode();
casFeat_aspect = jcas.getRequiredFeatureDE(casType, "aspect", "uima.cas.String", featOkTst);
casFeatCode_aspect = (null == casFeat_aspect) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_aspect).getCode();
casFeat_modality = jcas.getRequiredFeatureDE(casType, "modality", "uima.cas.String", featOkTst);
casFeatCode_modality = (null == casFeat_modality) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_modality).getCode();
casFeat_polarity = jcas.getRequiredFeatureDE(casType, "polarity", "uima.cas.String", featOkTst);
casFeatCode_polarity = (null == casFeat_polarity) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_polarity).getCode();
casFeat_tense = jcas.getRequiredFeatureDE(casType, "tense", "uima.cas.String", featOkTst);
casFeatCode_tense = (null == casFeat_tense) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_tense).getCode();
casFeat_token = jcas.getRequiredFeatureDE(casType, "token", "de.unihd.dbs.uima.types.heideltime.Token", featOkTst);
casFeatCode_token = (null == casFeat_token) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_token).getCode();
}
}
| 11,660 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
SourceDocInfo.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/SourceDocInfo.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class SourceDocInfo extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(SourceDocInfo.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected SourceDocInfo() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public SourceDocInfo(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public SourceDocInfo(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public SourceDocInfo(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: uri
/** getter for uri - gets
* @generated
* @return value of the feature
*/
public String getUri() {
if (SourceDocInfo_Type.featOkTst && ((SourceDocInfo_Type)jcasType).casFeat_uri == null)
jcasType.jcas.throwFeatMissing("uri", "de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
return jcasType.ll_cas.ll_getStringValue(addr, ((SourceDocInfo_Type)jcasType).casFeatCode_uri);}
/** setter for uri - sets
* @generated
* @param v value to set into the feature
*/
public void setUri(String v) {
if (SourceDocInfo_Type.featOkTst && ((SourceDocInfo_Type)jcasType).casFeat_uri == null)
jcasType.jcas.throwFeatMissing("uri", "de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
jcasType.ll_cas.ll_setStringValue(addr, ((SourceDocInfo_Type)jcasType).casFeatCode_uri, v);}
//*--------------*
//* Feature: offsetInSource
/** getter for offsetInSource - gets
* @generated
* @return value of the feature
*/
public int getOffsetInSource() {
if (SourceDocInfo_Type.featOkTst && ((SourceDocInfo_Type)jcasType).casFeat_offsetInSource == null)
jcasType.jcas.throwFeatMissing("offsetInSource", "de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
return jcasType.ll_cas.ll_getIntValue(addr, ((SourceDocInfo_Type)jcasType).casFeatCode_offsetInSource);}
/** setter for offsetInSource - sets
* @generated
* @param v value to set into the feature
*/
public void setOffsetInSource(int v) {
if (SourceDocInfo_Type.featOkTst && ((SourceDocInfo_Type)jcasType).casFeat_offsetInSource == null)
jcasType.jcas.throwFeatMissing("offsetInSource", "de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
jcasType.ll_cas.ll_setIntValue(addr, ((SourceDocInfo_Type)jcasType).casFeatCode_offsetInSource, v);}
}
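
/*
 * Illustrative usage sketch (editor's addition, not JCasGen output). The URI
 * and offset are assumed example values recording where the analysed text
 * originated within its source document.
 */
class SourceDocInfoUsageSketch {
  static void demo(JCas jcas) {
    SourceDocInfo info = new SourceDocInfo(jcas, 0, 0);
    info.setUri("file:///data/report-0001.txt"); // hypothetical source URI
    info.setOffsetInSource(0);                   // start offset in the source
    info.addToIndexes();
  }
}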
| 3,896 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Token_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Token_Type.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Token_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Token.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("de.unihd.dbs.uima.types.heideltime.Token");
/** @generated */
final Feature casFeat_filename;
/** @generated */
final int casFeatCode_filename;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getFilename(int addr) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Token");
return ll_cas.ll_getStringValue(addr, casFeatCode_filename);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setFilename(int addr, String v) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Token");
ll_cas.ll_setStringValue(addr, casFeatCode_filename, v);}
/** @generated */
final Feature casFeat_tokenId;
/** @generated */
final int casFeatCode_tokenId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getTokenId(int addr) {
if (featOkTst && casFeat_tokenId == null)
jcas.throwFeatMissing("tokenId", "de.unihd.dbs.uima.types.heideltime.Token");
return ll_cas.ll_getIntValue(addr, casFeatCode_tokenId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTokenId(int addr, int v) {
if (featOkTst && casFeat_tokenId == null)
jcas.throwFeatMissing("tokenId", "de.unihd.dbs.uima.types.heideltime.Token");
ll_cas.ll_setIntValue(addr, casFeatCode_tokenId, v);}
/** @generated */
final Feature casFeat_sentId;
/** @generated */
final int casFeatCode_sentId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getSentId(int addr) {
if (featOkTst && casFeat_sentId == null)
jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Token");
return ll_cas.ll_getIntValue(addr, casFeatCode_sentId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setSentId(int addr, int v) {
if (featOkTst && casFeat_sentId == null)
jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Token");
ll_cas.ll_setIntValue(addr, casFeatCode_sentId, v);}
/** @generated */
final Feature casFeat_pos;
/** @generated */
final int casFeatCode_pos;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getPos(int addr) {
if (featOkTst && casFeat_pos == null)
jcas.throwFeatMissing("pos", "de.unihd.dbs.uima.types.heideltime.Token");
return ll_cas.ll_getStringValue(addr, casFeatCode_pos);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setPos(int addr, String v) {
if (featOkTst && casFeat_pos == null)
jcas.throwFeatMissing("pos", "de.unihd.dbs.uima.types.heideltime.Token");
ll_cas.ll_setStringValue(addr, casFeatCode_pos, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Token_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_filename = jcas.getRequiredFeatureDE(casType, "filename", "uima.cas.String", featOkTst);
casFeatCode_filename = (null == casFeat_filename) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_filename).getCode();
casFeat_tokenId = jcas.getRequiredFeatureDE(casType, "tokenId", "uima.cas.Integer", featOkTst);
casFeatCode_tokenId = (null == casFeat_tokenId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_tokenId).getCode();
casFeat_sentId = jcas.getRequiredFeatureDE(casType, "sentId", "uima.cas.Integer", featOkTst);
casFeatCode_sentId = (null == casFeat_sentId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_sentId).getCode();
casFeat_pos = jcas.getRequiredFeatureDE(casType, "pos", "uima.cas.String", featOkTst);
casFeatCode_pos = (null == casFeat_pos) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_pos).getCode();
}
}
| 5,247 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Timex3Interval.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Timex3Interval.java |
/* First created by JCasGen Thu Jan 04 14:37:05 CET 2018 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Timex3Interval extends Timex3 {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Timex3Interval.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Timex3Interval() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Timex3Interval(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Timex3Interval(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Timex3Interval(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: TimexValueEB
/** getter for TimexValueEB - gets
* @generated
* @return value of the feature
*/
public String getTimexValueEB() {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_TimexValueEB == null)
jcasType.jcas.throwFeatMissing("TimexValueEB", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_TimexValueEB);}
/** setter for TimexValueEB - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexValueEB(String v) {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_TimexValueEB == null)
jcasType.jcas.throwFeatMissing("TimexValueEB", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_TimexValueEB, v);}
//*--------------*
//* Feature: TimexValueLE
/** getter for TimexValueLE - gets
* @generated
* @return value of the feature
*/
public String getTimexValueLE() {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_TimexValueLE == null)
jcasType.jcas.throwFeatMissing("TimexValueLE", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_TimexValueLE);}
/** setter for TimexValueLE - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexValueLE(String v) {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_TimexValueLE == null)
jcasType.jcas.throwFeatMissing("TimexValueLE", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_TimexValueLE, v);}
//*--------------*
//* Feature: TimexValueEE
/** getter for TimexValueEE - gets
* @generated
* @return value of the feature
*/
public String getTimexValueEE() {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_TimexValueEE == null)
jcasType.jcas.throwFeatMissing("TimexValueEE", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_TimexValueEE);}
/** setter for TimexValueEE - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexValueEE(String v) {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_TimexValueEE == null)
jcasType.jcas.throwFeatMissing("TimexValueEE", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_TimexValueEE, v);}
//*--------------*
//* Feature: TimexValueLB
/** getter for TimexValueLB - gets
* @generated
* @return value of the feature
*/
public String getTimexValueLB() {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_TimexValueLB == null)
jcasType.jcas.throwFeatMissing("TimexValueLB", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_TimexValueLB);}
/** setter for TimexValueLB - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexValueLB(String v) {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_TimexValueLB == null)
jcasType.jcas.throwFeatMissing("TimexValueLB", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_TimexValueLB, v);}
//*--------------*
//* Feature: emptyValue
/** getter for emptyValue - gets
* @generated
* @return value of the feature
*/
public String getEmptyValue() {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_emptyValue == null)
jcasType.jcas.throwFeatMissing("emptyValue", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_emptyValue);}
/** setter for emptyValue - sets
* @generated
* @param v value to set into the feature
*/
public void setEmptyValue(String v) {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_emptyValue == null)
jcasType.jcas.throwFeatMissing("emptyValue", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_emptyValue, v);}
//*--------------*
//* Feature: beginTimex
/** getter for beginTimex - gets
* @generated
* @return value of the feature
*/
public String getBeginTimex() {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_beginTimex == null)
jcasType.jcas.throwFeatMissing("beginTimex", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_beginTimex);}
/** setter for beginTimex - sets
* @generated
* @param v value to set into the feature
*/
public void setBeginTimex(String v) {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_beginTimex == null)
jcasType.jcas.throwFeatMissing("beginTimex", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_beginTimex, v);}
//*--------------*
//* Feature: endTimex
/** getter for endTimex - gets
* @generated
* @return value of the feature
*/
public String getEndTimex() {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_endTimex == null)
jcasType.jcas.throwFeatMissing("endTimex", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_endTimex);}
/** setter for endTimex - sets
* @generated
* @param v value to set into the feature
*/
public void setEndTimex(String v) {
if (Timex3Interval_Type.featOkTst && ((Timex3Interval_Type)jcasType).casFeat_endTimex == null)
jcasType.jcas.throwFeatMissing("endTimex", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3Interval_Type)jcasType).casFeatCode_endTimex, v);}
}
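
/*
 * Illustrative usage sketch (editor's addition, not JCasGen output). It reads
 * the EB/LB/EE/LE features as earliest/latest begin and end bounds of the
 * interval -- a plausible interpretation of HeidelTime's scheme, stated here
 * as an assumption. The concrete dates, ids and offsets are example data.
 */
class Timex3IntervalUsageSketch {
  static void demo(JCas jcas) {
    Timex3Interval interval = new Timex3Interval(jcas, 0, 9); // hypothetical span
    interval.setTimexValueEB("2019-04-01"); // earliest begin (assumed reading)
    interval.setTimexValueLB("2019-04-03"); // latest begin
    interval.setTimexValueEE("2019-04-28"); // earliest end
    interval.setTimexValueLE("2019-04-30"); // latest end
    interval.setBeginTimex("t1");  // ids of bounding TIMEX3 annotations (assumed)
    interval.setEndTimex("t2");
    interval.addToIndexes();
  }
}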
| 8,795 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
IntervalCandidateSentence_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/IntervalCandidateSentence_Type.java |
/* First created by JCasGen Thu Jan 04 14:37:05 CET 2018 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class IntervalCandidateSentence_Type extends Sentence_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = IntervalCandidateSentence.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("de.unihd.dbs.uima.types.heideltime.IntervalCandidateSentence");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public IntervalCandidateSentence_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 1,072 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
SourceDocInfo_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/SourceDocInfo_Type.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class SourceDocInfo_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = SourceDocInfo.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
/** @generated */
final Feature casFeat_uri;
/** @generated */
final int casFeatCode_uri;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getUri(int addr) {
if (featOkTst && casFeat_uri == null)
jcas.throwFeatMissing("uri", "de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
return ll_cas.ll_getStringValue(addr, casFeatCode_uri);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setUri(int addr, String v) {
if (featOkTst && casFeat_uri == null)
jcas.throwFeatMissing("uri", "de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
ll_cas.ll_setStringValue(addr, casFeatCode_uri, v);}
/** @generated */
final Feature casFeat_offsetInSource;
/** @generated */
final int casFeatCode_offsetInSource;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getOffsetInSource(int addr) {
if (featOkTst && casFeat_offsetInSource == null)
jcas.throwFeatMissing("offsetInSource", "de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
return ll_cas.ll_getIntValue(addr, casFeatCode_offsetInSource);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setOffsetInSource(int addr, int v) {
if (featOkTst && casFeat_offsetInSource == null)
jcas.throwFeatMissing("offsetInSource", "de.unihd.dbs.uima.types.heideltime.SourceDocInfo");
ll_cas.ll_setIntValue(addr, casFeatCode_offsetInSource, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public SourceDocInfo_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_uri = jcas.getRequiredFeatureDE(casType, "uri", "uima.cas.String", featOkTst);
casFeatCode_uri = (null == casFeat_uri) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_uri).getCode();
casFeat_offsetInSource = jcas.getRequiredFeatureDE(casType, "offsetInSource", "uima.cas.Integer", featOkTst);
casFeatCode_offsetInSource = (null == casFeat_offsetInSource) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_offsetInSource).getCode();
}
}
| 3,325 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
GoldEvent.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/GoldEvent.java |
/* First created by JCasGen Thu Jan 04 14:37:05 CET 2018 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class GoldEvent extends Event {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(GoldEvent.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected GoldEvent() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public GoldEvent(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public GoldEvent(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public GoldEvent(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
}
| 1,957 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Token.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Token.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Token extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Token.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Token() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Token(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Token(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Token(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: filename
/** getter for filename - gets
* @generated
* @return value of the feature
*/
public String getFilename() {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Token");
return jcasType.ll_cas.ll_getStringValue(addr, ((Token_Type)jcasType).casFeatCode_filename);}
/** setter for filename - sets
* @generated
* @param v value to set into the feature
*/
public void setFilename(String v) {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Token");
jcasType.ll_cas.ll_setStringValue(addr, ((Token_Type)jcasType).casFeatCode_filename, v);}
//*--------------*
//* Feature: tokenId
/** getter for tokenId - gets
* @generated
* @return value of the feature
*/
public int getTokenId() {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_tokenId == null)
jcasType.jcas.throwFeatMissing("tokenId", "de.unihd.dbs.uima.types.heideltime.Token");
return jcasType.ll_cas.ll_getIntValue(addr, ((Token_Type)jcasType).casFeatCode_tokenId);}
/** setter for tokenId - sets
* @generated
* @param v value to set into the feature
*/
public void setTokenId(int v) {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_tokenId == null)
jcasType.jcas.throwFeatMissing("tokenId", "de.unihd.dbs.uima.types.heideltime.Token");
jcasType.ll_cas.ll_setIntValue(addr, ((Token_Type)jcasType).casFeatCode_tokenId, v);}
//*--------------*
//* Feature: sentId
/** getter for sentId - gets
* @generated
* @return value of the feature
*/
public int getSentId() {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_sentId == null)
jcasType.jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Token");
return jcasType.ll_cas.ll_getIntValue(addr, ((Token_Type)jcasType).casFeatCode_sentId);}
/** setter for sentId - sets
* @generated
* @param v value to set into the feature
*/
public void setSentId(int v) {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_sentId == null)
jcasType.jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Token");
jcasType.ll_cas.ll_setIntValue(addr, ((Token_Type)jcasType).casFeatCode_sentId, v);}
//*--------------*
//* Feature: pos
/** getter for pos - gets
* @generated
* @return value of the feature
*/
public String getPos() {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_pos == null)
jcasType.jcas.throwFeatMissing("pos", "de.unihd.dbs.uima.types.heideltime.Token");
return jcasType.ll_cas.ll_getStringValue(addr, ((Token_Type)jcasType).casFeatCode_pos);}
/** setter for pos - sets
* @generated
* @param v value to set into the feature
*/
public void setPos(String v) {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_pos == null)
jcasType.jcas.throwFeatMissing("pos", "de.unihd.dbs.uima.types.heideltime.Token");
jcasType.ll_cas.ll_setStringValue(addr, ((Token_Type)jcasType).casFeatCode_pos, v);}
}
| 5,347 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Timex3.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Timex3.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
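 *
 * <p>
 * A minimal creation sketch (illustrative only; the JCas instance, the
 * offsets, and the feature values are assumptions, not taken from this
 * repository):
 *
 * <pre>{@code
 * Timex3 timex = new Timex3(jcas, 0, 10);
 * timex.setTimexType("DATE");
 * timex.setTimexValue("2019-04-04");
 * timex.addToIndexes();
 * }</pre>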
* @generated */
public class Timex3 extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Timex3.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Timex3() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Timex3(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Timex3(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Timex3(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: filename
/** getter for filename - gets
* @generated
* @return value of the feature
*/
public String getFilename() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_filename);}
/** setter for filename - sets
* @generated
* @param v value to set into the feature
*/
public void setFilename(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_filename, v);}
//*--------------*
//* Feature: sentId
/** getter for sentId - gets
* @generated
* @return value of the feature
*/
public int getSentId() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_sentId == null)
jcasType.jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getIntValue(addr, ((Timex3_Type)jcasType).casFeatCode_sentId);}
/** setter for sentId - sets
* @generated
* @param v value to set into the feature
*/
public void setSentId(int v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_sentId == null)
jcasType.jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setIntValue(addr, ((Timex3_Type)jcasType).casFeatCode_sentId, v);}
//*--------------*
//* Feature: firstTokId
/** getter for firstTokId - gets
* @generated
* @return value of the feature
*/
public int getFirstTokId() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_firstTokId == null)
jcasType.jcas.throwFeatMissing("firstTokId", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getIntValue(addr, ((Timex3_Type)jcasType).casFeatCode_firstTokId);}
/** setter for firstTokId - sets
* @generated
* @param v value to set into the feature
*/
public void setFirstTokId(int v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_firstTokId == null)
jcasType.jcas.throwFeatMissing("firstTokId", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setIntValue(addr, ((Timex3_Type)jcasType).casFeatCode_firstTokId, v);}
//*--------------*
//* Feature: allTokIds
/** getter for allTokIds - gets
* @generated
* @return value of the feature
*/
public String getAllTokIds() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_allTokIds == null)
jcasType.jcas.throwFeatMissing("allTokIds", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_allTokIds);}
/** setter for allTokIds - sets
* @generated
* @param v value to set into the feature
*/
public void setAllTokIds(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_allTokIds == null)
jcasType.jcas.throwFeatMissing("allTokIds", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_allTokIds, v);}
//*--------------*
//* Feature: timexId
/** getter for timexId - gets
* @generated
* @return value of the feature
*/
public String getTimexId() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexId == null)
jcasType.jcas.throwFeatMissing("timexId", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexId);}
/** setter for timexId - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexId(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexId == null)
jcasType.jcas.throwFeatMissing("timexId", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexId, v);}
//*--------------*
//* Feature: timexInstance
/** getter for timexInstance - gets
* @generated
* @return value of the feature
*/
public int getTimexInstance() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexInstance == null)
jcasType.jcas.throwFeatMissing("timexInstance", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getIntValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexInstance);}
/** setter for timexInstance - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexInstance(int v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexInstance == null)
jcasType.jcas.throwFeatMissing("timexInstance", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setIntValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexInstance, v);}
//*--------------*
//* Feature: timexType
/** getter for timexType - gets
* @generated
* @return value of the feature
*/
public String getTimexType() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexType == null)
jcasType.jcas.throwFeatMissing("timexType", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexType);}
/** setter for timexType - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexType(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexType == null)
jcasType.jcas.throwFeatMissing("timexType", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexType, v);}
//*--------------*
//* Feature: timexValue
/** getter for timexValue - gets
* @generated
* @return value of the feature
*/
public String getTimexValue() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexValue == null)
jcasType.jcas.throwFeatMissing("timexValue", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexValue);}
/** setter for timexValue - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexValue(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexValue == null)
jcasType.jcas.throwFeatMissing("timexValue", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexValue, v);}
//*--------------*
//* Feature: foundByRule
/** getter for foundByRule - gets
* @generated
* @return value of the feature
*/
public String getFoundByRule() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_foundByRule == null)
jcasType.jcas.throwFeatMissing("foundByRule", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_foundByRule);}
/** setter for foundByRule - sets
* @generated
* @param v value to set into the feature
*/
public void setFoundByRule(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_foundByRule == null)
jcasType.jcas.throwFeatMissing("foundByRule", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_foundByRule, v);}
//*--------------*
//* Feature: timexQuant
/** getter for timexQuant - gets
* @generated
* @return value of the feature
*/
public String getTimexQuant() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexQuant == null)
jcasType.jcas.throwFeatMissing("timexQuant", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexQuant);}
/** setter for timexQuant - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexQuant(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexQuant == null)
jcasType.jcas.throwFeatMissing("timexQuant", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexQuant, v);}
//*--------------*
//* Feature: timexFreq
/** getter for timexFreq - gets
* @generated
* @return value of the feature
*/
public String getTimexFreq() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexFreq == null)
jcasType.jcas.throwFeatMissing("timexFreq", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexFreq);}
/** setter for timexFreq - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexFreq(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexFreq == null)
jcasType.jcas.throwFeatMissing("timexFreq", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexFreq, v);}
//*--------------*
//* Feature: timexMod
/** getter for timexMod - gets
* @generated
* @return value of the feature
*/
public String getTimexMod() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexMod == null)
jcasType.jcas.throwFeatMissing("timexMod", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexMod);}
/** setter for timexMod - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexMod(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_timexMod == null)
jcasType.jcas.throwFeatMissing("timexMod", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_timexMod, v);}
//*--------------*
//* Feature: emptyValue
/** getter for emptyValue - gets attribute to hold a value for "empty" timex3 tags
* @generated
* @return value of the feature
*/
public String getEmptyValue() {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_emptyValue == null)
jcasType.jcas.throwFeatMissing("emptyValue", "de.unihd.dbs.uima.types.heideltime.Timex3");
return jcasType.ll_cas.ll_getStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_emptyValue);}
/** setter for emptyValue - sets attribute to hold a value for "empty" timex3 tags
* @generated
* @param v value to set into the feature
*/
public void setEmptyValue(String v) {
if (Timex3_Type.featOkTst && ((Timex3_Type)jcasType).casFeat_emptyValue == null)
jcasType.jcas.throwFeatMissing("emptyValue", "de.unihd.dbs.uima.types.heideltime.Timex3");
jcasType.ll_cas.ll_setStringValue(addr, ((Timex3_Type)jcasType).casFeatCode_emptyValue, v);}
}
| 13,628 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
GoldEvent_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/GoldEvent_Type.java |
/* First created by JCasGen Thu Jan 04 14:37:05 CET 2018 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class GoldEvent_Type extends Event_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = GoldEvent.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("de.unihd.dbs.uima.types.heideltime.GoldEvent");
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public GoldEvent_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
}
}
| 1,005 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Dct.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Dct.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Dct extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Dct.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Dct() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Dct(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Dct(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Dct(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: filename
/** getter for filename - gets
* @generated
* @return value of the feature
*/
public String getFilename() {
if (Dct_Type.featOkTst && ((Dct_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Dct");
return jcasType.ll_cas.ll_getStringValue(addr, ((Dct_Type)jcasType).casFeatCode_filename);}
/** setter for filename - sets
* @generated
* @param v value to set into the feature
*/
public void setFilename(String v) {
if (Dct_Type.featOkTst && ((Dct_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Dct");
jcasType.ll_cas.ll_setStringValue(addr, ((Dct_Type)jcasType).casFeatCode_filename, v);}
//*--------------*
//* Feature: value
/** getter for value - gets
* @generated
* @return value of the feature
*/
public String getValue() {
if (Dct_Type.featOkTst && ((Dct_Type)jcasType).casFeat_value == null)
jcasType.jcas.throwFeatMissing("value", "de.unihd.dbs.uima.types.heideltime.Dct");
return jcasType.ll_cas.ll_getStringValue(addr, ((Dct_Type)jcasType).casFeatCode_value);}
/** setter for value - sets
* @generated
* @param v value to set into the feature
*/
public void setValue(String v) {
if (Dct_Type.featOkTst && ((Dct_Type)jcasType).casFeat_value == null)
jcasType.jcas.throwFeatMissing("value", "de.unihd.dbs.uima.types.heideltime.Dct");
jcasType.ll_cas.ll_setStringValue(addr, ((Dct_Type)jcasType).casFeatCode_value, v);}
//*--------------*
//* Feature: timexId
/** getter for timexId - gets
* @generated
* @return value of the feature
*/
public String getTimexId() {
if (Dct_Type.featOkTst && ((Dct_Type)jcasType).casFeat_timexId == null)
jcasType.jcas.throwFeatMissing("timexId", "de.unihd.dbs.uima.types.heideltime.Dct");
return jcasType.ll_cas.ll_getStringValue(addr, ((Dct_Type)jcasType).casFeatCode_timexId);}
/** setter for timexId - sets
* @generated
* @param v value to set into the feature
*/
public void setTimexId(String v) {
if (Dct_Type.featOkTst && ((Dct_Type)jcasType).casFeat_timexId == null)
jcasType.jcas.throwFeatMissing("timexId", "de.unihd.dbs.uima.types.heideltime.Dct");
jcasType.ll_cas.ll_setStringValue(addr, ((Dct_Type)jcasType).casFeatCode_timexId, v);}
}
| 4,486 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Timex3_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Timex3_Type.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Timex3_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Timex3.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("de.unihd.dbs.uima.types.heideltime.Timex3");
/** @generated */
final Feature casFeat_filename;
/** @generated */
final int casFeatCode_filename;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getFilename(int addr) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_filename);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setFilename(int addr, String v) {
if (featOkTst && casFeat_filename == null)
jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_filename, v);}
/** @generated */
final Feature casFeat_sentId;
/** @generated */
final int casFeatCode_sentId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getSentId(int addr) {
if (featOkTst && casFeat_sentId == null)
jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getIntValue(addr, casFeatCode_sentId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setSentId(int addr, int v) {
if (featOkTst && casFeat_sentId == null)
jcas.throwFeatMissing("sentId", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setIntValue(addr, casFeatCode_sentId, v);}
/** @generated */
final Feature casFeat_firstTokId;
/** @generated */
final int casFeatCode_firstTokId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getFirstTokId(int addr) {
if (featOkTst && casFeat_firstTokId == null)
jcas.throwFeatMissing("firstTokId", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getIntValue(addr, casFeatCode_firstTokId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setFirstTokId(int addr, int v) {
if (featOkTst && casFeat_firstTokId == null)
jcas.throwFeatMissing("firstTokId", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setIntValue(addr, casFeatCode_firstTokId, v);}
/** @generated */
final Feature casFeat_allTokIds;
/** @generated */
final int casFeatCode_allTokIds;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getAllTokIds(int addr) {
if (featOkTst && casFeat_allTokIds == null)
jcas.throwFeatMissing("allTokIds", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_allTokIds);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setAllTokIds(int addr, String v) {
if (featOkTst && casFeat_allTokIds == null)
jcas.throwFeatMissing("allTokIds", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_allTokIds, v);}
/** @generated */
final Feature casFeat_timexId;
/** @generated */
final int casFeatCode_timexId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexId(int addr) {
if (featOkTst && casFeat_timexId == null)
jcas.throwFeatMissing("timexId", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_timexId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexId(int addr, String v) {
if (featOkTst && casFeat_timexId == null)
jcas.throwFeatMissing("timexId", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_timexId, v);}
/** @generated */
final Feature casFeat_timexInstance;
/** @generated */
final int casFeatCode_timexInstance;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getTimexInstance(int addr) {
if (featOkTst && casFeat_timexInstance == null)
jcas.throwFeatMissing("timexInstance", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getIntValue(addr, casFeatCode_timexInstance);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexInstance(int addr, int v) {
if (featOkTst && casFeat_timexInstance == null)
jcas.throwFeatMissing("timexInstance", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setIntValue(addr, casFeatCode_timexInstance, v);}
/** @generated */
final Feature casFeat_timexType;
/** @generated */
final int casFeatCode_timexType;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexType(int addr) {
if (featOkTst && casFeat_timexType == null)
jcas.throwFeatMissing("timexType", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_timexType);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexType(int addr, String v) {
if (featOkTst && casFeat_timexType == null)
jcas.throwFeatMissing("timexType", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_timexType, v);}
/** @generated */
final Feature casFeat_timexValue;
/** @generated */
final int casFeatCode_timexValue;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexValue(int addr) {
if (featOkTst && casFeat_timexValue == null)
jcas.throwFeatMissing("timexValue", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_timexValue);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexValue(int addr, String v) {
if (featOkTst && casFeat_timexValue == null)
jcas.throwFeatMissing("timexValue", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_timexValue, v);}
/** @generated */
final Feature casFeat_foundByRule;
/** @generated */
final int casFeatCode_foundByRule;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getFoundByRule(int addr) {
if (featOkTst && casFeat_foundByRule == null)
jcas.throwFeatMissing("foundByRule", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_foundByRule);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setFoundByRule(int addr, String v) {
if (featOkTst && casFeat_foundByRule == null)
jcas.throwFeatMissing("foundByRule", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_foundByRule, v);}
/** @generated */
final Feature casFeat_timexQuant;
/** @generated */
final int casFeatCode_timexQuant;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexQuant(int addr) {
if (featOkTst && casFeat_timexQuant == null)
jcas.throwFeatMissing("timexQuant", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_timexQuant);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexQuant(int addr, String v) {
if (featOkTst && casFeat_timexQuant == null)
jcas.throwFeatMissing("timexQuant", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_timexQuant, v);}
/** @generated */
final Feature casFeat_timexFreq;
/** @generated */
final int casFeatCode_timexFreq;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexFreq(int addr) {
if (featOkTst && casFeat_timexFreq == null)
jcas.throwFeatMissing("timexFreq", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_timexFreq);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexFreq(int addr, String v) {
if (featOkTst && casFeat_timexFreq == null)
jcas.throwFeatMissing("timexFreq", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_timexFreq, v);}
/** @generated */
final Feature casFeat_timexMod;
/** @generated */
final int casFeatCode_timexMod;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexMod(int addr) {
if (featOkTst && casFeat_timexMod == null)
jcas.throwFeatMissing("timexMod", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_timexMod);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexMod(int addr, String v) {
if (featOkTst && casFeat_timexMod == null)
jcas.throwFeatMissing("timexMod", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_timexMod, v);}
/** @generated */
final Feature casFeat_emptyValue;
/** @generated */
final int casFeatCode_emptyValue;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getEmptyValue(int addr) {
if (featOkTst && casFeat_emptyValue == null)
jcas.throwFeatMissing("emptyValue", "de.unihd.dbs.uima.types.heideltime.Timex3");
return ll_cas.ll_getStringValue(addr, casFeatCode_emptyValue);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setEmptyValue(int addr, String v) {
if (featOkTst && casFeat_emptyValue == null)
jcas.throwFeatMissing("emptyValue", "de.unihd.dbs.uima.types.heideltime.Timex3");
ll_cas.ll_setStringValue(addr, casFeatCode_emptyValue, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Timex3_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_filename = jcas.getRequiredFeatureDE(casType, "filename", "uima.cas.String", featOkTst);
casFeatCode_filename = (null == casFeat_filename) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_filename).getCode();
casFeat_sentId = jcas.getRequiredFeatureDE(casType, "sentId", "uima.cas.Integer", featOkTst);
casFeatCode_sentId = (null == casFeat_sentId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_sentId).getCode();
casFeat_firstTokId = jcas.getRequiredFeatureDE(casType, "firstTokId", "uima.cas.Integer", featOkTst);
casFeatCode_firstTokId = (null == casFeat_firstTokId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_firstTokId).getCode();
casFeat_allTokIds = jcas.getRequiredFeatureDE(casType, "allTokIds", "uima.cas.String", featOkTst);
casFeatCode_allTokIds = (null == casFeat_allTokIds) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_allTokIds).getCode();
casFeat_timexId = jcas.getRequiredFeatureDE(casType, "timexId", "uima.cas.String", featOkTst);
casFeatCode_timexId = (null == casFeat_timexId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_timexId).getCode();
casFeat_timexInstance = jcas.getRequiredFeatureDE(casType, "timexInstance", "uima.cas.Integer", featOkTst);
casFeatCode_timexInstance = (null == casFeat_timexInstance) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_timexInstance).getCode();
casFeat_timexType = jcas.getRequiredFeatureDE(casType, "timexType", "uima.cas.String", featOkTst);
casFeatCode_timexType = (null == casFeat_timexType) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_timexType).getCode();
casFeat_timexValue = jcas.getRequiredFeatureDE(casType, "timexValue", "uima.cas.String", featOkTst);
casFeatCode_timexValue = (null == casFeat_timexValue) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_timexValue).getCode();
casFeat_foundByRule = jcas.getRequiredFeatureDE(casType, "foundByRule", "uima.cas.String", featOkTst);
casFeatCode_foundByRule = (null == casFeat_foundByRule) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_foundByRule).getCode();
casFeat_timexQuant = jcas.getRequiredFeatureDE(casType, "timexQuant", "uima.cas.String", featOkTst);
casFeatCode_timexQuant = (null == casFeat_timexQuant) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_timexQuant).getCode();
casFeat_timexFreq = jcas.getRequiredFeatureDE(casType, "timexFreq", "uima.cas.String", featOkTst);
casFeatCode_timexFreq = (null == casFeat_timexFreq) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_timexFreq).getCode();
casFeat_timexMod = jcas.getRequiredFeatureDE(casType, "timexMod", "uima.cas.String", featOkTst);
casFeatCode_timexMod = (null == casFeat_timexMod) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_timexMod).getCode();
casFeat_emptyValue = jcas.getRequiredFeatureDE(casType, "emptyValue", "uima.cas.String", featOkTst);
casFeatCode_emptyValue = (null == casFeat_emptyValue) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_emptyValue).getCode();
}
}
| 15,220 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Sentence.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Sentence.java |
/* First created by JCasGen Thu Nov 23 16:42:40 CET 2017 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Sentence extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Sentence.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Sentence() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Sentence(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Sentence(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Sentence(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: filename
/** getter for filename - gets
* @generated
* @return value of the feature
*/
public String getFilename() {
if (Sentence_Type.featOkTst && ((Sentence_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Sentence");
return jcasType.ll_cas.ll_getStringValue(addr, ((Sentence_Type)jcasType).casFeatCode_filename);}
/** setter for filename - sets
* @generated
* @param v value to set into the feature
*/
public void setFilename(String v) {
if (Sentence_Type.featOkTst && ((Sentence_Type)jcasType).casFeat_filename == null)
jcasType.jcas.throwFeatMissing("filename", "de.unihd.dbs.uima.types.heideltime.Sentence");
jcasType.ll_cas.ll_setStringValue(addr, ((Sentence_Type)jcasType).casFeatCode_filename, v);}
//*--------------*
//* Feature: sentenceId
/** getter for sentenceId - gets
* @generated
* @return value of the feature
*/
public int getSentenceId() {
if (Sentence_Type.featOkTst && ((Sentence_Type)jcasType).casFeat_sentenceId == null)
jcasType.jcas.throwFeatMissing("sentenceId", "de.unihd.dbs.uima.types.heideltime.Sentence");
return jcasType.ll_cas.ll_getIntValue(addr, ((Sentence_Type)jcasType).casFeatCode_sentenceId);}
/** setter for sentenceId - sets
* @generated
* @param v value to set into the feature
*/
public void setSentenceId(int v) {
if (Sentence_Type.featOkTst && ((Sentence_Type)jcasType).casFeat_sentenceId == null)
jcasType.jcas.throwFeatMissing("sentenceId", "de.unihd.dbs.uima.types.heideltime.Sentence");
jcasType.ll_cas.ll_setIntValue(addr, ((Sentence_Type)jcasType).casFeatCode_sentenceId, v);}
}
| 3,797 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Timex3Interval_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/de/unihd/dbs/uima/types/heideltime/Timex3Interval_Type.java |
/* First created by JCasGen Thu Jan 04 14:37:05 CET 2018 */
package de.unihd.dbs.uima.types.heideltime;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Timex3Interval_Type extends Timex3_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Timex3Interval.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("de.unihd.dbs.uima.types.heideltime.Timex3Interval");
/** @generated */
final Feature casFeat_TimexValueEB;
/** @generated */
final int casFeatCode_TimexValueEB;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexValueEB(int addr) {
if (featOkTst && casFeat_TimexValueEB == null)
jcas.throwFeatMissing("TimexValueEB", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return ll_cas.ll_getStringValue(addr, casFeatCode_TimexValueEB);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexValueEB(int addr, String v) {
if (featOkTst && casFeat_TimexValueEB == null)
jcas.throwFeatMissing("TimexValueEB", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
ll_cas.ll_setStringValue(addr, casFeatCode_TimexValueEB, v);}
/** @generated */
final Feature casFeat_TimexValueLE;
/** @generated */
final int casFeatCode_TimexValueLE;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexValueLE(int addr) {
if (featOkTst && casFeat_TimexValueLE == null)
jcas.throwFeatMissing("TimexValueLE", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return ll_cas.ll_getStringValue(addr, casFeatCode_TimexValueLE);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexValueLE(int addr, String v) {
if (featOkTst && casFeat_TimexValueLE == null)
jcas.throwFeatMissing("TimexValueLE", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
ll_cas.ll_setStringValue(addr, casFeatCode_TimexValueLE, v);}
/** @generated */
final Feature casFeat_TimexValueEE;
/** @generated */
final int casFeatCode_TimexValueEE;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexValueEE(int addr) {
if (featOkTst && casFeat_TimexValueEE == null)
jcas.throwFeatMissing("TimexValueEE", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return ll_cas.ll_getStringValue(addr, casFeatCode_TimexValueEE);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexValueEE(int addr, String v) {
if (featOkTst && casFeat_TimexValueEE == null)
jcas.throwFeatMissing("TimexValueEE", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
ll_cas.ll_setStringValue(addr, casFeatCode_TimexValueEE, v);}
/** @generated */
final Feature casFeat_TimexValueLB;
/** @generated */
final int casFeatCode_TimexValueLB;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimexValueLB(int addr) {
if (featOkTst && casFeat_TimexValueLB == null)
jcas.throwFeatMissing("TimexValueLB", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return ll_cas.ll_getStringValue(addr, casFeatCode_TimexValueLB);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimexValueLB(int addr, String v) {
if (featOkTst && casFeat_TimexValueLB == null)
jcas.throwFeatMissing("TimexValueLB", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
ll_cas.ll_setStringValue(addr, casFeatCode_TimexValueLB, v);}
/** @generated */
final Feature casFeat_emptyValue;
/** @generated */
final int casFeatCode_emptyValue;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getEmptyValue(int addr) {
if (featOkTst && casFeat_emptyValue == null)
jcas.throwFeatMissing("emptyValue", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return ll_cas.ll_getStringValue(addr, casFeatCode_emptyValue);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setEmptyValue(int addr, String v) {
if (featOkTst && casFeat_emptyValue == null)
jcas.throwFeatMissing("emptyValue", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
ll_cas.ll_setStringValue(addr, casFeatCode_emptyValue, v);}
/** @generated */
final Feature casFeat_beginTimex;
/** @generated */
final int casFeatCode_beginTimex;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getBeginTimex(int addr) {
if (featOkTst && casFeat_beginTimex == null)
jcas.throwFeatMissing("beginTimex", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return ll_cas.ll_getStringValue(addr, casFeatCode_beginTimex);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setBeginTimex(int addr, String v) {
if (featOkTst && casFeat_beginTimex == null)
jcas.throwFeatMissing("beginTimex", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
ll_cas.ll_setStringValue(addr, casFeatCode_beginTimex, v);}
/** @generated */
final Feature casFeat_endTimex;
/** @generated */
final int casFeatCode_endTimex;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getEndTimex(int addr) {
if (featOkTst && casFeat_endTimex == null)
jcas.throwFeatMissing("endTimex", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
return ll_cas.ll_getStringValue(addr, casFeatCode_endTimex);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setEndTimex(int addr, String v) {
if (featOkTst && casFeat_endTimex == null)
jcas.throwFeatMissing("endTimex", "de.unihd.dbs.uima.types.heideltime.Timex3Interval");
ll_cas.ll_setStringValue(addr, casFeatCode_endTimex, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Timex3Interval_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_TimexValueEB = jcas.getRequiredFeatureDE(casType, "TimexValueEB", "uima.cas.String", featOkTst);
casFeatCode_TimexValueEB = (null == casFeat_TimexValueEB) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_TimexValueEB).getCode();
casFeat_TimexValueLE = jcas.getRequiredFeatureDE(casType, "TimexValueLE", "uima.cas.String", featOkTst);
casFeatCode_TimexValueLE = (null == casFeat_TimexValueLE) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_TimexValueLE).getCode();
casFeat_TimexValueEE = jcas.getRequiredFeatureDE(casType, "TimexValueEE", "uima.cas.String", featOkTst);
casFeatCode_TimexValueEE = (null == casFeat_TimexValueEE) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_TimexValueEE).getCode();
casFeat_TimexValueLB = jcas.getRequiredFeatureDE(casType, "TimexValueLB", "uima.cas.String", featOkTst);
casFeatCode_TimexValueLB = (null == casFeat_TimexValueLB) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_TimexValueLB).getCode();
casFeat_emptyValue = jcas.getRequiredFeatureDE(casType, "emptyValue", "uima.cas.String", featOkTst);
casFeatCode_emptyValue = (null == casFeat_emptyValue) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_emptyValue).getCode();
casFeat_beginTimex = jcas.getRequiredFeatureDE(casType, "beginTimex", "uima.cas.String", featOkTst);
casFeatCode_beginTimex = (null == casFeat_beginTimex) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_beginTimex).getCode();
casFeat_endTimex = jcas.getRequiredFeatureDE(casType, "endTimex", "uima.cas.String", featOkTst);
casFeatCode_endTimex = (null == casFeat_endTimex) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_endTimex).getCode();
}
}
| 8,995 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
NewsleakElasticsearchReader.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/reader/NewsleakElasticsearchReader.java | package uhh_lt.newsleak.reader;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.uima.UimaContext;
import org.apache.uima.cas.CAS;
import org.apache.uima.cas.CASException;
import org.apache.uima.collection.CollectionException;
import org.apache.uima.fit.component.CasCollectionReader_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.fit.descriptor.ExternalResource;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import org.apache.uima.util.Progress;
import org.apache.uima.util.ProgressImpl;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.sort.SortParseElement;
import de.unihd.dbs.uima.types.heideltime.Dct;
import uhh_lt.newsleak.resources.ElasticsearchResource;
import uhh_lt.newsleak.types.Metadata;
import uhh_lt.newsleak.writer.ElasticsearchDocumentWriter;
/**
 * This reader reads from a temporary Elasticsearch index which has been
 * populated by an earlier preprocessing step. That step is expected to
 * provide a document id, the fulltext, a document time stamp, and document
 * language information.
*
* The reader reads all documents for exactly one language into CASes for
* further information extraction processing steps.
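 *
 * <p>
 * A minimal usage sketch with the uimaFIT factories (illustrative only; the
 * external resource configuration and the language value are assumptions,
 * not taken from this class):
 *
 * <pre>{@code
 * ExternalResourceDescription es = ExternalResourceFactory
 *     .createExternalResourceDescription(ElasticsearchResource.class);
 * CollectionReaderDescription reader = CollectionReaderFactory
 *     .createReaderDescription(NewsleakElasticsearchReader.class,
 *         NewsleakElasticsearchReader.RESOURCE_ESCLIENT, es,
 *         NewsleakElasticsearchReader.PARAM_LANGUAGE, "en");
 * }</pre>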
*/
public class NewsleakElasticsearchReader extends CasCollectionReader_ImplBase {
/** The logger. */
private Logger logger;
/** The Constant RESOURCE_ESCLIENT. */
public static final String RESOURCE_ESCLIENT = "esResource";
/** The es resource. */
@ExternalResource(key = RESOURCE_ESCLIENT)
private ElasticsearchResource esResource;
/** The Constant PARAM_LANGUAGE. */
public static final String PARAM_LANGUAGE = "language";
/** The language. */
@ConfigurationParameter(name = PARAM_LANGUAGE, mandatory = true)
private String language;
/** The client. */
private TransportClient client;
/** The es index. */
private String esIndex;
/** The total records. */
private long totalRecords = 0;
/** The current record. */
private int currentRecord = 0;
/** The total id list. */
private ArrayList<String> totalIdList;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.CasCollectionReader_ImplBase#initialize(org.
* apache.uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
logger = context.getLogger();
client = esResource.getClient();
esIndex = esResource.getIndex();
try {
XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("match").startObject()
.field("DocumentLanguage", language).endObject().endObject();
// retrieve all ids
totalIdList = new ArrayList<String>();
SearchResponse scrollResp = client.prepareSearch(esIndex)
.addSort(SortParseElement.DOC_FIELD_NAME, SortOrder.ASC).setScroll(new TimeValue(60000))
.setQuery(builder).setSize(10000).execute().actionGet();
while (true) {
for (SearchHit hit : scrollResp.getHits().getHits()) {
totalIdList.add(hit.getId());
}
scrollResp = client.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(60000))
.execute().actionGet();
// Break condition: No hits are returned
if (scrollResp.getHits().getHits().length == 0) {
break;
}
}
totalRecords = totalIdList.size();
logger.log(Level.INFO, "Found " + totalRecords + " for language " + language + " in index");
} catch (IOException e) {
throw new ResourceInitializationException(e);
}
}
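/*
 * Minimal usage sketch (not from the original pipeline wiring; esResourceDesc
 * stands for an ElasticsearchResource description assumed to be created
 * elsewhere via ExternalResourceFactory):
 *
 * CollectionReaderDescription reader = CollectionReaderFactory.createReaderDescription(
 *     NewsleakElasticsearchReader.class,
 *     NewsleakElasticsearchReader.RESOURCE_ESCLIENT, esResourceDesc,
 *     NewsleakElasticsearchReader.PARAM_LANGUAGE, "eng");
 */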
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.collection.CollectionReader#getNext(org.apache.uima.cas.CAS)
*/
public void getNext(CAS cas) throws IOException, CollectionException {
JCas jcas;
try {
jcas = cas.getJCas();
} catch (CASException e) {
throw new CollectionException(e);
}
String docId = totalIdList.get(currentRecord);
GetResponse response = client.prepareGet(esIndex, ElasticsearchDocumentWriter.ES_TYPE_DOCUMENT, docId)
.setFields("Content", "Created").get();
jcas.setDocumentText((String) response.getField("Content").getValue());
jcas.setDocumentLanguage(language);
// Set metadata
Metadata metaCas = new Metadata(jcas);
metaCas.setDocId(docId);
String docDate = (String) response.getField("Created").getValue();
metaCas.setTimestamp(docDate);
metaCas.addToIndexes();
// heideltime
Dct dct = new Dct(jcas);
dct.setValue(docDate);
dct.addToIndexes();
currentRecord++;
logger.log(Level.FINEST, "Document ID: " + docId);
logger.log(Level.FINEST, "Document Length: " + jcas.getDocumentText().length());
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.collection.base_cpm.BaseCollectionReader#getProgress()
*/
public Progress[] getProgress() {
return new Progress[] { new ProgressImpl(Long.valueOf(currentRecord).intValue(),
Long.valueOf(totalRecords).intValue(), Progress.ENTITIES) };
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.collection.base_cpm.BaseCollectionReader#hasNext()
*/
public boolean hasNext() throws IOException, CollectionException {
return currentRecord < totalRecords;
}
}
| 5,676 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
NewsleakCsvStreamReader.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/reader/NewsleakCsvStreamReader.java | package uhh_lt.newsleak.reader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.Iterator;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;
import org.apache.uima.UimaContext;
import org.apache.uima.cas.CAS;
import org.apache.uima.cas.CASException;
import org.apache.uima.collection.CollectionException;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Progress;
import org.apache.uima.util.ProgressImpl;
import uhh_lt.newsleak.types.Metadata;
/**
* The Class NewsleakCsvStreamReader expects externally generated data for a
* data import into newsleak. The importer needs 2 files (their paths can be
* configured in the preprocessing configuration file):
*
* - documents.csv: a 4-column CSV (RFC4180 format) with document id (Integer),
* fulltext (may contain line breaks), Date-time, and document language code
* (optional)
*
* - metadata.csv: a 4-column CSV (comma separator) with document id,
* metadata key, metadata value, metadata type (e.g. Text)
*
* See <i>data/document_example.csv</i> and <i>data/metadata_example.csv</i> for
* example files.
*
* Metadata will not be handled by this reader, but imported directly in
* the @see uhh_lt.newsleak.preprocessing.InformationExtraction2Postgres
* processor.
*/
public class NewsleakCsvStreamReader extends NewsleakReader {
/** Directory containing input files. */
public static final String PARAM_INPUTDIR = "inputDir";
/** The input dir. */
@ConfigurationParameter(name = PARAM_INPUTDIR, mandatory = false, defaultValue = ".")
private String inputDir;
/** The Constant PARAM_DOCUMENT_FILE. */
public static final String PARAM_DOCUMENT_FILE = "documentFile";
/** The document file. */
@ConfigurationParameter(name = PARAM_DOCUMENT_FILE, mandatory = true)
private String documentFile;
/** The Constant PARAM_METADATA_FILE. */
public static final String PARAM_METADATA_FILE = "metadataFile";
/** The metadata file. */
@ConfigurationParameter(name = PARAM_METADATA_FILE, mandatory = true)
private String metadataFile;
/** The Constant PARAM_DEFAULT_LANG. */
public static final String PARAM_DEFAULT_LANG = "defaultLanguage";
/** The default language. */
@ConfigurationParameter(name = PARAM_DEFAULT_LANG, mandatory = false, defaultValue = "en")
private String defaultLanguage;
/** The Constant PARAM_DEBUG_MAX_DOCS. */
public static final String PARAM_DEBUG_MAX_DOCS = "maxRecords";
/** The max records. */
@ConfigurationParameter(name = PARAM_DEBUG_MAX_DOCS, mandatory = false)
private Integer maxRecords = Integer.MAX_VALUE;
/** The csv reader. */
private Reader csvReader;
/** The records. */
private Iterable<CSVRecord> records;
/** The records iterator. */
private Iterator<CSVRecord> recordsIterator;
// private Reader metadataReader;
// private Iterable<CSVRecord> metadata;
// private Iterator<CSVRecord> metadataIterator;
/** Number of total records in the documents CSV. */
private int totalRecords = 0;
/** Current record number. */
private int currentRecord = 0;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.CasCollectionReader_ImplBase#initialize(org.
* apache.uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
logger = context.getLogger();
try {
File csvFile = new File(inputDir, documentFile);
csvReader = new FileReader(csvFile);
records = CSVFormat.RFC4180.parse(csvReader);
recordsIterator = records.iterator();
} catch (IOException e) {
throw new ResourceInitializationException(e);
}
}
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.collection.CollectionReader#getNext(org.apache.uima.cas.CAS)
*/
public void getNext(CAS cas) throws IOException, CollectionException {
currentRecord++;
JCas jcas;
try {
jcas = cas.getJCas();
} catch (CASException e) {
throw new CollectionException(e);
}
// Set document data
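// illustrative row (hypothetical values): 7,"Full text, possibly multi-line...",2018-01-01,eng
// columns: 0 = document id, 1 = fulltext, 2 = timestamp, 3 = language code (optional)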
CSVRecord record = recordsIterator.next();
String docId = record.get(0); // external document id from CSV file
jcas.setDocumentText(cleanBodyText(record.get(1)));
jcas.setDocumentLanguage(record.size() > 3 ? record.get(3) : defaultLanguage);
// Set metadata
Metadata metaCas = new Metadata(jcas);
metaCas.setDocId(docId);
metaCas.setTimestamp(record.get(2));
metaCas.addToIndexes();
// metadata is assumed to be provided by external processing in a separate file
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.fit.component.CasCollectionReader_ImplBase#close()
*/
public void close() throws IOException {
csvReader.close();
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.collection.base_cpm.BaseCollectionReader#getProgress()
*/
public Progress[] getProgress() {
return new Progress[] { new ProgressImpl(Long.valueOf(currentRecord).intValue(),
Long.valueOf(totalRecords).intValue(), Progress.ENTITIES) };
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.collection.base_cpm.BaseCollectionReader#hasNext()
*/
public boolean hasNext() throws IOException, CollectionException {
if (currentRecord > maxRecords)
return false;
return recordsIterator.hasNext();
}
}
| 5,423 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
HooverElasticsearchReader.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/reader/HooverElasticsearchReader.java | package uhh_lt.newsleak.reader;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.uima.UimaContext;
import org.apache.uima.cas.CAS;
import org.apache.uima.cas.CASException;
import org.apache.uima.collection.CollectionException;
import org.apache.uima.fit.descriptor.ExternalResource;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.apache.uima.util.Progress;
import org.apache.uima.util.ProgressImpl;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import de.unihd.dbs.uima.types.heideltime.Dct;
import io.searchbox.client.JestClient;
import io.searchbox.client.JestResult;
import io.searchbox.core.Get;
import io.searchbox.core.Search;
import io.searchbox.core.SearchScroll;
import io.searchbox.params.Parameters;
import uhh_lt.newsleak.resources.HooverResource;
import uhh_lt.newsleak.resources.MetadataResource;
import uhh_lt.newsleak.types.Metadata;
/**
* The HooverElasticsearchReader connects to a running instance of the Hoover
* text data extraction system created by the EIC.network (see
* <a href="https://hoover.github.io">https://hoover.github.io</a>). It utilizes
* the Hoover API to query for all extracted documents in a collection.
*
* Hoover is expected to extract raw fulltext (regardless of any further NLP
* application or human analysts' requirements). Newsleak takes Hoover's output
* and extracted file metadata (e.g. creation date).
*
* Duplicated storage of fulltexts (with newly generated document IDs) is
* necessary since we clean and preprocess raw data for further annotation
* processes. Among others, this includes deletion of multiple blank lines
* (often extracted from Excel sheets), dehyphenation at line endings (a result
* from OCR-ed or badly encoded PDFs), or splitting of long documents into
* chunks of roughly page length.
*
* This reader sets temporary document IDs. The final document IDs are generated
* as auto-increments by @see
* uhh_lt.newsleak.writer.ElasticsearchDocumentWriter
*
* Metadata is written into a temporary file on disk to be inserted into the
* newsleak postgres database later on.
*/
public class HooverElasticsearchReader extends NewsleakReader {
/** The Constant RESOURCE_HOOVER. */
public static final String RESOURCE_HOOVER = "hooverResource";
/** The hoover resource. */
@ExternalResource(key = RESOURCE_HOOVER)
private HooverResource hooverResource;
/** The Constant RESOURCE_METADATA. */
public static final String RESOURCE_METADATA = "metadataResource";
/** The metadata resource. */
@ExternalResource(key = RESOURCE_METADATA)
private MetadataResource metadataResource;
/** The Constant PARAM_SCROLL_SIZE. */
private static final String PARAM_SCROLL_SIZE = "10000";
/** The Constant PARAM_SCROLL_TIME. */
private static final String PARAM_SCROLL_TIME = "1m";
/** JEST client to run JSON API requests. */
private JestClient client;
/** The Hoover elasticsearch index. */
private String esIndex;
/** The total records. */
private int totalRecords = 0;
/** The current record. */
private int currentRecord = 0;
/** The list of all Ids. */
private ArrayList<String> totalIdList;
/** The date format. */
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
/** The date created. */
SimpleDateFormat dateCreated = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
/** The date json. */
SimpleDateFormat dateJson = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssX");
/** The email regex pattern. */
Pattern emailPattern = Pattern.compile("[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+");
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.CasCollectionReader_ImplBase#initialize(org.
* apache.uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
logger = context.getLogger();
// init hoover connection
client = hooverResource.getClient();
esIndex = hooverResource.getIndex();
// query hoover's elasticsearch index
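// match_all with "_source": false — only document ids are collected here;
// the full documents are fetched individually later in getNext()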
Search search = new Search.Builder(
"{\"query\": {\"match_all\" : {}}, \"_source\" : false, \"size\" : " + PARAM_SCROLL_SIZE + "}")
.addIndex(hooverResource.getIndex()).addType(HooverResource.HOOVER_DOCUMENT_TYPE)
.setParameter(Parameters.SCROLL, PARAM_SCROLL_TIME).build();
try {
// run JEST request
JestResult result = client.execute(search);
totalIdList = new ArrayList<String>();
JsonArray hits = result.getJsonObject().getAsJsonObject("hits").getAsJsonArray("hits");
Integer total = result.getJsonObject().getAsJsonObject("hits").get("total").getAsInt();
int nHits = hits.size();
logger.log(Level.INFO, "Hits first result: " + nHits);
logger.log(Level.INFO, "Hits total: " + total);
totalIdList.addAll(hooverResource.getIds(hits));
String scrollId = result.getJsonObject().get("_scroll_id").getAsString();
// run scroll request to collect all Ids
int i = 0;
while (nHits > 0) {
SearchScroll scroll = new SearchScroll.Builder(scrollId, PARAM_SCROLL_TIME).build();
result = client.execute(scroll);
hits = result.getJsonObject().getAsJsonObject("hits").getAsJsonArray("hits");
nHits = hits.size();
logger.log(Level.INFO, "Hits " + ++i + " result: " + nHits);
totalIdList.addAll(hooverResource.getIds(hits));
scrollId = result.getJsonObject().getAsJsonPrimitive("_scroll_id").getAsString();
}
if (maxRecords > 0 && maxRecords < totalIdList.size()) {
totalIdList = new ArrayList<String>(totalIdList.subList(0, maxRecords));
}
totalRecords = totalIdList.size();
logger.log(Level.INFO, "Found " + totalRecords + " ids in index " + esIndex);
} catch (IOException e) {
throw new ResourceInitializationException(e);
}
}
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.collection.CollectionReader#getNext(org.apache.uima.cas.CAS)
*/
public void getNext(CAS cas) throws IOException, CollectionException {
JCas jcas;
try {
jcas = cas.getJCas();
} catch (CASException e) {
throw new CollectionException(e);
}
// temporary document Id (a new id will be generated by the
// ElasticsearchDocumentWriter)
String docIdNewsleak = Integer.toString(currentRecord);
String docIdHoover = totalIdList.get(currentRecord - 1);
logger.log(Level.INFO, "Proceessing document: " + docIdHoover);
Get get = new Get.Builder(hooverResource.getIndex(), docIdHoover).type(HooverResource.HOOVER_DOCUMENT_TYPE)
.build();
JestResult getResult = client.execute(get);
JsonObject o = getResult.getJsonObject();
JsonObject source = o.get("_source").getAsJsonObject();
String docText = "";
String field;
// put email header information in main text
field = getField(source, "from");
if (field != null) {
String fromText = field.trim();
docText += "From: " + fromText.replaceAll("<", "[").replaceAll(">", "]") + "\n";
}
JsonArray arrayField = getFieldArray(source, "to");
if (arrayField != null) {
String toList = "";
for (JsonElement item : arrayField) {
String toListItem = item.getAsString().trim();
toListItem = toListItem.replaceAll("<", "[").replaceAll(">", "]");
toListItem = toListItem.replaceAll("\\s+", " ") + "\n";
toList += toList.isEmpty() ? toListItem : "; " + toListItem;
}
docText += "To: " + toList;
}
field = getField(source, "subject");
if (field != null) {
docText += "Subject: " + field.trim() + "\n";
}
if (!docText.isEmpty()) {
docText += "\n-- \n\n";
}
// add main text
field = getField(source, "text");
if (field != null) {
String completeText = field.trim();
docText += cleanBodyText(completeText);
}
jcas.setDocumentText(docText);
// set document metadata
Metadata metaCas = new Metadata(jcas);
metaCas.setDocId(docIdNewsleak);
// date
String docDate = "1900-01-01";
Date dateField = null;
Date dateCreatedField = null;
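// prefer the later of the "date" and "date-created" fields; fall back to
// whichever is present, else keep the 1900-01-01 default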
try {
String date = getField(source, "date");
if (date != null) {
// docDate = dateFormat.format();
dateField = dateCreated.parse(date);
}
date = getField(source, "date-created");
if (date != null) {
// docDate = dateFormat.format() ;
dateCreatedField = dateJson.parse(date);
}
if (dateField != null && dateCreatedField != null) {
docDate = dateField.before(dateCreatedField) ? dateFormat.format(dateCreatedField)
: dateFormat.format(dateField);
} else {
if (dateField != null) {
docDate = dateFormat.format(dateField);
}
if (dateCreatedField != null) {
docDate = dateFormat.format(dateCreatedField);
}
}
} catch (ParseException e) {
e.printStackTrace();
}
metaCas.setTimestamp(docDate);
// heideltime
Dct dct = new Dct(jcas);
dct.setValue(docDate);
dct.addToIndexes();
metaCas.addToIndexes();
// write external metadata
ArrayList<List<String>> metadata = new ArrayList<List<String>>();
// filename, subject, path
String fileName = "";
field = getField(source, "filename");
if (field != null) {
fileName = field;
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "filename", fileName));
}
field = getField(source, "subject");
if (field != null) {
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "subject", field));
} else {
if (!fileName.isEmpty()) {
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "subject", fileName));
}
}
field = getField(source, "path");
if (field != null)
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "path", field));
// Source Id
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "Link", hooverResource.getHooverBasePath() + docIdHoover));
// attachments
Boolean booleanField = getFieldBoolean(source, "attachments");
if (booleanField != null)
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "attachments", booleanField.toString()));
// content-type
field = getField(source, "content-type");
if (field != null)
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "content-type", field));
// file-type
field = getField(source, "filetype");
if (field != null)
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "filetype", field));
// from
field = getField(source, "from");
if (field != null) {
for (String email : extractEmail(field)) {
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "from", email));
}
}
// to
arrayField = getFieldArray(source, "to");
if (arrayField != null) {
for (JsonElement toList : arrayField) {
for (String email : extractEmail(toList.getAsString())) {
metadata.add(metadataResource.createTextMetadata(docIdNewsleak, "to", email));
}
}
}
metadataResource.appendMetadata(metadata);
}
/**
* Returns a string field value from a JSON object.
*
* @param o
* the Json object
* @param fieldname
* the fieldname
* @return the field
*/
private String getField(JsonObject o, String fieldname) {
JsonElement fieldValue = o.get(fieldname);
if (fieldValue == null) {
return null;
} else {
return fieldValue.isJsonNull() ? null : fieldValue.getAsString();
}
}
/**
* Returns a boolean field value from a Json Object.
*
* @param o
* the Json object
* @param fieldname
* the fieldname
* @return the field boolean
*/
private Boolean getFieldBoolean(JsonObject o, String fieldname) {
JsonElement fieldValue = o.get(fieldname);
if (fieldValue == null) {
return null;
} else {
return fieldValue.isJsonNull() ? null : fieldValue.getAsBoolean();
}
}
/**
* Returns an array field value from a Json Object.
*
* @param o
* the Json object
* @param fieldname
* the fieldname
* @return the field array
*/
private JsonArray getFieldArray(JsonObject o, String fieldname) {
JsonElement fieldValue = o.get(fieldname);
if (fieldValue == null) {
return null;
} else {
return fieldValue.isJsonNull() ? null : fieldValue.getAsJsonArray();
}
}
/**
* Extracts email addresses from a given string.
*
* @param s
* the string to match email patterns in
* @return an array of email addresses
*/
private ArrayList<String> extractEmail(String s) {
ArrayList<String> emails = new ArrayList<String>();
Matcher m = emailPattern.matcher(s);
while (m.find()) {
emails.add(m.group());
}
return emails;
}
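// Example (hypothetical input): extractEmail("Jane Doe [jane.doe@example.org]")
// returns ["jane.doe@example.org"]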
/*
* (non-Javadoc)
*
* @see org.apache.uima.collection.base_cpm.BaseCollectionReader#getProgress()
*/
public Progress[] getProgress() {
return new Progress[] { new ProgressImpl(Long.valueOf(currentRecord).intValue() - 1,
Long.valueOf(totalRecords).intValue(), Progress.ENTITIES) };
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.collection.base_cpm.BaseCollectionReader#hasNext()
*/
public boolean hasNext() throws IOException, CollectionException {
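// note: hasNext() advances currentRecord as a side effect, which is why
// getNext() reads totalIdList.get(currentRecord - 1)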
if (currentRecord < totalRecords) {
currentRecord++;
return true;
} else {
return false;
}
}
}
| 13,433 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
NewsleakReader.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/reader/NewsleakReader.java | package uhh_lt.newsleak.reader;
import java.util.Scanner;
import org.apache.uima.fit.component.CasCollectionReader_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
/**
* This abstract reader provides basic functionality for all primary
* elasticsearch data readers. This is basically a maximum threshold for reading
* documents (for debug purposes), and some fulltext cleaning procedures.
*
* Fulltext cleaning encompasses skipping blank lines once more than
* MAXIMUM_EMPTY_LINE_SEQUENCE_LENGTH of them occur in a row (this is to deal
* with fulltext extraction problems from spreadsheet documents such as xlsx
* files, which may result in hundreds of thousands of blank lines), and pruning
* of documents to a maximum length (given in characters).
*/
public abstract class NewsleakReader extends CasCollectionReader_ImplBase {
/** The logger. */
protected Logger logger;
/** The Constant PARAM_DEBUG_MAX_DOCS. */
public static final String PARAM_DEBUG_MAX_DOCS = "maxRecords";
/** The max records. */
@ConfigurationParameter(name = PARAM_DEBUG_MAX_DOCS, mandatory = false)
protected Integer maxRecords = Integer.MAX_VALUE;
/** The Constant PARAM_MAX_DOC_LENGTH. */
public static final String PARAM_MAX_DOC_LENGTH = "maxDocumentLength";
/** The max document length. */
@ConfigurationParameter(name = PARAM_MAX_DOC_LENGTH, mandatory = false)
protected Integer maxDocumentLength = Integer.MAX_VALUE; // e.g. 1500 chars * 10000 = 15,000,000 chars, i.e. roughly 10,000 standard pages
/** The Constant MAXIMUM_EMPTY_LINE_SEQUENCE_LENGTH. */
public static final int MAXIMUM_EMPTY_LINE_SEQUENCE_LENGTH = 50;
/**
* Clean body text (prune to maximum length, delete long sequences of blank
* lines).
*
* @param bodyText
* the body text
* @return the string
*/
public String cleanBodyText(String bodyText) {
int origLength = bodyText.length();
StringBuilder sb = new StringBuilder();
Scanner scanner = new Scanner(bodyText);
int emptyLines = 0;
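// keeps at most MAXIMUM_EMPTY_LINE_SEQUENCE_LENGTH + 1 consecutive blank
// lines, then skips further ones until a non-empty line resets the counter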
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
if (line.trim().isEmpty()) {
if (emptyLines > MAXIMUM_EMPTY_LINE_SEQUENCE_LENGTH) {
continue;
}
emptyLines++;
} else {
emptyLines = 0;
}
sb.append(line + "\n");
}
scanner.close();
bodyText = sb.toString();
if (bodyText.length() != origLength) {
logger.log(Level.INFO, "Multiple linebreaks have been collapsed.");
}
if (bodyText.length() > maxDocumentLength) {
logger.log(Level.INFO, "Document length exceeds maximum (" + maxDocumentLength + "): " + bodyText.length());
bodyText = bodyText.substring(0, maxDocumentLength);
}
return bodyText;
}
}
| 2,720 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
NewsleakPreprocessor.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/preprocessing/NewsleakPreprocessor.java | package uhh_lt.newsleak.preprocessing;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.uima.UIMAFramework;
import org.apache.uima.fit.factory.ExternalResourceFactory;
import org.apache.uima.fit.factory.TypeSystemDescriptionFactory;
import org.apache.uima.resource.ExternalResourceDescription;
import org.apache.uima.resource.metadata.TypeSystemDescription;
import org.apache.uima.util.Logger;
import uhh_lt.newsleak.resources.ElasticsearchResource;
import uhh_lt.newsleak.resources.MetadataResource;
/**
* Abstract class providing common functionality for the newsleak preprocessing
* components (e.g. processing the preprocessing configuration file,
* initialization of the postgres database connection, and the metadata UIMA
* resource initialization).
*
*/
public abstract class NewsleakPreprocessor {
/** The logger. */
protected Logger logger;
/** command line options */
private Options cliOptions;
private String configfile;
protected String configDir;
/** config file options */
protected String readerType;
// processing parameters
protected String defaultLanguage;
protected String[] processLanguages;
protected String dataDirectory;
protected boolean paragraphsAsDocuments;
protected Integer paragraphMinimumLength;
protected Integer maxDocumentLength;
protected Integer threads;
protected Integer debugMaxDocuments;
// csv externally preprocessed data
protected String documentFile;
protected String metadataFile;
// newsleak elasticsearch configuration
protected String esHost;
protected String esClustername;
protected String esIndex;
protected String esPort;
// hoover elasticsearch configuration
protected String hooverHost;
protected String hooverClustername;
protected String hooverIndex;
protected String hooverPort;
protected String hooverTmpMetadata;
protected String hooverSearchUrl;
// newsleak postgres configuration
protected String dbUrl;
protected String dbName;
protected String dbUser;
protected String dbPass;
protected String dbSchema;
protected String dbIndices;
// newsleak-ner microservice configuration
protected String nerServiceUrl;
// dictionary and pattern extraction
protected String dictionaryFiles;
protected boolean patternEmail;
protected boolean patternUrl;
protected boolean patternPhone;
protected boolean patternIP;
// UIMA configuration variables
protected TypeSystemDescription typeSystem;
protected NewsleakStatusCallbackListener statusListener;
protected ExternalResourceDescription metadataResourceDesc = null;
// postgres connection
protected static Connection conn;
protected static Statement st;
/**
* Instantiates a new newsleak preprocessor.
*/
public NewsleakPreprocessor() {
super();
logger = UIMAFramework.getLogger();
}
/**
* Reads the configuration from a config file.
*
* @param cliArgs the command line arguments
*/
public void getConfiguration(String[] cliArgs) {
this.getCliOptions(cliArgs);
// config file
Properties prop = new Properties();
try {
this.configDir = new File(configfile).getParentFile().getAbsolutePath();
InputStream input = new FileInputStream(configfile);
prop.load(input);
readerType = prop.getProperty("datareader");
defaultLanguage = prop.getProperty("defaultlanguage");
processLanguages = prop.getProperty("processlanguages").split("[, ]+");
dataDirectory = prop.getProperty("datadirectory");
documentFile = prop.getProperty("documentfile");
metadataFile = prop.getProperty("metadatafile");
hooverHost = prop.getProperty("hooverurl");
hooverClustername = prop.getProperty("hooverclustername");
hooverIndex = prop.getProperty("hooverindex");
hooverPort = prop.getProperty("hooverport");
hooverTmpMetadata = prop.getProperty("hoovertmpmetadata");
hooverSearchUrl = prop.getProperty("hooversearchurl");
esHost = prop.getProperty("esurl");
esClustername = prop.getProperty("esclustername");
esIndex = prop.getProperty("esindex");
esPort = prop.getProperty("esport");
paragraphsAsDocuments = Boolean.parseBoolean(prop.getProperty("paragraphsasdocuments"));
paragraphMinimumLength = Integer.valueOf(prop.getProperty("paragraphminimumlength"));
maxDocumentLength = Integer.valueOf(prop.getProperty("maxdocumentlength"));
if (maxDocumentLength <= 0)
maxDocumentLength = Integer.MAX_VALUE;
dbUrl = prop.getProperty("dburl");
dbName = prop.getProperty("dbname");
dbUser = prop.getProperty("dbuser");
dbPass = prop.getProperty("dbpass");
dbSchema = prop.getProperty("dbschema");
dbIndices = prop.getProperty("dbindices");
nerServiceUrl = prop.getProperty("nerserviceurl");
dictionaryFiles = prop.getProperty("dictionaryfiles");
patternEmail = Boolean.parseBoolean(prop.getProperty("patternemail", "true"));
patternUrl = Boolean.parseBoolean(prop.getProperty("patternurl", "false"));
patternPhone = Boolean.parseBoolean(prop.getProperty("patternphone", "false"));
patternIP = Boolean.parseBoolean(prop.getProperty("patternip", "false"));
threads = Integer.valueOf(prop.getProperty("threads"));
debugMaxDocuments = Integer.valueOf(prop.getProperty("debugMaxDocuments"));
if (debugMaxDocuments <= 0)
debugMaxDocuments = null;
input.close();
} catch (IOException e) {
System.err.println("Could not read configuration file " + configfile);
System.exit(1);
}
// uima type system
String typeSystemFile = new File("desc/NewsleakDocument.xml").getAbsolutePath();
this.typeSystem = TypeSystemDescriptionFactory.createTypeSystemDescriptionFromPath(typeSystemFile);
}
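/*
 * Illustrative excerpt of the properties file parsed above (keys match the
 * prop.getProperty calls; the values here are made-up examples, not defaults):
 *
 * datareader = csv
 * defaultlanguage = eng
 * processlanguages = eng, deu
 * datadirectory = data
 * documentfile = document_example.csv
 * metadatafile = metadata_example.csv
 * esindex = newsleak
 * threads = 4
 */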
/**
* Parses the cli options into instance fields.
*
* @param args the command line arguments
*/
private void getCliOptions(String[] args) {
cliOptions = new Options();
Option configfileOpt = new Option("c", "configfile", true, "config file path");
configfileOpt.setRequired(true);
cliOptions.addOption(configfileOpt);
CommandLineParser parser = new DefaultParser();
HelpFormatter formatter = new HelpFormatter();
CommandLine cmd;
try {
cmd = parser.parse(cliOptions, args);
} catch (ParseException e) {
System.out.println(e.getMessage());
formatter.printHelp("utility-name", cliOptions);
System.exit(1);
return;
}
this.configfile = cmd.getOptionValue("configfile");
}
/**
* Initializes the postgres db.
*
* @param dbName the db name
* @param ip the ip
* @param user the user
* @param pswd the pswd
* @throws InstantiationException the instantiation exception
* @throws IllegalAccessException the illegal access exception
* @throws ClassNotFoundException the class not found exception
* @throws SQLException the SQL exception
*/
protected void initDb(String dbName, String ip, String user, String pswd)
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException {
String url = "jdbc:postgresql://" + ip + "/";
String userName = user;
String password = pswd;
conn = DriverManager.getConnection(url + dbName, userName, password);
st = conn.createStatement();
}
/**
* Gets the metadata resource description.
*
* @return the metadata resource description
*/
protected ExternalResourceDescription getMetadataResourceDescription() {
if (metadataResourceDesc == null) {
metadataResourceDesc = ExternalResourceFactory.createExternalResourceDescription(MetadataResource.class,
MetadataResource.PARAM_METADATA_FILE, this.dataDirectory + File.separator + this.metadataFile,
MetadataResource.PARAM_RESET_METADATA_FILE, "true");
}
return metadataResourceDesc;
}
/**
* Gets the elasticsearch resource description.
*
* @param createNewIndex Should be "true" or "false". If "true", the index will be newly created (a pre-existing index with the same name will be overwritten)
* @return the elasticsearch resource description
*/
protected ExternalResourceDescription getElasticsearchResourceDescription(String createNewIndex) {
ExternalResourceDescription esResource = ExternalResourceFactory.createExternalResourceDescription(
ElasticsearchResource.class, ElasticsearchResource.PARAM_CREATE_INDEX, createNewIndex,
ElasticsearchResource.PARAM_CLUSTERNAME, this.esClustername, ElasticsearchResource.PARAM_INDEX,
this.esIndex, ElasticsearchResource.PARAM_HOST, this.esHost, ElasticsearchResource.PARAM_PORT,
this.esPort, ElasticsearchResource.PARAM_DOCUMENT_MAPPING_FILE,
"desc/elasticsearch_mapping_document_2.4.json",
ElasticsearchResource.PARAM_METADATA_FILE, this.dataDirectory + File.separator + this.metadataFile);
return esResource;
}
}
| 9,138 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
CreateCollection.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/preprocessing/CreateCollection.java | package uhh_lt.newsleak.preprocessing;
/**
* Main class to create a new newsleak collection. It runs the information
* extraction pipeline which writes to a relational database. The second step
* then indexes the extracted entities and metadata from the database into the
* system's fulltext index (elasticsearch) for fast retrieval and aggregation.
*
* For configuration it expects a config file as parsed in @see
* uhh_lt.newsleak.preprocessing.NewsleakPreprocessor
*/
public class CreateCollection {
/**
* The main method to start the creation process of a new collection.
*
* @param args
* expects the parameter -c to point to the config file
* @throws Exception
* any exception that may occur during processing
*/
public static void main(String[] args) throws Exception {
long startTime = System.currentTimeMillis();
// extract fulltext, entities and metadata and write to DB
InformationExtraction2Postgres.main(args);
// read from DB and write to fulltext index
Postgres2ElasticsearchIndexer.main(args);
long estimatedTime = System.currentTimeMillis() - startTime;
System.out.println("Processing time passed (seconds): " + estimatedTime / 1000);
}
}
| 1,204 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
NewsleakStatusCallbackListener.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/preprocessing/NewsleakStatusCallbackListener.java | package uhh_lt.newsleak.preprocessing;
import org.apache.uima.cas.CAS;
import org.apache.uima.collection.EntityProcessStatus;
import org.apache.uima.collection.StatusCallbackListener;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
/**
* Status callback listener for the UIMA Collection Processing Engine. It logs
* CPM lifecycle events (initialization, pause/resume, batch, entity and
* collection completion, abort) and exposes via {@link #isProcessing()} whether
* the pipeline is still running.
*/
public class NewsleakStatusCallbackListener implements StatusCallbackListener {
/** The Constant ENTITY_CNT_FOR_LOG. */
private static final int ENTITY_CNT_FOR_LOG = 1;
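// with the current value of 1, entityProcessComplete() logs after every entity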
/** The logger. */
private Logger logger;
/** The is processing. */
private boolean isProcessing = true;
/** The entity process count. */
private int entityProcessCount;
/**
* Instantiates a new newsleak status callback listener.
*
* @param logger the logger
*/
public NewsleakStatusCallbackListener(Logger logger) {
super();
this.logger = logger;
}
/* (non-Javadoc)
* @see org.apache.uima.collection.base_cpm.BaseStatusCallbackListener#resumed()
*/
public void resumed() {
logger.log(Level.INFO, "CPM resumed");
}
/* (non-Javadoc)
* @see org.apache.uima.collection.base_cpm.BaseStatusCallbackListener#paused()
*/
public void paused() {
logger.log(Level.INFO, "CPM paused");
}
/* (non-Javadoc)
* @see org.apache.uima.collection.base_cpm.BaseStatusCallbackListener#initializationComplete()
*/
public void initializationComplete() {
logger.log(Level.INFO, "CPM initialization completed");
}
/* (non-Javadoc)
* @see org.apache.uima.collection.base_cpm.BaseStatusCallbackListener#collectionProcessComplete()
*/
public void collectionProcessComplete() {
logger.log(Level.INFO, "CPM processing completed");
isProcessing = false;
}
/* (non-Javadoc)
* @see org.apache.uima.collection.base_cpm.BaseStatusCallbackListener#batchProcessComplete()
*/
public void batchProcessComplete() {
logger.log(Level.INFO, "CPM batch process completed");
}
/* (non-Javadoc)
* @see org.apache.uima.collection.base_cpm.BaseStatusCallbackListener#aborted()
*/
public void aborted() {
logger.log(Level.SEVERE, "CPM aborted");
isProcessing = false;
}
/* (non-Javadoc)
* @see org.apache.uima.collection.StatusCallbackListener#entityProcessComplete(org.apache.uima.cas.CAS, org.apache.uima.collection.EntityProcessStatus)
*/
public void entityProcessComplete(CAS arg0, EntityProcessStatus arg1) {
entityProcessCount++;
if (entityProcessCount % ENTITY_CNT_FOR_LOG == 0) {
logger.log(Level.INFO, "CPM entity process completed - " + entityProcessCount + " entities");
}
}
/**
* Checks if is processing.
*
* @return true, if is processing
*/
public boolean isProcessing() {
return isProcessing;
}
}
| 3,100 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Postgres2ElasticsearchIndexer.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/preprocessing/Postgres2ElasticsearchIndexer.java | package uhh_lt.newsleak.preprocessing;
import java.io.IOException;
import java.net.InetAddress;
import java.sql.Date;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import uhh_lt.newsleak.util.AtomicCounter;
import uhh_lt.newsleak.util.ResultSetIterable;
import org.apache.commons.lang3.StringUtils;
import org.apache.uima.util.Level;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
/**
* The Class Postgres2ElasticsearchIndexer reads fulltext and extracted
* information from the newsleak postgres database and feeds it to the newsleak
* elasticsearch index. For this, several mappings of elasticsearch data objects
* are created. Indexing itself is carried out in parallel bulk requests.
*
* For fulltext search, a single elasticsearch language analyzer is used. It is
* selected via the defaultlanguage variable of the preprocessing configuration
* (ISO 639-3 code). If elasticsearch provides no language analyzer for the
* given language code, the English analyzer is used as fallback.
*/
public class Postgres2ElasticsearchIndexer extends NewsleakPreprocessor {
/** The Constant BATCH_SIZE. */
private static final int BATCH_SIZE = 100;
private String elasticsearchDefaultAnalyzer;
/**
* The main method.
*
* @param args
* the arguments
* @throws Exception
* the exception
*/
public static void main(String[] args) throws Exception {
Postgres2ElasticsearchIndexer indexer = new Postgres2ElasticsearchIndexer();
indexer.getConfiguration(args);
indexer.initDb(indexer.dbName, indexer.dbUrl, indexer.dbUser, indexer.dbPass);
indexer.setElasticsearchDefaultAnalyzer(indexer.defaultLanguage);
TransportClient client;
Settings settings = Settings.builder().put("cluster.name", indexer.esClustername).build();
st.setFetchSize(BATCH_SIZE);
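// hint to fetch rows in BATCH_SIZE chunks; note that the postgres JDBC driver
// only honors a fetch size when autocommit is disabled on the connection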
try {
client = TransportClient.builder().settings(settings).build()
.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(indexer.esHost),
Integer.parseInt(indexer.esPort)));
// remove existing index
client.admin().indices().delete(new DeleteIndexRequest(indexer.esIndex)).actionGet();
// create index with all extracted data
indexer.documentIndexer(client, indexer.esIndex, "document");
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
conn.close();
}
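// Typical invocation (path is illustrative): this class is run with the
// required "-c" option pointing to the preprocessing config file, e.g.
// java uhh_lt.newsleak.preprocessing.Postgres2ElasticsearchIndexer -c conf/newsleak.properties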
public String getElasticsearchDefaultAnalyzer() {
return elasticsearchDefaultAnalyzer;
}
public void setElasticsearchDefaultAnalyzer(String isoCode) {
// language analyzers supported by elasticsearch 2.4
HashMap<String, String> analyzers = new HashMap<String, String>();
analyzers.put("ara", "arabic");
analyzers.put("bul", "bulgarian");
analyzers.put("cat", "catalan");
analyzers.put("ces", "czech");
analyzers.put("dan", "danish");
analyzers.put("eng", "english");
analyzers.put("nld", "dutch");
analyzers.put("fin", "finnish");
analyzers.put("fra", "french");
analyzers.put("deu", "german");
analyzers.put("ell", "greek");
analyzers.put("hin", "hindi");
analyzers.put("hun", "hungarian");
analyzers.put("ind", "indonesian");
analyzers.put("ita", "italian");
analyzers.put("lav", "latvian");
analyzers.put("lit", "lithuanian");
analyzers.put("nno", "norwegian");
analyzers.put("fas", "persian");
analyzers.put("por", "portuguese");
analyzers.put("ron", "romanian");
analyzers.put("rus", "russian");
analyzers.put("spa", "spanish");
analyzers.put("swe", "swedish");
analyzers.put("tur", "turkish");
analyzers.put("tha", "thai");
// set elasticsearch analyzer (English as default)
this.elasticsearchDefaultAnalyzer = analyzers.containsKey(isoCode) ? analyzers.get(isoCode) : "english";
if (!analyzers.containsKey(isoCode)) {
this.logger.log(Level.WARNING, "Configuration parameter defaultlanguage=" + isoCode
+ " is not supported by elasticsearch language analyzers. Switching to 'english' as default elasticsearch analyzer.");
}
}
/**
* Document indexer.
*
* @param client
* the client
* @param indexName
* the index name
* @param documentType
* the document type
* @throws Exception
* the exception
*/
private void documentIndexer(Client client, String indexName, String documentType) throws Exception {
try {
boolean exists = client.admin().indices().prepareExists(indexName).execute().actionGet().isExists();
if (!exists) {
System.out.println("Index " + indexName + " will be created.");
createElasticsearchIndex(client, indexName, documentType);
System.out.println("Index " + indexName + " is created.");
}
} catch (Exception e) {
System.out.println(e);
logger.log(Level.SEVERE, e.getMessage());
}
System.out.println("Start indexing");
ResultSet docSt = st.executeQuery("select * from document;");
BulkRequestConcurrent bulkRequestConcurrent = new BulkRequestConcurrent(client);
AtomicCounter bblen = new AtomicCounter();
ResultSet entTypes = conn.createStatement().executeQuery("select distinct type from entity;");
Set<String> types = new HashSet<>();
while (entTypes.next()) {
types.add(entTypes.getString("type").toLowerCase());
}
Function<ResultSet, String> indexDoc = new Function<ResultSet, String>() {
@Override
public String apply(ResultSet docSt) {
List<NamedEntity> namedEntity = new ArrayList<>();
String content;
Integer docId = 0;
try {
// fulltext
content = docSt.getString("content");
// creation date
Date dbCreated = docSt.getDate("created");
SimpleDateFormat simpleCreated = new SimpleDateFormat("yyyy-MM-dd");
String created = simpleCreated.format(dbCreated);
// document id
docId = docSt.getInt("id");
// entities
ResultSet docEntSt = conn.createStatement()
.executeQuery("select entid from entityoffset where docid = " + docId + ";");
Set<Long> ids = new HashSet<>();
while (docEntSt.next()) {
long entId = docEntSt.getLong("entid");
if (ids.contains(entId)) {
continue;
}
ids.add(entId);
ResultSet entSt = conn.createStatement()
.executeQuery("select * from entity where id = " + entId + ";");
if (entSt.next()) {
NamedEntity ne = new NamedEntity(entSt.getLong("id"), entSt.getString("name"),
entSt.getString("type"), 1 /* docEntSt.getInt("frequency") */);
namedEntity.add(ne);
}
}
// key terms (top 10 only)
ResultSet docTermSt = conn.createStatement()
.executeQuery("select * from terms where docid = " + docId + " limit 10;");
Map<String, Integer> termMap = new HashMap<>();
while (docTermSt.next()) {
String term = docTermSt.getString("term");
int freq = docTermSt.getInt("frequency");
termMap.put(term, freq);
}
// temporal expressions
ResultSet docTimexSt = conn.createStatement()
.executeQuery("select * from eventtime where docid = " + docId + ";");
List<TimeX> timexs = new ArrayList<>();
Set<String> simpleTimex = new HashSet<>();
while (docTimexSt.next()) {
String timeXValue = docTimexSt.getString("timexvalue");
TimeX t = new TimeX(docTimexSt.getInt("beginoffset"), docTimexSt.getInt("endoffset"),
docTimexSt.getString("timex"), docTimexSt.getString("type"), timeXValue);
timexs.add(t);
simpleTimex.add(timeXValue);
}
// metadata
ResultSet metadataSt = conn.createStatement()
.executeQuery("select * from metadata where docid =" + docId + ";");
// Create a JSON request object for adding the data to the index
// -------------------------------------------------------------
XContentBuilder xb = XContentFactory.jsonBuilder().startObject();
xb.field("Content", content).field("Created", created);
Map<String, List<String>> metas = new HashMap<>();
while (metadataSt.next()) {
// we capitalize the first character on purpose
String key = StringUtils.capitalize(metadataSt.getString("key").replace(".", "_"));
String value = metadataSt.getString("value");
metas.putIfAbsent(key, new ArrayList<>());
metas.get(key).add(value);
}
for (String key : metas.keySet()) {
if (metas.get(key).size() > 1) { // array field
xb.field(key, metas.get(key));
} else {
xb.field(key, metas.get(key).get(0));
}
}
// Adding entities
if (namedEntity.size() > 0) {
xb.startArray("Entities");
for (NamedEntity ne : namedEntity) {
xb.startObject();
xb.field("EntId", ne.id);
xb.field("Entname", ne.name);
xb.field("EntType", ne.type);
xb.field("EntFrequency", ne.frequency);
xb.endObject();
}
xb.endArray();
for (String type : types) {
xb.startArray("Entities" + type);
for (NamedEntity ne : namedEntity) {
if (ne.type.toLowerCase().equals(type)) {
xb.startObject();
xb.field("EntId", ne.id);
xb.field("Entname", ne.name);
xb.field("EntFrequency", ne.frequency);
xb.endObject();
}
}
xb.endArray();
}
}
// Adding terms
if (termMap.size() > 0) {
xb.startArray("Keywords");
for (String term : termMap.keySet()) {
xb.startObject();
xb.field("Keyword", term);
xb.field("TermFrequency", termMap.get(term));
xb.endObject();
}
xb.endArray();
}
// Adding TimeX
if (timexs.size() > 0) {
xb.startArray("EventTimes");
for (TimeX t : timexs) {
xb.startObject();
xb.field("Beginoffset", t.beginOffset);
xb.field("Endoffset", t.endOffset);
xb.field("Timex", t.timeX);
xb.field("TimeXType", t.timeXType);
xb.field("Timexvalue", t.timexValue);
xb.endObject();
}
xb.endArray();
xb.field("SimpleTimeExpresion", new ArrayList<>(simpeTimex));
}
xb.endObject();
metadataSt.close();
// perform concurrent bulk requests
synchronized (bulkRequestConcurrent) {
bulkRequestConcurrent
.add(client.prepareIndex(indexName, documentType, docId.toString()).setSource(xb));
bblen.increment();
if (bblen.value() % BATCH_SIZE == 0) {
logger.log(Level.INFO, "##### " + bblen.value() + " documents are indexed.");
bulkRequestConcurrent.execute();
}
}
} catch (SQLException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return docId.toString();
}
};
// parallel execution
List<String> docIdList = new ResultSetIterable<String>(docSt, indexDoc).stream().collect(Collectors.toList());
// index last requests
try {
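// an empty bulk request fails validation with ActionRequestValidationException;
// this is treated below as "nothing left to index"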
bulkRequestConcurrent.execute();
} catch (ActionRequestValidationException e) {
logger.log(Level.INFO, "All data has been indexed.");
}
docSt.close();
}
/**
* Creates the elasticsearch index mappings.
*
* @param client
* the client
* @param indexName
* the index name
* @param documentType
* the document type
* @throws IOException
* Signals that an I/O exception has occurred.
* @throws SQLException
* the SQL exception
*/
public void createElasticsearchIndex(Client client, String indexName, String documentType/* , String mapping */)
throws IOException, SQLException {
IndicesExistsResponse res = client.admin().indices().prepareExists(indexName).execute().actionGet();
if (res.isExists()) {
DeleteIndexRequestBuilder delIdx = client.admin().indices().prepareDelete(indexName);
delIdx.execute().actionGet();
}
CreateIndexRequestBuilder createIndexRequestBuilder = client.admin().indices().prepareCreate(indexName);
XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().startObject().startObject(documentType)
.startObject("properties");
mappingBuilder.startObject("Content").field("type", "string")
.field("analyzer", this.getElasticsearchDefaultAnalyzer()).endObject();
mappingBuilder.startObject("Created").field("type", "date").field("format", "yyyy-MM-dd").
startObject("fields").startObject("raw").field("type", "date").field("format", "yyyy-MM-dd").endObject()
.endObject().endObject();
System.out.println("creating entities mapping ...");
createEntitesPerTypeMappings(mappingBuilder, "Entities");
System.out.println("creating entities mapping ... done");
ResultSet entTypes = conn.createStatement().executeQuery("select distinct type from entity;");
System.out.println("creating nested entities mapping ...");
while (entTypes.next()) {
String type = entTypes.getString("type").toLowerCase();
createEntitesPerTypeMappings(mappingBuilder, "Entities" + type);
}
System.out.println("creating nested entities mapping ... done");
createKeywordsMappings(mappingBuilder);
createEventTimeMappings(mappingBuilder);
createSimpleTimexMappings(mappingBuilder);
Map<String, String> metaFields = new HashMap<>();
System.out.println("creating metadata mapping ...");
ResultSet metadataSt = conn.createStatement()
.executeQuery("select key, value, type from metadata group by key, value, type;");
while (metadataSt.next()) {
String key = StringUtils.capitalize(metadataSt.getString("key").replace(".", "_"));
String type = metadataSt.getString("type");
if (type.toLowerCase().equals("date")) {
type = "date";
} else if (type.toLowerCase().equals("number") || type.toLowerCase().startsWith("int")) {
type = "long";
} else {
type = "string";
}
metaFields.put(key, type);
}
System.out.println("creating metadata mapping ... done");
for (String meta : metaFields.keySet()) {
createMetadataMappings(mappingBuilder, meta, metaFields.get(meta));
}
mappingBuilder.endObject().endObject().endObject();
System.out.println(mappingBuilder.string());
createIndexRequestBuilder.addMapping(documentType, mappingBuilder);
createIndexRequestBuilder.execute().actionGet();
}
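/*
 * Sketch of the mapping JSON produced by the static part above, assuming the
 * default analyzer resolved to "english" (entity, keyword, timex and metadata
 * mappings are appended analogously):
 *
 * { "document": { "properties": {
 *     "Content": { "type": "string", "analyzer": "english" },
 *     "Created": { "type": "date", "format": "yyyy-MM-dd",
 *                  "fields": { "raw": { "type": "date", "format": "yyyy-MM-dd" } } },
 *     ...
 * } } }
 */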
/**
* Creates the entites per type mappings.
*
* @param mappingBuilder
* the mapping builder
* @param neType
* the ne type
* @throws IOException
* Signals that an I/O exception has occurred.
*/
private void createEntitesPerTypeMappings(XContentBuilder mappingBuilder, String neType) throws IOException {
mappingBuilder.startObject(neType);
mappingBuilder.startObject("properties");
mappingBuilder.startObject("EntId").field("type", "long").endObject();
mappingBuilder.startObject("Entname").field("type", "string")
.field("analyzer", this.getElasticsearchDefaultAnalyzer()).startObject("fields").startObject("raw")
.field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject();
mappingBuilder.startObject("EntType").field("type", "string")
.field("analyzer", this.getElasticsearchDefaultAnalyzer()).startObject("fields").startObject("raw")
.field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject();
mappingBuilder.startObject("EntFrequency").field("type", "long").endObject().endObject().endObject();
}
/**
* Creates the event time mappings.
*
* @param mappingBuilder
* the mapping builder
* @throws IOException
* Signals that an I/O exception has occurred.
*/
private void createEventTimeMappings(XContentBuilder mappingBuilder) throws IOException {
mappingBuilder.startObject("EventTimes");
mappingBuilder.startObject("properties");
mappingBuilder.startObject("Beginoffset").field("type", "long").endObject().startObject("Endoffset")
.field("type", "long").endObject();
mappingBuilder.startObject("TimeXType").field("type", "string")
.field("analyzer", this.getElasticsearchDefaultAnalyzer()).startObject("fields").startObject("raw")
.field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject();
mappingBuilder.startObject("Timex").field("type", "string")
.field("analyzer", this.getElasticsearchDefaultAnalyzer()).startObject("fields").startObject("raw")
.field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject();
mappingBuilder.startObject("Timexvalue").field("type", "string")
.field("analyzer", this.getElasticsearchDefaultAnalyzer()).startObject("fields").startObject("raw")
.field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject().endObject()
.endObject();
}
/**
* Creates the keywords mappings.
*
* @param mappingBuilder
* the mapping builder
* @throws IOException
* Signals that an I/O exception has occurred.
*/
private void createKeywordsMappings(XContentBuilder mappingBuilder) throws IOException {
mappingBuilder.startObject("Keywords");
mappingBuilder.startObject("properties");
mappingBuilder.startObject("Keyword").field("type", "String")
.field("analyzer", this.getElasticsearchDefaultAnalyzer()).startObject("fields").startObject("raw")
.field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject();
mappingBuilder.startObject("TermFrequency").field("type", "long").endObject().endObject().endObject();
}
/**
* Creates the simple timex mappings.
*
* @param mappingBuilder
* the mapping builder
* @throws IOException
* Signals that an I/O exception has occurred.
*/
private static void createSimpleTimexMappings(XContentBuilder mappingBuilder) throws IOException {
mappingBuilder.startObject("SimpleTimeExpresion").field("type", "date")
.field("format", "yyyy-MM-dd || yyyy || yyyy-MM").startObject("fields").startObject("raw")
.field("type", "date").field("format", "yyyy-MM-dd || yyyy || yyyy-MM").endObject().endObject()
.endObject();
}
/**
* Creates the metadata mappings for the elasticsearch index.
*
* @param mappingBuilder
* the mapping builder
* @param meta
* the meta
* @param type
* the type
* @throws IOException
* Signals that an I/O exception has occurred.
*/
private static void createMetadataMappings(XContentBuilder mappingBuilder, String meta, String type)
throws IOException {
mappingBuilder.startObject(meta).field("type", type).startObject("fields").startObject("raw")
.field("type", type).field("index", "not_analyzed").endObject().endObject().endObject();
}
/**
* The Class NamedEntity.
*/
static class NamedEntity {
/** The id. */
long id;
/** The name. */
String name;
/** The type. */
String type;
/** The frequency. */
int frequency;
/**
* Instantiates a new named entity.
*
* @param aId
* the a id
* @param aName
* the a name
* @param aType
* the a type
* @param aFreq
* the a freq
*/
public NamedEntity(long aId, String aName, String aType, int aFreq) {
this.id = aId;
this.name = aName;
this.type = aType;
this.frequency = aFreq;
}
}
/**
* The Class TimeX.
*/
static class TimeX {
/** The begin offset. */
int beginOffset;
/** The end offset. */
int endOffset;
/** The time X. */
String timeX;
/** The time X type. */
String timeXType;
/** The timex value. */
String timexValue;
/**
* Instantiates a new time X.
*
* @param aBeginOffset
* the a begin offset
* @param aEndOffset
* the a end offset
* @param aTimeX
* the a time X
* @param aTimexType
* the a timex type
* @param aTimexValue
* the a timex value
*/
public TimeX(int aBeginOffset, int aEndOffset, String aTimeX, String aTimexType, String aTimexValue) {
this.beginOffset = aBeginOffset;
this.endOffset = aEndOffset;
this.timeX = aTimeX;
this.timeXType = aTimexType;
this.timexValue = aTimexValue;
}
}
/**
* The Class BulkRequestConcurrent.
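	 *
	 * A thread-safe wrapper around a single bulk request. Typical use (a
	 * sketch; the index name and source builder are illustrative):
	 *
	 * <pre>
	 * BulkRequestConcurrent bulk = new BulkRequestConcurrent(client);
	 * bulk.add(client.prepareIndex("myindex", "document").setSource(builder));
	 * bulk.execute(); // flushes the batch and logs failures
	 * </pre>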
*/
class BulkRequestConcurrent {
/** The bulk request. */
private BulkRequestBuilder bulkRequest;
/** The elasticsearch client. */
private Client client;
/**
* Instantiates a new concurrent bulk request.
*
* @param client
* the client
*/
public BulkRequestConcurrent(Client client) {
super();
this.client = client;
this.bulkRequest = this.client.prepareBulk();
}
/**
* Adds a bulk of data to the index.
*
* @param request
* the request
*/
public synchronized void add(IndexRequestBuilder request) {
this.bulkRequest.add(request);
}
/**
* Executes a bulk request.
*/
public synchronized void execute() {
BulkResponse bulkResponse = bulkRequest.execute().actionGet();
if (bulkResponse.hasFailures()) {
logger.log(Level.SEVERE,
"##### Bulk Request failure with error: " + bulkResponse.buildFailureMessage());
}
this.bulkRequest = client.prepareBulk();
}
}
} | 22,176 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
InformationExtraction2Postgres.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/preprocessing/InformationExtraction2Postgres.java | package uhh_lt.newsleak.preprocessing;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.uima.analysis_engine.AnalysisEngineDescription;
import org.apache.uima.collection.CollectionProcessingEngine;
import org.apache.uima.collection.CollectionReaderDescription;
import org.apache.uima.fit.cpe.CpeBuilder;
import org.apache.uima.fit.examples.experiment.pos.XmiWriter;
import org.apache.uima.fit.factory.AnalysisEngineFactory;
import org.apache.uima.fit.factory.CollectionReaderFactory;
import org.apache.uima.fit.factory.ExternalResourceFactory;
import org.apache.uima.resource.ExternalResourceDescription;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.postgresql.copy.CopyManager;
import org.postgresql.core.BaseConnection;
import uhh_lt.newsleak.annotator.DictionaryExtractor;
import uhh_lt.newsleak.annotator.HeidelTimeOpenNLP;
import uhh_lt.newsleak.annotator.KeytermExtractor;
import uhh_lt.newsleak.annotator.LanguageDetector;
import uhh_lt.newsleak.annotator.NerMicroservice;
import uhh_lt.newsleak.annotator.SentenceCleaner;
import uhh_lt.newsleak.annotator.SegmenterICU;
import uhh_lt.newsleak.reader.HooverElasticsearchReader;
import uhh_lt.newsleak.reader.NewsleakCsvStreamReader;
import uhh_lt.newsleak.reader.NewsleakElasticsearchReader;
import uhh_lt.newsleak.reader.NewsleakReader;
import uhh_lt.newsleak.resources.DictionaryResource;
import uhh_lt.newsleak.resources.ElasticsearchResource;
import uhh_lt.newsleak.resources.HooverResource;
import uhh_lt.newsleak.resources.LanguageDetectorResource;
import uhh_lt.newsleak.resources.PostgresResource;
import uhh_lt.newsleak.resources.TextLineWriterResource;
import uhh_lt.newsleak.writer.ElasticsearchDocumentWriter;
import uhh_lt.newsleak.writer.PostgresDbWriter;
import uhh_lt.newsleak.writer.TextLineWriter;
/**
* Information extraction pipeline. The process iterates over the entire dataset
* twice.
*
 * The first process reads fulltexts and metadata from a
 * {@link uhh_lt.newsleak.reader.NewsleakReader}, then determines the language for each
* document and writes everything temporarily to an elasticsearch index
* (metadata is written to a temporary file on the disk for later insertion into
* the database).
*
* The second process iterates over the elasticsearch index and extracts
 * entities from fulltexts for each of the configured languages separately. If
 * it is unknown which languages a collection mainly contains, the first
 * process outputs a statistic of how many documents per language it has seen.
 *
 * Extracted information is written into a relational database (Postgres) to
 * allow the newsleak explorer app to run relational queries later on.
*
*/
public class InformationExtraction2Postgres extends NewsleakPreprocessor {
/**
* The main method running language detection and information extraction.
*
* @param args
* CLI option pointing to the configuration file
* @throws Exception
* anything that can go wrong...
*/
public static void main(String[] args) throws Exception {
InformationExtraction2Postgres np = new InformationExtraction2Postgres();
// read configuration file
np.getConfiguration(args);
// run language detection
np.pipelineLanguageDetection();
// extract information (per language)
np.pipelineAnnotation();
// init postgres db
np.initDb(np.dbName, np.dbUrl, np.dbUser, np.dbPass);
// create postgres indices
String indexSql = FileUtils.readFileToString(new File(np.dbIndices)).replace("\n", "");
try {
st.executeUpdate(indexSql);
np.logger.log(Level.INFO, "Index created");
} catch (Exception e) {
e.printStackTrace();
}
// import temporary metadata.csv
np.metadataToPostgres();
conn.close();
}
/**
* Metadata is supposed to be presented in a four-tuple CSV format (docid, key,
	 * value, type). A {@link uhh_lt.newsleak.reader.NewsleakReader} should write a
* temporary metadata file in that format (or assume it was produced by an
* external process)
*
* The CSV file is imported via postgres directly.
*
* See <i>data/metadata_example.csv</i> for an example.
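	 *
	 * A hypothetical row (values are illustrative only):
	 *
	 * <pre>
	 * 42,Subject,"Quarterly figures",Text
	 * </pre>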
*/
private void metadataToPostgres() {
try {
// we need a mapping of document ids since ElasticsearchDocumentWriter generates
			// new ids from an auto-increment value
String mappedMetadataFilepath = this.dataDirectory + File.separator + this.metadataFile + ".mapped";
mappingIdsInMetadata(mappedMetadataFilepath);
// import csv into postgres db
CopyManager cpManager = new CopyManager((BaseConnection) conn);
st.executeUpdate("TRUNCATE TABLE metadata;");
this.logger.log(Level.INFO, "Importing metadata from " + mappedMetadataFilepath);
Long n = cpManager.copyIn("COPY metadata FROM STDIN WITH CSV", new FileReader(mappedMetadataFilepath));
this.logger.log(Level.INFO, n + " metadata imported");
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
}
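	/**
	 * Rewrites the temporary metadata file, replacing the temporary document
	 * ids with the final ids assigned by the elasticsearch writer (one
	 * temporary id may map to several final ids if a document was split into
	 * paragraphs; the subject of split documents gets a part counter suffix).
	 *
	 * @param mappedMetadataFile
	 *            the target file for the rewritten metadata
	 * @throws Exception
	 *             if reading or writing fails
	 */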
private void mappingIdsInMetadata(String mappedMetadataFile) throws Exception {
// read mappings file
FileInputStream fis = new FileInputStream(this.dataDirectory + File.separator + this.metadataFile + ".id-map");
ObjectInputStream ois = new ObjectInputStream(fis);
HashMap<Integer, ArrayList<Integer>> documentIdMapping = (HashMap<Integer, ArrayList<Integer>>) ois
.readObject();
ois.close();
// open metadata file, replace ids, write to temporary metadata file
BufferedWriter writer = new BufferedWriter(new FileWriter(mappedMetadataFile));
CSVPrinter csvPrinter = new CSVPrinter(writer, CSVFormat.RFC4180);
BufferedReader reader = new BufferedReader(new FileReader(this.dataDirectory + File.separator + this.metadataFile));
Iterable<CSVRecord> records = CSVFormat.RFC4180.parse(reader);
for (CSVRecord record : records) {
Integer tmpDocId = Integer.parseInt(record.get(0));
if (documentIdMapping.containsKey(tmpDocId)) {
ArrayList<Integer> mappedIds = documentIdMapping.get(tmpDocId);
int nParts = mappedIds.size();
int partCounter = 0;
for (Integer newsleakDocId : mappedIds) {
String key = StringUtils.capitalize(record.get(1));
String value = record.get(2);
if (nParts > 1 && key.equals("Subject")) {
partCounter++;
value += " (" + partCounter + "/" + nParts + ")";
}
ArrayList<String> meta = new ArrayList<String>();
meta.add(newsleakDocId.toString());
meta.add(key);
meta.add(value);
meta.add(record.get(3));
csvPrinter.printRecord(meta);
}
}
}
csvPrinter.close();
reader.close();
}
/**
* Gets the UIMA reader according to the current configuration.
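	 *
	 * Illustrative call (a sketch):
	 *
	 * <pre>
	 * CollectionReaderDescription reader = getReader("csv");
	 * </pre>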
*
* @param type
* The reader type (e.g. "csv" for externally preprocessed fulltexts
* and metadata, or "hoover" for the Hoover text extraction system)
* @return the reader
* @throws ResourceInitializationException
* the resource initialization exception
*/
public CollectionReaderDescription getReader(String type) throws ResourceInitializationException {
CollectionReaderDescription reader = null;
if (type.equals("csv")) {
reader = CollectionReaderFactory.createReaderDescription(NewsleakCsvStreamReader.class, this.typeSystem,
NewsleakCsvStreamReader.PARAM_DOCUMENT_FILE, this.documentFile,
NewsleakCsvStreamReader.PARAM_METADATA_FILE, this.metadataFile,
NewsleakCsvStreamReader.PARAM_INPUTDIR, this.dataDirectory,
NewsleakCsvStreamReader.PARAM_DEFAULT_LANG, this.defaultLanguage,
NewsleakReader.PARAM_DEBUG_MAX_DOCS, this.debugMaxDocuments, NewsleakReader.PARAM_MAX_DOC_LENGTH,
this.maxDocumentLength);
} else if (type.equals("hoover")) {
this.metadataFile = this.hooverTmpMetadata;
ExternalResourceDescription hooverResource = ExternalResourceFactory.createExternalResourceDescription(
HooverResource.class, HooverResource.PARAM_HOST, this.hooverHost, HooverResource.PARAM_CLUSTERNAME,
this.hooverClustername, HooverResource.PARAM_INDEX, this.hooverIndex, HooverResource.PARAM_PORT,
this.hooverPort, HooverResource.PARAM_SEARCHURL, this.hooverSearchUrl);
reader = CollectionReaderFactory.createReaderDescription(HooverElasticsearchReader.class, this.typeSystem,
HooverElasticsearchReader.RESOURCE_HOOVER, hooverResource,
HooverElasticsearchReader.RESOURCE_METADATA, this.getMetadataResourceDescription(),
NewsleakReader.PARAM_DEBUG_MAX_DOCS, this.debugMaxDocuments, NewsleakReader.PARAM_MAX_DOC_LENGTH,
this.maxDocumentLength);
} else {
this.logger.log(Level.SEVERE, "Unknown reader type: " + type);
System.exit(1);
}
return reader;
}
/**
* The language detection pipeline detects the language of each document and
* writes this information and the metadata acquired by the the reader
* temporarily to disk. The extracted fulltext is temporarily stored in the
* elasticsearch index.
*
* @throws Exception
* the exception
*/
public void pipelineLanguageDetection() throws Exception {
statusListener = new NewsleakStatusCallbackListener(this.logger);
// check for language support
HashSet<String> supportedLanguages = LanguageDetector.getSupportedLanguages();
for (String lang : this.processLanguages) {
if (!supportedLanguages.contains(lang)) {
logger.log(Level.SEVERE, "Language " + lang + " not supported (use ISO 639-3 codes)");
System.exit(1);
}
}
// reader
CollectionReaderDescription reader = getReader(this.readerType);
// language detection annotator
ExternalResourceDescription resourceLangDect = ExternalResourceFactory.createExternalResourceDescription(
LanguageDetectorResource.class, LanguageDetectorResource.PARAM_MODEL_FILE,
"resources/langdetect-183.bin");
AnalysisEngineDescription langDetect = AnalysisEngineFactory.createEngineDescription(LanguageDetector.class,
LanguageDetector.MODEL_FILE, resourceLangDect, LanguageDetector.METADATA_FILE,
this.getMetadataResourceDescription(), LanguageDetector.PARAM_DEFAULT_LANG, this.defaultLanguage,
LanguageDetector.DOCLANG_FILE, "data/documentLanguages.ser");
// elasticsearch writer to store fulltexts
AnalysisEngineDescription esWriter = AnalysisEngineFactory.createEngineDescription(
ElasticsearchDocumentWriter.class, ElasticsearchDocumentWriter.RESOURCE_ESCLIENT,
this.getElasticsearchResourceDescription("true"),
ElasticsearchDocumentWriter.PARAM_PARAGRAPHS_AS_DOCUMENTS, this.paragraphsAsDocuments,
ElasticsearchDocumentWriter.PARAM_MINIMUM_PARAGRAPH_LENGTH, this.paragraphMinimumLength,
ElasticsearchDocumentWriter.PARAM_MAX_DOC_LENGTH, this.maxDocumentLength);
// create pipeline
AnalysisEngineDescription ldPipeline = AnalysisEngineFactory.createEngineDescription(langDetect, esWriter);
		// run the pipeline in parallel using a UIMA CPE
CpeBuilder ldCpeBuilder = new CpeBuilder();
ldCpeBuilder.setReader(reader);
ldCpeBuilder.setMaxProcessingUnitThreadCount(this.threads);
ldCpeBuilder.setAnalysisEngine(ldPipeline);
CollectionProcessingEngine engine = ldCpeBuilder.createCpe(statusListener);
engine.process();
// wait until language detection has finished before running the next
// information extraction processing step
while (statusListener.isProcessing()) {
Thread.sleep(500);
}
}
/**
* The annotation pipeline performs several annotation tasks, for each language
* separately (sentence detection, sentence cleaning, temporal expression
* detection, named entity recognition, keyterm extraction, and dictionary
* annotation). Extracted information is stored in a postgres database.
*
* Languages to process have to be configured as a comma separated list of
* ISO-639-3 language codes in the configuration file ("processlanguages").
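	 *
	 * A hypothetical configuration entry (language codes are illustrative):
	 *
	 * <pre>
	 * processlanguages = eng,deu
	 * </pre>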
*
* @throws Exception
* the exception
*/
public void pipelineAnnotation() throws Exception {
		/*
		 * Procedure for multi-language collections:
		 * 1. run language detection and write the language per document to the ES index
		 * 2. set the document language for unsupported languages to the default language
		 * 3. run the annotation pipeline per language with language-dependent resources
		 */
// iterate over configured ISO-639-3 language codes
boolean firstLanguage = true;
for (String currentLanguage : processLanguages) {
NewsleakStatusCallbackListener annotationListener = new NewsleakStatusCallbackListener(this.logger);
Map<String, Locale> localeMap = LanguageDetector.localeToISO();
Locale currentLocale = localeMap.get(currentLanguage);
logger.log(Level.INFO, "Processing " + currentLocale.getDisplayName() + " (" + currentLanguage + ")");
Thread.sleep(2000);
// reader
CollectionReaderDescription esReader = CollectionReaderFactory.createReaderDescription(
NewsleakElasticsearchReader.class, this.typeSystem, NewsleakElasticsearchReader.RESOURCE_ESCLIENT,
this.getElasticsearchResourceDescription("false"), NewsleakElasticsearchReader.PARAM_LANGUAGE,
currentLanguage);
// sentences
AnalysisEngineDescription sentenceICU = AnalysisEngineFactory.createEngineDescription(SegmenterICU.class,
SegmenterICU.PARAM_LOCALE, currentLanguage);
// sentence cleaner
AnalysisEngineDescription sentenceCleaner = AnalysisEngineFactory
.createEngineDescription(SentenceCleaner.class);
// heideltime
AnalysisEngineDescription heideltime = AnalysisEngineFactory.createEngineDescription(
HeidelTimeOpenNLP.class, HeidelTimeOpenNLP.PARAM_LANGUAGE,
"auto-" + currentLocale.getDisplayName().toLowerCase(), HeidelTimeOpenNLP.PARAM_LOCALE, "en_US");
// named entity recognition
AnalysisEngineDescription nerMicroservice = AnalysisEngineFactory.createEngineDescription(
NerMicroservice.class, NerMicroservice.NER_SERVICE_URL, this.nerServiceUrl);
// keyterms
AnalysisEngineDescription keyterms = AnalysisEngineFactory.createEngineDescription(KeytermExtractor.class,
KeytermExtractor.PARAM_N_KEYTERMS, 15, KeytermExtractor.PARAM_LANGUAGE_CODE, currentLanguage);
// dictionaries
ExternalResourceDescription dictResource = ExternalResourceFactory.createExternalResourceDescription(
DictionaryResource.class, DictionaryResource.PARAM_DATADIR,
this.configDir + File.separator + "dictionaries", DictionaryResource.PARAM_DICTIONARY_FILES,
this.dictionaryFiles, DictionaryResource.PARAM_LANGUAGE_CODE, currentLanguage);
AnalysisEngineDescription dictionaries = AnalysisEngineFactory.createEngineDescription(
DictionaryExtractor.class,
DictionaryExtractor.RESOURCE_DICTIONARIES, dictResource,
DictionaryExtractor.PARAM_EXTRACT_EMAIL, this.patternEmail,
DictionaryExtractor.PARAM_EXTRACT_URL, this.patternUrl,
DictionaryExtractor.PARAM_EXTRACT_PHONE, this.patternPhone,
DictionaryExtractor.PARAM_EXTRACT_IP, this.patternIP);
// alternative writers for testing purposes (rawtext, xmi) ...
// ... raw text writer
// ExternalResourceDescription resourceLinewriter =
// ExternalResourceFactory.createExternalResourceDescription(
// TextLineWriterResource.class,
// TextLineWriterResource.PARAM_OUTPUT_FILE, this.dataDirectory + File.separator
// + "output.txt");
// AnalysisEngineDescription linewriter =
// AnalysisEngineFactory.createEngineDescription(
// TextLineWriter.class,
// TextLineWriter.RESOURCE_LINEWRITER, resourceLinewriter
// );
//
// ... xmi writer
// AnalysisEngineDescription xmi =
// AnalysisEngineFactory.createEngineDescription(
// XmiWriter.class,
// XmiWriter.PARAM_OUTPUT_DIRECTORY, this.dataDirectory + File.separator + "xmi"
// );
// postgres writer
ExternalResourceDescription resourcePostgres = ExternalResourceFactory.createExternalResourceDescription(
PostgresResource.class, PostgresResource.PARAM_DBURL, this.dbUrl, PostgresResource.PARAM_DBNAME,
this.dbName, PostgresResource.PARAM_DBUSER, this.dbUser, PostgresResource.PARAM_DBPASS, this.dbPass,
PostgresResource.PARAM_TABLE_SCHEMA, this.dbSchema, PostgresResource.PARAM_INDEX_SCHEMA,
this.dbIndices, PostgresResource.PARAM_CREATE_DB, firstLanguage ? "true" : "false");
AnalysisEngineDescription postgresWriter = AnalysisEngineFactory.createEngineDescription(
PostgresDbWriter.class, PostgresDbWriter.RESOURCE_POSTGRES, resourcePostgres);
// define pipeline
AnalysisEngineDescription pipeline = AnalysisEngineFactory.createEngineDescription(sentenceICU,
sentenceCleaner, dictionaries, heideltime, nerMicroservice, keyterms,
// linewriter,
// xmi,
postgresWriter);
// run as UIMA CPE
CpeBuilder cpeBuilder = new CpeBuilder();
cpeBuilder.setReader(esReader);
cpeBuilder.setMaxProcessingUnitThreadCount(this.threads);
cpeBuilder.setAnalysisEngine(pipeline);
// run processing
CollectionProcessingEngine engine = cpeBuilder.createCpe(annotationListener);
engine.process();
while (annotationListener.isProcessing()) {
// wait...
Thread.sleep(1);
}
firstLanguage = false;
}
}
}
| 17,639 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
PostgresDbWriter.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/writer/PostgresDbWriter.java | package uhh_lt.newsleak.writer;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.ExternalResource;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.fit.util.FSCollectionFactory;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.tcas.Annotation;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import opennlp.uima.Location;
import opennlp.uima.Organization;
import opennlp.uima.Person;
import uhh_lt.newsleak.resources.PostgresResource;
import uhh_lt.newsleak.types.DictTerm;
import uhh_lt.newsleak.types.Metadata;
/**
* A writer to populate the newsleak postgres database with final fulltexts and
* extracted entities from a prior annotation chain. This writer does not modify
* the documents.
*/
@OperationalProperties(multipleDeploymentAllowed = true, modifiesCas = false)
public class PostgresDbWriter extends JCasAnnotator_ImplBase {
/** The logger. */
private Logger logger;
/** The Constant RESOURCE_POSTGRES. */
public static final String RESOURCE_POSTGRES = "postgresResource";
/** The postgres resource. */
@ExternalResource(key = RESOURCE_POSTGRES)
private PostgresResource postgresResource;
/** The time formatter. */
private NewsleakTimeFormatter timeFormatter;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.JCasAnnotator_ImplBase#initialize(org.apache.
* uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
logger = context.getLogger();
timeFormatter = new NewsleakTimeFormatter();
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.analysis_component.AnalysisComponent_ImplBase#
* collectionProcessComplete()
*/
@Override
public void collectionProcessComplete() throws AnalysisEngineProcessException {
super.collectionProcessComplete();
// commit final inserts/updates
postgresResource.commit();
}
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.analysis_component.JCasAnnotator_ImplBase#process(org.apache.
* uima.jcas.JCas)
*/
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
Metadata metadata = (Metadata) jcas.getAnnotationIndex(Metadata.type).iterator().next();
Integer docId = Integer.parseInt(metadata.getDocId());
try {
// documents
String docText = jcas.getDocumentText().replaceAll("\r", "");
String docDate = metadata.getTimestamp();
postgresResource.insertDocument(docId, docText, docDate);
// entities and offsets
Collection<Person> persons = JCasUtil.select(jcas, Person.class);
processEntities(persons, "PER", docId);
Collection<Organization> orgs = JCasUtil.select(jcas, Organization.class);
processEntities(orgs, "ORG", docId);
Collection<Location> locs = JCasUtil.select(jcas, Location.class);
processEntities(locs, "LOC", docId);
// dictionary entities
HashMap<String, HashSet<DictTerm>> dictAnnotations = new HashMap<String, HashSet<DictTerm>>();
HashMap<String, HashMap<String, String>> baseFormMap = new HashMap<String, HashMap<String, String>>();
Collection<DictTerm> dictTerms = JCasUtil.select(jcas, DictTerm.class);
for (DictTerm dictTerm : dictTerms) {
Collection<String> typeList = FSCollectionFactory.create(dictTerm.getDictType());
int i = 0;
for (String type : typeList) {
HashSet<DictTerm> typeTerms = dictAnnotations.containsKey(type) ? dictAnnotations.get(type)
: new HashSet<DictTerm>();
HashMap<String, String> baseForms = baseFormMap.containsKey(type) ? baseFormMap.get(type)
: new HashMap<String, String>();
typeTerms.add(dictTerm);
baseForms.put(dictTerm.getCoveredText(), dictTerm.getDictTerm().getNthElement(i));
i++;
dictAnnotations.put(type, typeTerms);
baseFormMap.put(type, baseForms);
}
}
for (String type : dictAnnotations.keySet()) {
processEntities(dictAnnotations.get(type), type, docId, baseFormMap.get(type));
}
// eventtime
ArrayList<String> extractedTimes = timeFormatter.format(jcas);
if (extractedTimes.size() > 0) {
for (String line : extractedTimes) {
String[] items = line.split("\t");
try {
String formattedDate = timeFormatter.filterDate(items[4]);
if (formattedDate != null) {
postgresResource.insertEventtime(docId, Integer.parseInt(items[0]),
Integer.parseInt(items[1]), items[2], items[3], formattedDate);
}
} catch (Exception e) {
						logger.log(Level.WARNING,
								"Could not parse eventtime tuple: " + Arrays.toString(items));
}
}
}
// terms
String keytermList = metadata.getKeyterms();
if (keytermList != null) {
for (String item : metadata.getKeyterms().split("\t")) {
String[] termFrq = item.split(":");
if (termFrq.length == 2) {
postgresResource.insertKeyterms(docId, termFrq[0], Integer.parseInt(termFrq[1]));
}
}
}
// execute batches
postgresResource.executeBatches();
} catch (SQLException e) {
logger.log(Level.SEVERE, "Could not write document " + docId);
e.printStackTrace();
System.exit(1);
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
}
/**
* Process entities.
*
* @param matches
* the matches
* @param type
* the type
* @param docId
* the doc id
* @throws SQLException
* the SQL exception
*/
private void processEntities(Collection<? extends Annotation> matches, String type, Integer docId)
throws SQLException {
processEntities(matches, type, docId, null);
}
/**
* Process entities.
*
* @param matches
* the matches
* @param type
* the type
* @param docId
* the doc id
* @param baseForms
* the base forms
* @throws SQLException
* the SQL exception
*/
private void processEntities(Collection<? extends Annotation> matches, String type, Integer docId,
HashMap<String, String> baseForms) throws SQLException {
HashMap<String, Integer> counter = new HashMap<String, Integer>();
HashMap<String, ArrayList<Annotation>> offsets = new HashMap<String, ArrayList<Annotation>>();
for (Annotation annotation : matches) {
String entity;
if (baseForms == null) {
entity = annotation.getCoveredText();
} else {
String coveredText = annotation.getCoveredText();
entity = baseForms.containsKey(coveredText) ? baseForms.get(coveredText) : coveredText;
}
counter.put(entity, counter.containsKey(entity) ? counter.get(entity) + 1 : 1);
if (offsets.containsKey(entity)) {
offsets.get(entity).add(annotation);
} else {
ArrayList<Annotation> l = new ArrayList<Annotation>();
l.add(annotation);
offsets.put(entity, l);
}
}
for (String entity : counter.keySet()) {
Integer entityId = postgresResource.insertEntity(entity, type, counter.get(entity));
for (Annotation annotation : offsets.get(entity)) {
postgresResource.insertEntityoffset(docId, entityId, annotation.getBegin(), annotation.getEnd());
}
}
}
}
| 7,448 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
TextLineWriter.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/writer/TextLineWriter.java | package uhh_lt.newsleak.writer;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.ExternalResource;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import opennlp.uima.Sentence;
import opennlp.uima.Token;
import uhh_lt.newsleak.resources.TextLineWriterResource;
import uhh_lt.newsleak.types.Metadata;
/**
* A simple writer for debug and development purposes only. It write fulltexts
* and/or extracted entities to disk.
*
* This writer is not used in any production setting.
*/
@OperationalProperties(multipleDeploymentAllowed = true, modifiesCas = false)
public class TextLineWriter extends JCasAnnotator_ImplBase {
/** The sample id hash. */
private HashSet<String> sampleIdHash = new HashSet<String>();
/** The logger. */
Logger logger;
/** The lang stats. */
public HashMap<String, String> langStats;
/** The Constant RESOURCE_LINEWRITER. */
public static final String RESOURCE_LINEWRITER = "linewriter";
/** The linewriter. */
@ExternalResource(key = RESOURCE_LINEWRITER)
private TextLineWriterResource linewriter;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.JCasAnnotator_ImplBase#initialize(org.apache.
* uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
langStats = new HashMap<String, String>();
logger = context.getLogger();
// restrict to samples
String[] sampleIds = { "9141", "9099", "10779", "6823", "7455", "8078", "9538", "10051", "9660", "10521" };
sampleIdHash.addAll(Arrays.asList(sampleIds));
}
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.analysis_component.JCasAnnotator_ImplBase#process(org.apache.
* uima.jcas.JCas)
*/
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
String docText = jcas.getDocumentText();
// Language
String outputText = jcas.getDocumentLanguage() + "\t";
		// n sentences
Collection<Sentence> sentences = JCasUtil.selectCovered(jcas, Sentence.class, 0,
jcas.getDocumentText().length());
outputText += sentences.size() + "\t";
// n tokens
Collection<Token> tokens = JCasUtil.selectCovered(jcas, Token.class, 0, jcas.getDocumentText().length());
outputText += tokens.size() + "\t";
// pos
String firstPOS = tokens.iterator().next().getPos();
outputText += firstPOS + "\t";
// text
outputText += docText.replaceAll("\n", " ");
// linewriter.append(outputText);
Metadata metadata = (Metadata) jcas.getAnnotationIndex(Metadata.type).iterator().next();
langStats.put(metadata.getDocId(), jcas.getDocumentLanguage());
if (sampleIdHash.contains(metadata.getDocId())) {
int i = 0;
for (Sentence s : sentences) {
i++;
String sOut = metadata.getDocId() + "\t" + i + "\t";
String tOut = "";
for (Token t : JCasUtil.selectCovered(jcas, Token.class, s.getBegin(), s.getEnd())) {
tOut += t.getCoveredText() + " ";
}
sOut += tOut.trim();
linewriter.append(sOut);
}
}
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.analysis_component.AnalysisComponent_ImplBase#
* collectionProcessComplete()
*/
@Override
public void collectionProcessComplete() throws AnalysisEngineProcessException {
logger.log(Level.INFO, langStats.toString());
}
}
| 3,773 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
NewsleakTimeFormatter.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/writer/NewsleakTimeFormatter.java | package uhh_lt.newsleak.writer;
import de.unihd.dbs.uima.types.heideltime.Timex3;
import de.unihd.dbs.uima.types.heideltime.Timex3Interval;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.HashSet;
import java.util.TreeMap;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.uima.cas.FSIterator;
import org.apache.uima.jcas.JCas;
/**
* Extraction and formatting of dates and date ranges as annotated by HeidelTime
* to be used for temporal filtering in newsleak.
*/
public class NewsleakTimeFormatter {
/** The formatter. */
private DateFormat formatter;
/** The lower bound. */
Date lowerBound;
/** The upper bound. */
Date upperBound;
	/**
	 * Instantiates a new newsleak time formatter. It accepts time expressions
	 * from 1900-01-01 up to twenty years after the current date.
	 */
public NewsleakTimeFormatter() {
super();
formatter = new SimpleDateFormat("yyyy-MM-dd");
try {
lowerBound = formatter.parse("1900-01-01");
} catch (ParseException e) {
e.printStackTrace();
}
Calendar cal = Calendar.getInstance();
cal.add(Calendar.YEAR, 20);
upperBound = cal.getTime();
}
/**
	 * Formats all time expressions of a document as tab-separated tuples of
	 * begin offset, end offset, covered text, TIMEX type and TIMEX value.
*
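	 * An illustrative output line (values are made up):
	 *
	 * <pre>
	 * 17	28	last Friday	DATE	2016-06-17
	 * </pre>
	 *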
* @param jcas
* the jcas
* @return the array list
* @throws Exception
* the exception
*/
public ArrayList<String> format(JCas jcas) throws Exception {
final String documentText = jcas.getDocumentText();
ArrayList<String> outList = new ArrayList<String>();
String outText = "";
// get the timex3 intervals, do some pre-selection on them
FSIterator iterIntervals = jcas.getAnnotationIndex(Timex3Interval.type).iterator();
TreeMap<Integer, Timex3Interval> intervals = new TreeMap<Integer, Timex3Interval>();
while (iterIntervals.hasNext()) {
Timex3Interval t = (Timex3Interval) iterIntervals.next();
// disregard intervals that likely aren't a real interval, but just a
// timex-translation
if (t.getTimexValueLE().equals(t.getTimexValueLB()) && t.getTimexValueEE().equals(t.getTimexValueEB()))
continue;
if (intervals.containsKey(t.getBegin())) {
Timex3Interval tInt = intervals.get(t.getBegin());
// always get the "larger" intervals
if (t.getEnd() - t.getBegin() > tInt.getEnd() - tInt.getBegin()) {
intervals.put(t.getBegin(), t);
}
} else {
intervals.put(t.getBegin(), t);
}
}
FSIterator iterTimex = jcas.getAnnotationIndex(Timex3.type).iterator();
TreeMap<Integer, Timex3> forwardTimexes = new TreeMap<Integer, Timex3>(),
backwardTimexes = new TreeMap<Integer, Timex3>();
while (iterTimex.hasNext()) {
Timex3 t = (Timex3) iterTimex.next();
forwardTimexes.put(t.getBegin(), t);
backwardTimexes.put(t.getEnd(), t);
}
HashSet<Timex3> timexesToSkip = new HashSet<Timex3>();
Timex3 prevT = null;
Timex3 thisT = null;
// iterate over timexes to find overlaps
for (Integer begin : forwardTimexes.navigableKeySet()) {
thisT = (Timex3) forwardTimexes.get(begin);
			// check whether this and the previous timex overlap, e.g. [early (friday] morning)
if (prevT != null && prevT.getEnd() > thisT.getBegin()) {
Timex3 removedT = null; // only for debug message
// assuming longer value string means better granularity
if (prevT.getTimexValue().length() > thisT.getTimexValue().length()) {
timexesToSkip.add(thisT);
removedT = thisT;
/* prevT stays the same. */
} else {
timexesToSkip.add(prevT);
removedT = prevT;
prevT = thisT; // this iteration's prevT was removed; setting for new iteration
}
// ask user to let us know about possibly incomplete rules
Logger l = Logger.getLogger("TimeMLResultFormatter");
l.log(Level.WARNING,
"Two overlapping Timexes have been discovered:" + System.getProperty("line.separator")
+ "Timex A: " + prevT.getCoveredText() + " [\"" + prevT.getTimexValue() + "\" / "
+ prevT.getBegin() + ":" + prevT.getEnd() + "]" + System.getProperty("line.separator")
+ "Timex B: " + removedT.getCoveredText() + " [\"" + removedT.getTimexValue() + "\" / "
+ removedT.getBegin() + ":" + removedT.getEnd() + "]" + " [removed]"
+ System.getProperty("line.separator") + "The writer chose, for granularity: "
+ prevT.getCoveredText() + System.getProperty("line.separator")
+ "This usually happens with an incomplete ruleset. Please consider adding "
+ "a new rule that covers the entire expression.");
} else { // no overlap found? set current timex as next iteration's previous timex
prevT = thisT;
}
}
// alternative xml creation method
Timex3Interval interval = null;
Timex3 timex = null;
for (Integer docOffset = 0; docOffset <= documentText.length(); docOffset++) {
/**
* see if we have to finish off old timexes/intervals
*/
if (timex != null && timex.getEnd() == docOffset) {
if (!outText.isEmpty())
outList.add(outText);
outText = "";
timex = null;
}
if (interval != null && interval.getEnd() == docOffset) {
if (!outText.isEmpty())
outList.add(outText);
outText = "";
interval = null;
}
/**
* grab a new interval/timex if this offset marks the beginning of one
*/
if (interval == null && intervals.containsKey(docOffset))
interval = intervals.get(docOffset);
if (timex == null && forwardTimexes.containsKey(docOffset)
&& !timexesToSkip.contains(forwardTimexes.get(docOffset)))
timex = forwardTimexes.get(docOffset);
// handle timex openings after that
if (timex != null && timex.getBegin() == docOffset) {
String timexTag = "";
if (!timex.getTimexType().equals(""))
timexTag += timex.getTimexType();
if (!timex.getTimexValue().equals(""))
timexTag += "\t" + timex.getTimexValue();
outText += timex.getBegin() + "\t" + timex.getEnd() + "\t" + timex.getCoveredText() + "\t" + timexTag;
outList.add(outText);
}
}
return outList;
}
/**
	 * Parses a timex value string ("yyyy-MM-dd", "yyyy-MM" or "yyyy") and
	 * returns it formatted, or null if it cannot be parsed or lies outside the
	 * accepted range.
*
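	 * Illustrative calls (a sketch; results assume the bounds set in the
	 * constructor):
	 *
	 * <pre>
	 * filterDate("2014-05-17"); // "2014-05-17"
	 * filterDate("1850");       // null, before the 1900 lower bound
	 * </pre>
	 *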
* @param timexvalue
* the timexvalue
* @return the string
*/
public String filterDate(String timexvalue) {
		Date timexDateValPars = null;
		String timexDateValFormatted = null;
		// reset to the most specific pattern first: a previous call may have
		// left the shared formatter on a coarser pattern ("yyyy-MM" or "yyyy"),
		// which would silently truncate more precise values
		formatter = new SimpleDateFormat("yyyy-MM-dd");
		try {
try {
timexDateValPars = formatter.parse(timexvalue);
timexDateValFormatted = formatter.format(timexDateValPars);
} catch (Exception e) {
try {
formatter = new SimpleDateFormat("yyyy-MM");
timexDateValPars = formatter.parse(timexvalue);
timexDateValFormatted = formatter.format(timexDateValPars);
} catch (Exception e2) {
try {
formatter = new SimpleDateFormat("yyyy");
timexDateValPars = formatter.parse(timexvalue);
timexDateValFormatted = formatter.format(timexDateValPars);
} catch (Exception e3) {
// do nothing
}
}
}
} catch (Exception e) {
// do nothing
}
// filter
if (timexDateValFormatted != null) {
if (timexDateValPars.before(lowerBound) || timexDateValPars.after(upperBound)) {
timexDateValFormatted = null;
}
}
return timexDateValFormatted;
}
}
| 7,183 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
ElasticsearchDocumentWriter.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/writer/ElasticsearchDocumentWriter.java | package uhh_lt.newsleak.writer;
import java.io.IOException;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.fit.descriptor.ExternalResource;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.safety.Whitelist;
import org.apache.uima.fit.util.JCasUtil;
import uhh_lt.newsleak.resources.ElasticsearchResource;
import uhh_lt.newsleak.types.Metadata;
import uhh_lt.newsleak.types.Paragraph;
/**
* A writer to populate a temporary elasticsearch index with fulltexts from a
* prior annotation chain. This writer may modify original document contents in
* the following way:
*
* - splitting of long documents into paragraphs of a certain minimum length
* (1500 characters, i.e. one norm page).
*
* - standardization of line breaks and conversion of of HTML line breaks /
* paragraph markup to text line breaks
*
* - pruning of documents to a maximum length (can be configured in the
* preprocessing configuration)
*
* Paragraph splitting is heuristically assumed at occurrence of one or more
* empty lines.
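 *
 * A hypothetical engine configuration enabling the splitting (parameter
 * values are illustrative):
 *
 * <pre>
 * AnalysisEngineFactory.createEngineDescription(ElasticsearchDocumentWriter.class,
 * 		ElasticsearchDocumentWriter.PARAM_PARAGRAPHS_AS_DOCUMENTS, true,
 * 		ElasticsearchDocumentWriter.PARAM_MINIMUM_PARAGRAPH_LENGTH, 500);
 * </pre>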
*/
@OperationalProperties(multipleDeploymentAllowed = true, modifiesCas = true)
public class ElasticsearchDocumentWriter extends JCasAnnotator_ImplBase {
/** The logger. */
private Logger logger;
/** The Constant ES_TYPE_DOCUMENT. */
public static final String ES_TYPE_DOCUMENT = "document";
/** The Constant RESOURCE_ESCLIENT. */
public static final String RESOURCE_ESCLIENT = "esResource";
/** The es resource. */
@ExternalResource(key = RESOURCE_ESCLIENT)
private ElasticsearchResource esResource;
/** The elasticsearch client. */
private TransportClient client;
/** The Constant PARAM_PARAGRAPHS_AS_DOCUMENTS. */
public static final String PARAM_PARAGRAPHS_AS_DOCUMENTS = "splitIntoParagraphs";
/** The split into paragraphs. */
@ConfigurationParameter(name = PARAM_PARAGRAPHS_AS_DOCUMENTS, mandatory = false, defaultValue = "false")
private boolean splitIntoParagraphs;
/** The Constant PARAM_MINIMUM_PARAGRAPH_LENGTH. */
public static final String PARAM_MINIMUM_PARAGRAPH_LENGTH = "MINIMUM_PARAGRAPH_LENGTH";
/** The minimum paragraph length. */
@ConfigurationParameter(name = PARAM_MINIMUM_PARAGRAPH_LENGTH, mandatory = false, defaultValue = "1500")
private int MINIMUM_PARAGRAPH_LENGTH;
/** The paragraph pattern. */
private Pattern paragraphPattern = Pattern.compile("[?!\\.]( *\\r?\\n){2,}", Pattern.MULTILINE);
/** The Constant PARAM_MAX_DOC_LENGTH. */
public static final String PARAM_MAX_DOC_LENGTH = "maxDocumentLength";
/** The max document length. */
@ConfigurationParameter(name = PARAM_MAX_DOC_LENGTH, mandatory = false)
protected Integer maxDocumentLength = Integer.MAX_VALUE;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.JCasAnnotator_ImplBase#initialize(org.apache.
* uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
logger = context.getLogger();
client = esResource.getClient();
}
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.analysis_component.JCasAnnotator_ImplBase#process(org.apache.
* uima.jcas.JCas)
*/
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
String docText = jcas.getDocumentText();
// skip indexing empty documents
if (docText.trim().length() > 0) {
// always convert windows line breaks to unix line break
docText = docText.replaceAll("\\r\\n", "\n");
docText = docText.replaceAll("\\r", "\n");
// process text normalization
docText = dehyphenate(docText);
docText = replaceHtmlLineBreaks(docText);
// get temporary document id (as assigned by the reader) and prepare mapping to
// new ids
Metadata metadata = (Metadata) jcas.getAnnotationIndex(Metadata.type).iterator().next();
String tmpDocId = metadata.getDocId();
ArrayList<Integer> newsleakDocIds = new ArrayList<Integer>();
if (!splitIntoParagraphs) {
// write entire document into the index
newsleakDocIds.add(writeToIndex(jcas, docText, tmpDocId));
} else {
				// look for paragraph boundaries
annotateParagraphs(jcas);
// write each paragraph as new document into the index
for (Paragraph paragraph : JCasUtil.select(jcas, Paragraph.class)) {
newsleakDocIds.add(writeToIndex(jcas, paragraph.getCoveredText(), tmpDocId));
}
}
// keep track of mapping from tmp ids to new ids (for metadata assignment)
esResource.addDocumentIdMapping(Integer.parseInt(tmpDocId), newsleakDocIds);
}
}
/**
* Write document to temporary newsleak elasticsearch index.
*
* @param jcas
* the jcas
* @param docText
* the doc text
* @param tmpDocId
* the tmp doc id
* @return the integer
*/
public Integer writeToIndex(JCas jcas, String docText, String tmpDocId) {
// init with tmp id
Integer newsleakDocId = Integer.parseInt(tmpDocId);
if (docText.length() > maxDocumentLength) {
// skip overly long documents
logger.log(Level.SEVERE,
"Skipping document " + tmpDocId + ". Exceeds maximum length (" + maxDocumentLength + ")");
} else {
Metadata metadata = (Metadata) jcas.getAnnotationIndex(Metadata.type).iterator().next();
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
// generate new id from auto-increment
newsleakDocId = esResource.getNextDocumentId();
			// index document, and date + language metadata along with new document id
XContentBuilder builder;
try {
Date created = dateFormat.parse(metadata.getTimestamp());
builder = XContentFactory.jsonBuilder().startObject().field("id", newsleakDocId.toString())
.field("Content", docText).field("Created", dateFormat.format(created))
.field("DocumentLanguage", jcas.getDocumentLanguage()).endObject();
IndexResponse response = client
.prepareIndex(esResource.getIndex(), ES_TYPE_DOCUMENT, newsleakDocId.toString())
.setSource(builder).get();
logger.log(Level.INFO, response.toString());
} catch (IOException e) {
e.printStackTrace();
} catch (ParseException e) {
logger.log(Level.SEVERE, "Could not parse document date from document " + tmpDocId);
e.printStackTrace();
} catch (NullPointerException e) {
logger.log(Level.SEVERE, "No date for document " + tmpDocId);
e.printStackTrace();
}
}
return newsleakDocId;
}
/**
	 * Replaces html line breaks with text line breaks and unescapes "&gt;" and
	 * "&lt;" entities left over after tag stripping.
*
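	 * Illustrative call (a sketch; the output is approximate):
	 *
	 * <pre>{@code
	 * replaceHtmlLineBreaks("a<br>b<p>c</p>"); // roughly "a\nb\n\nc"
	 * }</pre>
	 *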
* @param html
* the html
* @return the string
*/
public static String replaceHtmlLineBreaks(String html) {
if (html == null)
return html;
Document document = Jsoup.parse(html);
// makes html() preserve linebreaks and spacing
document.outputSettings(new Document.OutputSettings().prettyPrint(false));
document.select("br").append("\\n");
document.select("p").prepend("\\n\\n");
String s = document.html().replaceAll("\\\\n", "\n");
String cleanedString = Jsoup.clean(s, "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false));
		cleanedString = cleanedString.replaceAll("&gt;", ">");
		cleanedString = cleanedString.replaceAll("&lt;", "<");
return cleanedString;
}
/**
	 * An advanced dehyphenator based on regex.
	 *
	 * - the Unicode hyphens "‐", "‑", "‒" and "–" are accepted as hyphens
*
* - "und"/"and" and "or"/"oder" in second line prevent dehyphenation
*
* - leaves the hyphen if there are a number or an upper case letter as first
* character in second line
*
* - deletes the first spaces in second line
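	 *
	 * Illustrative call (a sketch; note that the ASCII "-" is not matched):
	 *
	 * <pre>
	 * dehyphenate("Informations‐\nfreiheit"); // roughly "\nInformationsfreiheit"
	 * </pre>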
*
* @param sequence
* A string to dehyphenate
* @return A dehyphenated string
*/
public static String dehyphenate(String sequence) {
if (!sequence.contains("\n")) {
// do nothing
return sequence;
}
String dehyphenatedString = sequence.replaceAll(" ", " ");
StringBuilder regexForDehyphenation = new StringBuilder();
// Before hyphen a string with letters, numbers and signs
regexForDehyphenation.append("(\\s)*(\\S*\\w{2,})");
// a hyphen, some spaces, a newline and some spaces
regexForDehyphenation.append("([‐‑‒–]\\s*\\n{1,2}\\s*)");
// the first word starts
regexForDehyphenation.append("(");
// no 'and' or 'or' in new line
regexForDehyphenation.append("(?!und )(?!oder )(?!and )(?!or )");
// the first two characters are not allowed to be numbers or punctuation
regexForDehyphenation.append("(?![\\p{P}\\p{N}])");
// the first word end ending of this group
regexForDehyphenation.append("\\w+)");
Pattern p = Pattern.compile(regexForDehyphenation.toString(), Pattern.UNICODE_CHARACTER_CLASS);
Matcher m = p.matcher(sequence);
while (m.find()) {
String sep = "";
Character firstLetterOfNewline = m.group(4).toCharArray()[0];
// If the first character of the word in the second line is uppercase or a
// number leave the hyphen
if (Character.isUpperCase(firstLetterOfNewline) || Character.isDigit(firstLetterOfNewline)) {
sep = "-";
}
String replaceString = "\n" + m.group(2) + sep + m.group(4);
dehyphenatedString = dehyphenatedString.replace(m.group(0), replaceString);
}
return dehyphenatedString;
}
/**
* Annotate paragraphs.
*
* @param jcas
* the jcas
*/
private void annotateParagraphs(JCas jcas) {
Matcher matcher = paragraphPattern.matcher(jcas.getDocumentText());
Paragraph paragraph = new Paragraph(jcas);
paragraph.setBegin(0);
paragraph.setLanguage(jcas.getDocumentLanguage());
while (matcher.find()) {
if (matcher.start() > 0 && (matcher.start() - paragraph.getBegin()) > MINIMUM_PARAGRAPH_LENGTH) {
paragraph.setEnd(matcher.start() + 1);
paragraph.addToIndexes();
paragraph = new Paragraph(jcas);
paragraph.setBegin(matcher.end());
paragraph.setLanguage(jcas.getDocumentLanguage());
}
}
paragraph.setEnd(jcas.getDocumentText().length());
paragraph.addToIndexes();
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.analysis_component.AnalysisComponent_ImplBase#
* collectionProcessComplete()
*/
@Override
public void collectionProcessComplete() throws AnalysisEngineProcessException {
try {
esResource.writeDocumentIdMapping();
} catch (IOException e) {
throw new AnalysisEngineProcessException(e);
}
super.collectionProcessComplete();
}
}
| 11,151 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
DictTerm.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/types/DictTerm.java |
/* First created by JCasGen Thu Mar 22 16:30:30 CET 2018 */
package uhh_lt.newsleak.types;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.cas.StringList;
import org.apache.uima.jcas.tcas.Annotation;
/** Dictionary terms
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class DictTerm extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(DictTerm.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected DictTerm() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public DictTerm(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public DictTerm(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public DictTerm(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: dictType
/** getter for dictType - gets Type of dictionary added
* @generated
* @return value of the feature
*/
public StringList getDictType() {
if (DictTerm_Type.featOkTst && ((DictTerm_Type)jcasType).casFeat_dictType == null)
jcasType.jcas.throwFeatMissing("dictType", "uhh_lt.newsleak.types.DictTerm");
return (StringList)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((DictTerm_Type)jcasType).casFeatCode_dictType)));}
/** setter for dictType - sets Type of dictionary added
* @generated
* @param v value to set into the feature
*/
public void setDictType(StringList v) {
if (DictTerm_Type.featOkTst && ((DictTerm_Type)jcasType).casFeat_dictType == null)
jcasType.jcas.throwFeatMissing("dictType", "uhh_lt.newsleak.types.DictTerm");
jcasType.ll_cas.ll_setRefValue(addr, ((DictTerm_Type)jcasType).casFeatCode_dictType, jcasType.ll_cas.ll_getFSRef(v));}
//*--------------*
//* Feature: dictTerm
/** getter for dictTerm - gets Base word types from dictionary list
* @generated
* @return value of the feature
*/
public StringList getDictTerm() {
if (DictTerm_Type.featOkTst && ((DictTerm_Type)jcasType).casFeat_dictTerm == null)
jcasType.jcas.throwFeatMissing("dictTerm", "uhh_lt.newsleak.types.DictTerm");
return (StringList)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((DictTerm_Type)jcasType).casFeatCode_dictTerm)));}
/** setter for dictTerm - sets Base word types from dictionary list
* @generated
* @param v value to set into the feature
*/
public void setDictTerm(StringList v) {
if (DictTerm_Type.featOkTst && ((DictTerm_Type)jcasType).casFeat_dictTerm == null)
jcasType.jcas.throwFeatMissing("dictTerm", "uhh_lt.newsleak.types.DictTerm");
jcasType.ll_cas.ll_setRefValue(addr, ((DictTerm_Type)jcasType).casFeatCode_dictTerm, jcasType.ll_cas.ll_getFSRef(v));}
}
| 4,049 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Metadata_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/types/Metadata_Type.java | package uhh_lt.newsleak.types;
/* First created by JCasGen Wed Nov 22 15:48:08 CET 2017 */
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.DocumentAnnotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Metadata_Type extends DocumentAnnotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Metadata.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("uhh_lt.newsleak.types.Metadata");
/** @generated */
final Feature casFeat_metaTripletsNames;
/** @generated */
final int casFeatCode_metaTripletsNames;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getMetaTripletsNames(int addr) {
if (featOkTst && casFeat_metaTripletsNames == null)
jcas.throwFeatMissing("metaTripletsNames", "uhh_lt.newsleak.types.Metadata");
return ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsNames);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setMetaTripletsNames(int addr, int v) {
if (featOkTst && casFeat_metaTripletsNames == null)
jcas.throwFeatMissing("metaTripletsNames", "uhh_lt.newsleak.types.Metadata");
ll_cas.ll_setRefValue(addr, casFeatCode_metaTripletsNames, v);}
/** @generated
* @param addr low level Feature Structure reference
* @param i index of item in the array
* @return value at index i in the array
*/
public String getMetaTripletsNames(int addr, int i) {
if (featOkTst && casFeat_metaTripletsNames == null)
jcas.throwFeatMissing("metaTripletsNames", "uhh_lt.newsleak.types.Metadata");
if (lowLevelTypeChecks)
return ll_cas.ll_getStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsNames), i, true);
jcas.checkArrayBounds(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsNames), i);
return ll_cas.ll_getStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsNames), i);
}
/** @generated
* @param addr low level Feature Structure reference
* @param i index of item in the array
* @param v value to set
*/
public void setMetaTripletsNames(int addr, int i, String v) {
if (featOkTst && casFeat_metaTripletsNames == null)
jcas.throwFeatMissing("metaTripletsNames", "uhh_lt.newsleak.types.Metadata");
if (lowLevelTypeChecks)
ll_cas.ll_setStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsNames), i, v, true);
jcas.checkArrayBounds(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsNames), i);
ll_cas.ll_setStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsNames), i, v);
}
/** @generated */
final Feature casFeat_metaTripletsValues;
/** @generated */
final int casFeatCode_metaTripletsValues;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getMetaTripletsValues(int addr) {
if (featOkTst && casFeat_metaTripletsValues == null)
jcas.throwFeatMissing("metaTripletsValues", "uhh_lt.newsleak.types.Metadata");
return ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsValues);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setMetaTripletsValues(int addr, int v) {
if (featOkTst && casFeat_metaTripletsValues == null)
jcas.throwFeatMissing("metaTripletsValues", "uhh_lt.newsleak.types.Metadata");
ll_cas.ll_setRefValue(addr, casFeatCode_metaTripletsValues, v);}
/** @generated
* @param addr low level Feature Structure reference
* @param i index of item in the array
* @return value at index i in the array
*/
public String getMetaTripletsValues(int addr, int i) {
if (featOkTst && casFeat_metaTripletsValues == null)
jcas.throwFeatMissing("metaTripletsValues", "uhh_lt.newsleak.types.Metadata");
if (lowLevelTypeChecks)
return ll_cas.ll_getStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsValues), i, true);
jcas.checkArrayBounds(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsValues), i);
return ll_cas.ll_getStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsValues), i);
}
/** @generated
* @param addr low level Feature Structure reference
* @param i index of item in the array
* @param v value to set
*/
public void setMetaTripletsValues(int addr, int i, String v) {
if (featOkTst && casFeat_metaTripletsValues == null)
jcas.throwFeatMissing("metaTripletsValues", "uhh_lt.newsleak.types.Metadata");
if (lowLevelTypeChecks)
ll_cas.ll_setStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsValues), i, v, true);
jcas.checkArrayBounds(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsValues), i);
ll_cas.ll_setStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsValues), i, v);
}
/** @generated */
final Feature casFeat_metaTripletsTypes;
/** @generated */
final int casFeatCode_metaTripletsTypes;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getMetaTripletsTypes(int addr) {
if (featOkTst && casFeat_metaTripletsTypes == null)
jcas.throwFeatMissing("metaTripletsTypes", "uhh_lt.newsleak.types.Metadata");
return ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsTypes);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setMetaTripletsTypes(int addr, int v) {
if (featOkTst && casFeat_metaTripletsTypes == null)
jcas.throwFeatMissing("metaTripletsTypes", "uhh_lt.newsleak.types.Metadata");
ll_cas.ll_setRefValue(addr, casFeatCode_metaTripletsTypes, v);}
/** @generated
* @param addr low level Feature Structure reference
* @param i index of item in the array
* @return value at index i in the array
*/
public String getMetaTripletsTypes(int addr, int i) {
if (featOkTst && casFeat_metaTripletsTypes == null)
jcas.throwFeatMissing("metaTripletsTypes", "uhh_lt.newsleak.types.Metadata");
if (lowLevelTypeChecks)
return ll_cas.ll_getStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsTypes), i, true);
jcas.checkArrayBounds(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsTypes), i);
return ll_cas.ll_getStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsTypes), i);
}
/** @generated
* @param addr low level Feature Structure reference
* @param i index of item in the array
* @param v value to set
*/
public void setMetaTripletsTypes(int addr, int i, String v) {
if (featOkTst && casFeat_metaTripletsTypes == null)
jcas.throwFeatMissing("metaTripletsTypes", "uhh_lt.newsleak.types.Metadata");
if (lowLevelTypeChecks)
ll_cas.ll_setStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsTypes), i, v, true);
jcas.checkArrayBounds(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsTypes), i);
ll_cas.ll_setStringArrayValue(ll_cas.ll_getRefValue(addr, casFeatCode_metaTripletsTypes), i, v);
}
/** @generated */
final Feature casFeat_docId;
/** @generated */
final int casFeatCode_docId;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getDocId(int addr) {
if (featOkTst && casFeat_docId == null)
jcas.throwFeatMissing("docId", "uhh_lt.newsleak.types.Metadata");
return ll_cas.ll_getStringValue(addr, casFeatCode_docId);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setDocId(int addr, String v) {
if (featOkTst && casFeat_docId == null)
jcas.throwFeatMissing("docId", "uhh_lt.newsleak.types.Metadata");
ll_cas.ll_setStringValue(addr, casFeatCode_docId, v);}
/** @generated */
final Feature casFeat_timestamp;
/** @generated */
final int casFeatCode_timestamp;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getTimestamp(int addr) {
if (featOkTst && casFeat_timestamp == null)
jcas.throwFeatMissing("timestamp", "uhh_lt.newsleak.types.Metadata");
return ll_cas.ll_getStringValue(addr, casFeatCode_timestamp);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setTimestamp(int addr, String v) {
if (featOkTst && casFeat_timestamp == null)
jcas.throwFeatMissing("timestamp", "uhh_lt.newsleak.types.Metadata");
ll_cas.ll_setStringValue(addr, casFeatCode_timestamp, v);}
/** @generated */
final Feature casFeat_keyterms;
/** @generated */
final int casFeatCode_keyterms;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getKeyterms(int addr) {
if (featOkTst && casFeat_keyterms == null)
jcas.throwFeatMissing("keyterms", "uhh_lt.newsleak.types.Metadata");
return ll_cas.ll_getStringValue(addr, casFeatCode_keyterms);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setKeyterms(int addr, String v) {
if (featOkTst && casFeat_keyterms == null)
jcas.throwFeatMissing("keyterms", "uhh_lt.newsleak.types.Metadata");
ll_cas.ll_setStringValue(addr, casFeatCode_keyterms, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Metadata_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_metaTripletsNames = jcas.getRequiredFeatureDE(casType, "metaTripletsNames", "uima.cas.StringArray", featOkTst);
casFeatCode_metaTripletsNames = (null == casFeat_metaTripletsNames) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_metaTripletsNames).getCode();
casFeat_metaTripletsValues = jcas.getRequiredFeatureDE(casType, "metaTripletsValues", "uima.cas.StringArray", featOkTst);
casFeatCode_metaTripletsValues = (null == casFeat_metaTripletsValues) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_metaTripletsValues).getCode();
casFeat_metaTripletsTypes = jcas.getRequiredFeatureDE(casType, "metaTripletsTypes", "uima.cas.StringArray", featOkTst);
casFeatCode_metaTripletsTypes = (null == casFeat_metaTripletsTypes) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_metaTripletsTypes).getCode();
casFeat_docId = jcas.getRequiredFeatureDE(casType, "docId", "uima.cas.String", featOkTst);
casFeatCode_docId = (null == casFeat_docId) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_docId).getCode();
casFeat_timestamp = jcas.getRequiredFeatureDE(casType, "timestamp", "uima.cas.String", featOkTst);
casFeatCode_timestamp = (null == casFeat_timestamp) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_timestamp).getCode();
casFeat_keyterms = jcas.getRequiredFeatureDE(casType, "keyterms", "uima.cas.String", featOkTst);
casFeatCode_keyterms = (null == casFeat_keyterms) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_keyterms).getCode();
}
}
| 11,960 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
DictTerm_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/types/DictTerm_Type.java |
/* First created by JCasGen Thu Mar 22 16:30:30 CET 2018 */
package uhh_lt.newsleak.types;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/** Dictionary terms
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class DictTerm_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = DictTerm.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("uhh_lt.newsleak.types.DictTerm");
/** @generated */
final Feature casFeat_dictType;
/** @generated */
final int casFeatCode_dictType;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getDictType(int addr) {
if (featOkTst && casFeat_dictType == null)
jcas.throwFeatMissing("dictType", "uhh_lt.newsleak.types.DictTerm");
return ll_cas.ll_getRefValue(addr, casFeatCode_dictType);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setDictType(int addr, int v) {
if (featOkTst && casFeat_dictType == null)
jcas.throwFeatMissing("dictType", "uhh_lt.newsleak.types.DictTerm");
ll_cas.ll_setRefValue(addr, casFeatCode_dictType, v);}
/** @generated */
final Feature casFeat_dictTerm;
/** @generated */
final int casFeatCode_dictTerm;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public int getDictTerm(int addr) {
if (featOkTst && casFeat_dictTerm == null)
jcas.throwFeatMissing("dictTerm", "uhh_lt.newsleak.types.DictTerm");
return ll_cas.ll_getRefValue(addr, casFeatCode_dictTerm);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setDictTerm(int addr, int v) {
if (featOkTst && casFeat_dictTerm == null)
jcas.throwFeatMissing("dictTerm", "uhh_lt.newsleak.types.DictTerm");
ll_cas.ll_setRefValue(addr, casFeatCode_dictTerm, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public DictTerm_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_dictType = jcas.getRequiredFeatureDE(casType, "dictType", "uima.cas.StringList", featOkTst);
casFeatCode_dictType = (null == casFeat_dictType) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_dictType).getCode();
casFeat_dictTerm = jcas.getRequiredFeatureDE(casType, "dictTerm", "uima.cas.StringList", featOkTst);
casFeatCode_dictTerm = (null == casFeat_dictTerm) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_dictTerm).getCode();
}
}
| 3,204 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Metadata.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/types/Metadata.java | package uhh_lt.newsleak.types;
/* First created by JCasGen Wed Nov 22 15:48:08 CET 2017 */
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.cas.StringArray;
import org.apache.uima.jcas.tcas.DocumentAnnotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Metadata extends DocumentAnnotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Metadata.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Metadata() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Metadata(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Metadata(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Metadata(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: metaTripletsNames
/** getter for metaTripletsNames - gets
* @generated
* @return value of the feature
*/
public StringArray getMetaTripletsNames() {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsNames == null)
jcasType.jcas.throwFeatMissing("metaTripletsNames", "uhh_lt.newsleak.types.Metadata");
return (StringArray)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsNames)));}
/** setter for metaTripletsNames - sets
* @generated
* @param v value to set into the feature
*/
public void setMetaTripletsNames(StringArray v) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsNames == null)
jcasType.jcas.throwFeatMissing("metaTripletsNames", "uhh_lt.newsleak.types.Metadata");
jcasType.ll_cas.ll_setRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsNames, jcasType.ll_cas.ll_getFSRef(v));}
/** indexed getter for metaTripletsNames - gets an indexed value -
* @generated
* @param i index in the array to get
* @return value of the element at index i
*/
public String getMetaTripletsNames(int i) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsNames == null)
jcasType.jcas.throwFeatMissing("metaTripletsNames", "uhh_lt.newsleak.types.Metadata");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsNames), i);
return jcasType.ll_cas.ll_getStringArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsNames), i);}
/** indexed setter for metaTripletsNames - sets an indexed value -
* @generated
* @param i index in the array to set
* @param v value to set into the array
*/
public void setMetaTripletsNames(int i, String v) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsNames == null)
jcasType.jcas.throwFeatMissing("metaTripletsNames", "uhh_lt.newsleak.types.Metadata");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsNames), i);
jcasType.ll_cas.ll_setStringArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsNames), i, v);}
//*--------------*
//* Feature: metaTripletsValues
/** getter for metaTripletsValues - gets
* @generated
* @return value of the feature
*/
public StringArray getMetaTripletsValues() {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsValues == null)
jcasType.jcas.throwFeatMissing("metaTripletsValues", "uhh_lt.newsleak.types.Metadata");
return (StringArray)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsValues)));}
/** setter for metaTripletsValues - sets
* @generated
* @param v value to set into the feature
*/
public void setMetaTripletsValues(StringArray v) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsValues == null)
jcasType.jcas.throwFeatMissing("metaTripletsValues", "uhh_lt.newsleak.types.Metadata");
jcasType.ll_cas.ll_setRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsValues, jcasType.ll_cas.ll_getFSRef(v));}
/** indexed getter for metaTripletsValues - gets an indexed value -
* @generated
* @param i index in the array to get
* @return value of the element at index i
*/
public String getMetaTripletsValues(int i) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsValues == null)
jcasType.jcas.throwFeatMissing("metaTripletsValues", "uhh_lt.newsleak.types.Metadata");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsValues), i);
return jcasType.ll_cas.ll_getStringArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsValues), i);}
/** indexed setter for metaTripletsValues - sets an indexed value -
* @generated
* @param i index in the array to set
* @param v value to set into the array
*/
public void setMetaTripletsValues(int i, String v) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsValues == null)
jcasType.jcas.throwFeatMissing("metaTripletsValues", "uhh_lt.newsleak.types.Metadata");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsValues), i);
jcasType.ll_cas.ll_setStringArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsValues), i, v);}
//*--------------*
//* Feature: metaTripletsTypes
/** getter for metaTripletsTypes - gets
* @generated
* @return value of the feature
*/
public StringArray getMetaTripletsTypes() {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsTypes == null)
jcasType.jcas.throwFeatMissing("metaTripletsTypes", "uhh_lt.newsleak.types.Metadata");
return (StringArray)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsTypes)));}
/** setter for metaTripletsTypes - sets
* @generated
* @param v value to set into the feature
*/
public void setMetaTripletsTypes(StringArray v) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsTypes == null)
jcasType.jcas.throwFeatMissing("metaTripletsTypes", "uhh_lt.newsleak.types.Metadata");
jcasType.ll_cas.ll_setRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsTypes, jcasType.ll_cas.ll_getFSRef(v));}
/** indexed getter for metaTripletsTypes - gets an indexed value -
* @generated
* @param i index in the array to get
* @return value of the element at index i
*/
public String getMetaTripletsTypes(int i) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsTypes == null)
jcasType.jcas.throwFeatMissing("metaTripletsTypes", "uhh_lt.newsleak.types.Metadata");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsTypes), i);
return jcasType.ll_cas.ll_getStringArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsTypes), i);}
/** indexed setter for metaTripletsTypes - sets an indexed value -
* @generated
* @param i index in the array to set
* @param v value to set into the array
*/
public void setMetaTripletsTypes(int i, String v) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_metaTripletsTypes == null)
jcasType.jcas.throwFeatMissing("metaTripletsTypes", "uhh_lt.newsleak.types.Metadata");
jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsTypes), i);
jcasType.ll_cas.ll_setStringArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((Metadata_Type)jcasType).casFeatCode_metaTripletsTypes), i, v);}
//*--------------*
//* Feature: docId
/** getter for docId - gets
* @generated
* @return value of the feature
*/
public String getDocId() {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_docId == null)
jcasType.jcas.throwFeatMissing("docId", "uhh_lt.newsleak.types.Metadata");
return jcasType.ll_cas.ll_getStringValue(addr, ((Metadata_Type)jcasType).casFeatCode_docId);}
/** setter for docId - sets
* @generated
* @param v value to set into the feature
*/
public void setDocId(String v) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_docId == null)
jcasType.jcas.throwFeatMissing("docId", "uhh_lt.newsleak.types.Metadata");
jcasType.ll_cas.ll_setStringValue(addr, ((Metadata_Type)jcasType).casFeatCode_docId, v);}
//*--------------*
//* Feature: timestamp
/** getter for timestamp - gets
* @generated
* @return value of the feature
*/
public String getTimestamp() {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_timestamp == null)
jcasType.jcas.throwFeatMissing("timestamp", "uhh_lt.newsleak.types.Metadata");
return jcasType.ll_cas.ll_getStringValue(addr, ((Metadata_Type)jcasType).casFeatCode_timestamp);}
/** setter for timestamp - sets
* @generated
* @param v value to set into the feature
*/
public void setTimestamp(String v) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_timestamp == null)
jcasType.jcas.throwFeatMissing("timestamp", "uhh_lt.newsleak.types.Metadata");
jcasType.ll_cas.ll_setStringValue(addr, ((Metadata_Type)jcasType).casFeatCode_timestamp, v);}
//*--------------*
//* Feature: keyterms
/** getter for keyterms - gets List of key terms
* @generated
* @return value of the feature
*/
public String getKeyterms() {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_keyterms == null)
jcasType.jcas.throwFeatMissing("keyterms", "uhh_lt.newsleak.types.Metadata");
return jcasType.ll_cas.ll_getStringValue(addr, ((Metadata_Type)jcasType).casFeatCode_keyterms);}
/** setter for keyterms - sets List of key terms
* @generated
* @param v value to set into the feature
*/
public void setKeyterms(String v) {
if (Metadata_Type.featOkTst && ((Metadata_Type)jcasType).casFeat_keyterms == null)
jcasType.jcas.throwFeatMissing("keyterms", "uhh_lt.newsleak.types.Metadata");
jcasType.ll_cas.ll_setStringValue(addr, ((Metadata_Type)jcasType).casFeatCode_keyterms, v);}
}
| 12,028 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Paragraph.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/types/Paragraph.java |
/* First created by JCasGen Wed Apr 04 11:24:03 CEST 2018 */
package uhh_lt.newsleak.types;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.jcas.cas.TOP_Type;
import org.apache.uima.jcas.tcas.Annotation;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* XML source: /Users/gwiedemann/Projects/newsleak-frontend/preprocessing/desc/NewsleakDocument.xml
* @generated */
public class Paragraph extends Annotation {
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int typeIndexID = JCasRegistry.register(Paragraph.class);
/** @generated
* @ordered
*/
@SuppressWarnings ("hiding")
public final static int type = typeIndexID;
/** @generated
* @return index of the type
*/
@Override
public int getTypeIndexID() {return typeIndexID;}
/** Never called. Disable default constructor
* @generated */
protected Paragraph() {/* intentionally empty block */}
/** Internal - constructor used by generator
* @generated
* @param addr low level Feature Structure reference
* @param type the type of this Feature Structure
*/
public Paragraph(int addr, TOP_Type type) {
super(addr, type);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
*/
public Paragraph(JCas jcas) {
super(jcas);
readObject();
}
/** @generated
* @param jcas JCas to which this Feature Structure belongs
* @param begin offset to the begin spot in the SofA
* @param end offset to the end spot in the SofA
*/
public Paragraph(JCas jcas, int begin, int end) {
super(jcas);
setBegin(begin);
setEnd(end);
readObject();
}
/**
* <!-- begin-user-doc -->
* Write your own initialization here
* <!-- end-user-doc -->
*
* @generated modifiable
*/
private void readObject() {/*default - does nothing empty block */}
//*--------------*
//* Feature: language
/** getter for language - gets Language of the paragraph
* @generated
* @return value of the feature
*/
public String getLanguage() {
if (Paragraph_Type.featOkTst && ((Paragraph_Type)jcasType).casFeat_language == null)
jcasType.jcas.throwFeatMissing("language", "uhh_lt.newsleak.types.Paragraph");
return jcasType.ll_cas.ll_getStringValue(addr, ((Paragraph_Type)jcasType).casFeatCode_language);}
/** setter for language - sets Language of the paragraph
* @generated
* @param v value to set into the feature
*/
public void setLanguage(String v) {
if (Paragraph_Type.featOkTst && ((Paragraph_Type)jcasType).casFeat_language == null)
jcasType.jcas.throwFeatMissing("language", "uhh_lt.newsleak.types.Paragraph");
jcasType.ll_cas.ll_setStringValue(addr, ((Paragraph_Type)jcasType).casFeatCode_language, v);}
//*--------------*
//* Feature: isNotFulltext
  /** getter for isNotFulltext - gets flags paragraphs which supposedly do not contain interesting fulltext content (e.g. log files)
* @generated
* @return value of the feature
*/
public boolean getIsNotFulltext() {
if (Paragraph_Type.featOkTst && ((Paragraph_Type)jcasType).casFeat_isNotFulltext == null)
jcasType.jcas.throwFeatMissing("isNotFulltext", "uhh_lt.newsleak.types.Paragraph");
return jcasType.ll_cas.ll_getBooleanValue(addr, ((Paragraph_Type)jcasType).casFeatCode_isNotFulltext);}
  /** setter for isNotFulltext - sets flags paragraphs which supposedly do not contain interesting fulltext content (e.g. log files)
* @generated
* @param v value to set into the feature
*/
public void setIsNotFulltext(boolean v) {
if (Paragraph_Type.featOkTst && ((Paragraph_Type)jcasType).casFeat_isNotFulltext == null)
jcasType.jcas.throwFeatMissing("isNotFulltext", "uhh_lt.newsleak.types.Paragraph");
jcasType.ll_cas.ll_setBooleanValue(addr, ((Paragraph_Type)jcasType).casFeatCode_isNotFulltext, v);}
}
| 4,038 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
TimeX.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/types/TimeX.java | package uhh_lt.newsleak.types;
/**
 * Simple value container for a temporal expression (TIMEX): character
 * offsets, surface form, TIMEX type, and normalized value.
 */
public class TimeX {
	int beginOffset;
	int endOffset;
	String timeX;
	String timeXType;
	String timexValue;
public TimeX(int aBeginOffset, int aEndOffset, String aTimeX, String aTimexType, String aTimexValue) {
this.beginOffset = aBeginOffset;
this.endOffset = aEndOffset;
this.timeX = aTimeX;
this.timeXType = aTimexType;
this.timexValue = aTimexValue;
}
} | 404 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
Paragraph_Type.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/types/Paragraph_Type.java |
/* First created by JCasGen Wed Apr 04 11:24:03 CEST 2018 */
package uhh_lt.newsleak.types;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.JCasRegistry;
import org.apache.uima.cas.impl.TypeImpl;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.impl.FeatureImpl;
import org.apache.uima.cas.Feature;
import org.apache.uima.jcas.tcas.Annotation_Type;
/**
* Updated by JCasGen Thu Apr 04 18:38:29 CEST 2019
* @generated */
public class Paragraph_Type extends Annotation_Type {
/** @generated */
@SuppressWarnings ("hiding")
public final static int typeIndexID = Paragraph.typeIndexID;
/** @generated
@modifiable */
@SuppressWarnings ("hiding")
public final static boolean featOkTst = JCasRegistry.getFeatOkTst("uhh_lt.newsleak.types.Paragraph");
/** @generated */
final Feature casFeat_language;
/** @generated */
final int casFeatCode_language;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public String getLanguage(int addr) {
if (featOkTst && casFeat_language == null)
jcas.throwFeatMissing("language", "uhh_lt.newsleak.types.Paragraph");
return ll_cas.ll_getStringValue(addr, casFeatCode_language);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setLanguage(int addr, String v) {
if (featOkTst && casFeat_language == null)
jcas.throwFeatMissing("language", "uhh_lt.newsleak.types.Paragraph");
ll_cas.ll_setStringValue(addr, casFeatCode_language, v);}
/** @generated */
final Feature casFeat_isNotFulltext;
/** @generated */
final int casFeatCode_isNotFulltext;
/** @generated
* @param addr low level Feature Structure reference
* @return the feature value
*/
public boolean getIsNotFulltext(int addr) {
if (featOkTst && casFeat_isNotFulltext == null)
jcas.throwFeatMissing("isNotFulltext", "uhh_lt.newsleak.types.Paragraph");
return ll_cas.ll_getBooleanValue(addr, casFeatCode_isNotFulltext);
}
/** @generated
* @param addr low level Feature Structure reference
* @param v value to set
*/
public void setIsNotFulltext(int addr, boolean v) {
if (featOkTst && casFeat_isNotFulltext == null)
jcas.throwFeatMissing("isNotFulltext", "uhh_lt.newsleak.types.Paragraph");
ll_cas.ll_setBooleanValue(addr, casFeatCode_isNotFulltext, v);}
/** initialize variables to correspond with Cas Type and Features
* @generated
* @param jcas JCas
* @param casType Type
*/
public Paragraph_Type(JCas jcas, Type casType) {
super(jcas, casType);
casImpl.getFSClassRegistry().addGeneratorForType((TypeImpl)this.casType, getFSGenerator());
casFeat_language = jcas.getRequiredFeatureDE(casType, "language", "uima.cas.String", featOkTst);
casFeatCode_language = (null == casFeat_language) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_language).getCode();
casFeat_isNotFulltext = jcas.getRequiredFeatureDE(casType, "isNotFulltext", "uima.cas.Boolean", featOkTst);
casFeatCode_isNotFulltext = (null == casFeat_isNotFulltext) ? JCas.INVALID_FEATURE_CODE : ((FeatureImpl)casFeat_isNotFulltext).getCode();
}
}
| 3,292 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
NerMicroservice.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/annotator/NerMicroservice.java | package uhh_lt.newsleak.annotator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Locale;
import java.util.Map;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.tcas.Annotation;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import opennlp.uima.Location;
import opennlp.uima.Organization;
import opennlp.uima.Person;
import opennlp.uima.Sentence;
import opennlp.uima.Token;
import uhh_lt.newsleak.types.Metadata;
import uhh_lt.newsleak.types.Paragraph;
/**
* Annotator for Named Entity Recognition. The annotator queries a micro-service
* via JSON API. The default micro-service (available at
* https://github.com/uhh-lt/newsleak-ner) wraps the polyglot NLP library
* (https://github.com/aboSamoor/polyglot) in a Flask APP
* (http://flask.pocoo.org/) and is deployed as a docker container
* (https://www.docker.com/).
*
* Currently, polyglot NER only supports O, I-PER, I-LOC and I-ORG labels. No
 * information about the beginning of a new entity is present. Our heuristic
 * assumes that sequences of tokens with identical I-tags form one entity.
*
* Moreover, polyglot NER tends to produce many false positives for noisy text
* data. Thus, we utilize a simple heuristic to remove detected entities which
* contain less than 2 alphabetic characters.
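 *
 * For illustration, the request and response bodies exchanged with the
 * microservice look roughly as follows (the shape is inferred from the
 * request-building and parsing code in this class; the concrete tokens and
 * tags are invented):
 *
 * <pre>
 * request:  {"lang": "en", "sentences": [["John", "lives", "in", "Berlin"]]}
 * response: {"result": [[["John", "I-PER"], ["lives", "O"], ["in", "O"], ["Berlin", "I-LOC"]]]}
 * </pre>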
*/
@OperationalProperties(multipleDeploymentAllowed = true, modifiesCas = true)
public class NerMicroservice extends JCasAnnotator_ImplBase {
/** The Constant NER_SERVICE_URL. */
public final static String NER_SERVICE_URL = "nerMicroserviceUrl";
/** The ner microservice url. */
@ConfigurationParameter(name = NER_SERVICE_URL, mandatory = true, description = "Url to microservice for named entity recognition (JSON API)")
private String nerMicroserviceUrl;
/** The locale map. */
private Map<String, Locale> localeMap;
/** The log. */
Logger log;
/** The http client. */
HttpClient httpClient;
/** The request. */
HttpPost request;
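	/** Maximum number of sentences sent to the NER microservice in a single request; longer paragraphs are annotated in batches of this size. */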
private static final int MAXIMUM_SENTENCES_PER_REQUEST = 1000;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.JCasAnnotator_ImplBase#initialize(org.apache.
* uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
log = context.getLogger();
httpClient = HttpClientBuilder.create().build();
request = new HttpPost(nerMicroserviceUrl);
localeMap = LanguageDetector.localeToISO();
}
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.analysis_component.JCasAnnotator_ImplBase#process(org.apache.
* uima.jcas.JCas)
*/
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
// document language in ISO-639-1 format
String docLang = localeMap.get(jcas.getDocumentLanguage()).getLanguage();
Collection<Paragraph> paragraphs = JCasUtil.select(jcas, Paragraph.class);
for (Paragraph paragraph : paragraphs) {
if (paragraph.getIsNotFulltext()) {
log.log(Level.FINEST, "Skipping paragraph for NER annotation.");
} else {
try {
// create json request for microservice
Collection<Sentence> sentences = JCasUtil.selectCovered(jcas, Sentence.class, paragraph.getBegin(),
paragraph.getEnd());
// annotate in batches
if (sentences.size() < MAXIMUM_SENTENCES_PER_REQUEST) {
annotateNer(jcas, docLang, sentences);
} else {
ArrayList<Sentence> sentenceList = new ArrayList<Sentence>(sentences);
					int batches = (int) Math.ceil(sentenceList.size() / (double) MAXIMUM_SENTENCES_PER_REQUEST);
for (int i = 0; i < batches; i++) {
int start = i * MAXIMUM_SENTENCES_PER_REQUEST;
int end = Math.min(sentenceList.size(), (i + 1) * MAXIMUM_SENTENCES_PER_REQUEST);
annotateNer(jcas, docLang, sentenceList.subList(start, end));
}
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
// remove unlikely entities
cleanNerAnnotations(jcas);
}
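	/**
	 * Sends one batch of sentences to the NER microservice and annotates the
	 * returned I-tags as Person, Location or Organization spans (see the
	 * class-level documentation for a sketch of the exchanged JSON).
	 *
	 * @param jcas the jcas
	 * @param docLang the document language (ISO-639-1)
	 * @param sentences the batch of sentences to annotate
	 * @throws IOException on communication errors
	 * @throws ClientProtocolException on HTTP protocol errors
	 * @throws AnalysisEngineProcessException if the service response is invalid
	 */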
private void annotateNer(JCas jcas, String docLang, Collection<Sentence> sentences)
throws IOException, ClientProtocolException, AnalysisEngineProcessException {
XContentBuilder sb = XContentFactory.jsonBuilder().startObject();
sb.field("lang", docLang);
sb.field("sentences").startArray();
for (Sentence sentence : sentences) {
Collection<Token> tokens = JCasUtil.selectCovered(jcas, Token.class, sentence.getBegin(),
sentence.getEnd());
sb.startArray();
for (Token token : tokens) {
sb.value(token.getCoveredText());
}
sb.endArray();
}
sb.endArray();
StringEntity entity = new StringEntity(sb.string(), ContentType.APPLICATION_JSON);
request.setEntity(entity);
HttpResponse response = httpClient.execute(request);
// evaluate request response
String responseText = EntityUtils.toString(response.getEntity());
JsonObject obj = new JsonParser().parse(responseText).getAsJsonObject();
JsonArray sentenceArray = null;
try {
sentenceArray = obj.getAsJsonArray("result");
} catch (Exception e) {
log.log(Level.SEVERE, "Invalid NER result. Check if there is a model for language '" + docLang
+ "' in the NER microservice?");
log.log(Level.SEVERE, "Json request string: " + sb.string());
// System.exit(1);
throw new AnalysisEngineProcessException();
}
ArrayList<String> tagList = new ArrayList<String>();
for (int i = 0; i < sentenceArray.size(); i++) {
JsonArray tokenArray = sentenceArray.get(i).getAsJsonArray();
for (int j = 0; j < tokenArray.size(); j++) {
JsonArray annotationArray = tokenArray.get(j).getAsJsonArray();
tagList.add(annotationArray.get(1).getAsString());
}
}
int position = -1;
// annotate NE types
for (Sentence sentence : sentences) {
Collection<Token> tokens = JCasUtil.selectCovered(jcas, Token.class, sentence.getBegin(),
sentence.getEnd());
Annotation annotation = null;
String prevTag = "";
for (Token token : tokens) {
position++;
String tag = tagList.get(position);
if (tag.equals(prevTag)) {
if (annotation != null) {
annotation.setEnd(token.getEnd());
}
} else {
if (annotation != null) {
annotation.addToIndexes();
}
if (tag.equals("O")) {
annotation = null;
} else if (tag.equals("I-PER")) {
annotation = new Person(jcas);
} else if (tag.equals("I-LOC")) {
annotation = new Location(jcas);
} else if (tag.equals("I-ORG")) {
annotation = new Organization(jcas);
}
if (annotation != null) {
annotation.setBegin(token.getBegin());
annotation.setEnd(token.getEnd());
}
}
prevTag = tag;
}
if (annotation != null) {
annotation.addToIndexes();
}
}
}
/**
* Remove NE annotation for unlikely PER, ORG and LOC entities.
*
* @param jcas
* the jcas
*/
private void cleanNerAnnotations(JCas jcas) {
// do not apply filter to chinese or japanese texts
if (jcas.getDocumentLanguage().equals("zho") || jcas.getDocumentLanguage().equals("jpn"))
return;
Collection<Person> persons = JCasUtil.select(jcas, Person.class);
cleanAnnotation(persons);
Collection<Organization> organizations = JCasUtil.select(jcas, Organization.class);
cleanAnnotation(organizations);
Collection<Location> locations = JCasUtil.select(jcas, Location.class);
cleanAnnotation(locations);
}
/**
* Remove unlikely annotation.
*
* @param annotations
* removal candidates
*/
private void cleanAnnotation(Collection<? extends Annotation> annotations) {
for (Annotation a : annotations) {
// less than two letters
String ne = a.getCoveredText();
if (ne.replaceAll("[^\\p{L}]", "").length() < 2) {
log.log(Level.FINEST, "Removing Named Entity: " + ne);
a.removeFromIndexes();
}
}
}
}
| 8,843 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
SegmenterICU.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/annotator/SegmenterICU.java | package uhh_lt.newsleak.annotator;
import java.text.BreakIterator;
import java.util.Collection;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import org.apache.uima.fit.util.JCasUtil;
import opennlp.uima.Sentence;
import opennlp.uima.Token;
import uhh_lt.newsleak.types.Paragraph;
/**
 * Tokenization, sentence and paragraph annotation with locale-sensitive
 * BreakIterator rule sets (this class uses java.text.BreakIterator; the ICU4J
 * library provides equivalent iterators) to iterate over tokens and sentences
 * for all Unicode locales.
*
* Paragraph splits are heuristically inferred at one or more empty lines.
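 *
 * A minimal standalone sketch of the BreakIterator protocol applied below
 * (the example text and locale are invented for illustration):
 *
 * <pre>{@code
 * String text = "First sentence. Second one.";
 * BreakIterator bi = BreakIterator.getSentenceInstance(Locale.ENGLISH);
 * bi.setText(text);
 * int start = bi.first();
 * for (int end = bi.next(); end != BreakIterator.DONE; start = end, end = bi.next()) {
 *   System.out.println(text.substring(start, end)); // prints one sentence per line
 * }
 * }</pre>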
*/
@OperationalProperties(multipleDeploymentAllowed = true, modifiesCas = true)
public class SegmenterICU extends JCasAnnotator_ImplBase {
	/** Minimum type-token ratio; paragraphs below this value are flagged as non-fulltext. */
	private static final double TTR_THRESHOLD = 0.1;
	/** Minimum number of tokens a paragraph needs before the type-token ratio check is applied. */
	private static final int TTR_MIN_LENGTH = 100;
/** The Constant PARAM_LOCALE. */
public final static String PARAM_LOCALE = "localeString";
/** The locale string. */
@ConfigurationParameter(name = PARAM_LOCALE, mandatory = true, description = "Locale string for ICU4J sentence segmentation")
private String localeString;
/** The locale map. */
private Map<String, Locale> localeMap;
/** The paragraph pattern. */
private Pattern paragraphPattern = Pattern.compile("( *\\r?\\n){2,}", Pattern.MULTILINE);
// private Pattern paragraphPattern = Pattern.compile("(^\\s*$)+",
// Pattern.MULTILINE);
/** The log. */
Logger log;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.JCasAnnotator_ImplBase#initialize(org.apache.
* uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
log = context.getLogger();
localeMap = LanguageDetector.localeToISO();
}
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.analysis_component.JCasAnnotator_ImplBase#process(org.apache.
* uima.jcas.JCas)
*/
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
// annotate paragraphs in document text
annotateParagraphs(jcas);
// get locale from current language
Locale locale = localeMap.get(localeString);
Collection<Paragraph> paragraphs = JCasUtil.select(jcas, Paragraph.class);
for (Paragraph paragraph : paragraphs) {
int parStart = paragraph.getBegin();
// load language specific rule set
BreakIterator sentenceBreaker = BreakIterator.getSentenceInstance(locale);
sentenceBreaker.setText(paragraph.getCoveredText());
// find sentence breaks
int sentStart = sentenceBreaker.first();
for (int sentEnd = sentenceBreaker
.next(); sentEnd != BreakIterator.DONE; sentStart = sentEnd, sentEnd = sentenceBreaker.next()) {
Sentence sentence = new Sentence(jcas);
sentence.setBegin(parStart + sentStart);
sentence.setEnd(parStart + sentEnd);
BreakIterator tokenBreaker = BreakIterator.getWordInstance(locale);
tokenBreaker.setText(sentence.getCoveredText());
// find token breaks
int tokStart = tokenBreaker.first();
boolean containsTokens = false;
for (int tokEnd = tokenBreaker
.next(); tokEnd != BreakIterator.DONE; tokStart = tokEnd, tokEnd = tokenBreaker.next()) {
Token token = new Token(jcas);
token.setBegin(parStart + sentStart + tokStart);
token.setEnd(parStart + sentStart + tokEnd);
// add non-empty tokens
if (!token.getCoveredText().trim().isEmpty()) {
token.addToIndexes();
containsTokens = true;
}
}
// add non-empty sentences
if (containsTokens) {
sentence.addToIndexes();
}
}
}
		// collapse runs of identical single-character tokens into one
collapseSingleCharacterTokens(jcas);
// flag unlikely fulltext paragraphs (e.g. log files)
flagDubiousParagraphs(jcas);
}
/**
* Collapses sequences of single character tokens of the same character into one token (e.g. .......).
*
* @param jcas the jcas
*/
	private void collapseSingleCharacterTokens(JCas jcas) {
		Collection<Token> tokens = JCasUtil.select(jcas, Token.class);
		boolean sequenceStarted = false;
		String lastToken = "";
		int newTokenEnd = 0;
		Token tokenToCollapse = null;
		// iterate over all tokens
		for (Token token : tokens) {
			String currentToken = token.getCoveredText();
			// only look at single char tokens
			if (currentToken.length() == 1) {
				if (!sequenceStarted) {
					newTokenEnd = token.getEnd();
					tokenToCollapse = token;
					sequenceStarted = true;
				} else {
					if (currentToken.equals(lastToken)) {
						newTokenEnd = token.getEnd();
						token.removeFromIndexes();
					} else {
						// a different character closes the current sequence
						// and starts a new one
						collapseSequence(tokenToCollapse, newTokenEnd);
						newTokenEnd = token.getEnd();
						tokenToCollapse = token;
					}
				}
			} else {
				// a multi-character token closes any pending sequence
				collapseSequence(tokenToCollapse, newTokenEnd);
				sequenceStarted = false;
				tokenToCollapse = null;
			}
			lastToken = currentToken;
		}
		// a sequence may reach until the last token of the document
		collapseSequence(tokenToCollapse, newTokenEnd);
	}

	/**
	 * Extends the first token of a collapsed sequence to the end of the
	 * sequence. A no-op for null tokens and single-token sequences.
	 *
	 * @param tokenToCollapse
	 *            the first token of the sequence
	 * @param newTokenEnd
	 *            the end offset of the last token of the sequence
	 */
	private void collapseSequence(Token tokenToCollapse, int newTokenEnd) {
		if (tokenToCollapse != null && newTokenEnd - tokenToCollapse.getEnd() > 0) {
			tokenToCollapse.removeFromIndexes();
			tokenToCollapse.setEnd(newTokenEnd);
			tokenToCollapse.addToIndexes();
		}
	}
/**
* Annotate paragraphs such that every document contains at least one, starting
* at the beginning and ending at the end of a document.
*
* @param jcas
* the jcas
*/
private void annotateParagraphs(JCas jcas) {
Matcher matcher = paragraphPattern.matcher(jcas.getDocumentText());
Paragraph paragraph = new Paragraph(jcas);
paragraph.setBegin(0);
paragraph.setLanguage(localeString);
while (matcher.find()) {
if (matcher.start() > 0) {
paragraph.setEnd(matcher.start());
paragraph.addToIndexes();
paragraph = new Paragraph(jcas);
paragraph.setBegin(matcher.end());
paragraph.setLanguage(localeString);
}
}
paragraph.setEnd(jcas.getDocumentText().length());
paragraph.addToIndexes();
}
/**
	 * Flag unlikely paragraphs. Paragraphs with a very low type-token ratio are
	 * assumed to stem from log files or other non-fulltext content. Such
	 * paragraphs can be excluded from the information extraction pipeline, if
	 * the flag isNotFulltext is set to true by this annotator.
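	 *
	 * For example (numbers invented for illustration): a paragraph of 200
	 * counted tokens with only 15 distinct word forms has a type-token ratio
	 * of 15/200 = 0.075, below the TTR_THRESHOLD of 0.1, so the paragraph is
	 * flagged.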
*
* @param jcas
* the jcas
*/
private void flagDubiousParagraphs(JCas jcas) {
Collection<Paragraph> paragraphs = JCasUtil.select(jcas, Paragraph.class);
for (Paragraph paragraph : paragraphs) {
boolean noFulltextParagraph = false;
Collection<Token> tokens = JCasUtil.selectCovered(jcas, Token.class, paragraph.getBegin(), paragraph.getEnd());
if (tokens.size() > TTR_MIN_LENGTH) {
// calculate type-token ratio
int tokenCount = 0;
HashSet<String> vocabulary = new HashSet<String>();
for (Token token : tokens) {
String word = token.getCoveredText();
if (StringUtils.isNumeric(word)) {
continue;
}
tokenCount++;
if (!vocabulary.contains(word)) {
vocabulary.add(word);
}
}
double typeTokenRatio = vocabulary.size() / (double) tokenCount;
// set flag for very low TTR
if (typeTokenRatio < TTR_THRESHOLD) {
noFulltextParagraph = true;
String paragraphText = paragraph.getCoveredText();
log.log(Level.FINEST, "Unlikely fulltext paragraph flagged:\n----------------------------\n"
+ paragraphText.substring(0, Math.min(paragraphText.length(), 1000)));
}
}
paragraph.setIsNotFulltext(noFulltextParagraph);
paragraph.addToIndexes();
}
}
}
| 7,896 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
LanguageDetector.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/annotator/LanguageDetector.java | package uhh_lt.newsleak.annotator;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.stream.Stream;
import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.fit.descriptor.ExternalResource;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Logger;
import opennlp.tools.langdetect.LanguageDetectorME;
import uhh_lt.newsleak.resources.LanguageDetectorResource;
import uhh_lt.newsleak.resources.MetadataResource;
import uhh_lt.newsleak.types.Metadata;
/**
* Detects the language of a document based on the first 3000 characters of its
* content. The annotator wraps the Apache OpenNLP maximum entropy language
* identifier model.
*
* Inferred language is written as metadata to a temporary metadata collection.
* ISO codes for supported languages (one per line) should reside in
* <i>resources/supportedLanguages.txt</i>
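 *
 * A hypothetical <i>supportedLanguages.txt</i> (ISO 639-3 codes, one per
 * line; the concrete selection below is only an example):
 *
 * <pre>
 * eng
 * deu
 * spa
 * </pre>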
*/
@OperationalProperties(multipleDeploymentAllowed=true, modifiesCas=true)
public class LanguageDetector extends JCasAnnotator_ImplBase {
/** The OpenNLP model. */
private LanguageDetectorME languageDetector;
/** The OpenNLP model file */
public final static String MODEL_FILE = "languageDetectorResource";
/** The language detector resource. */
@ExternalResource(key = MODEL_FILE)
private LanguageDetectorResource languageDetectorResource;
/** Location of the temporary METADATA_FILE collection. */
public final static String METADATA_FILE = "metadataResource";
/** The metadata resource. */
@ExternalResource(key = METADATA_FILE)
private MetadataResource metadataResource;
/** The Constant PARAM_DEFAULT_LANG. */
public static final String PARAM_DEFAULT_LANG = "defaultLanguage";
/** The default language (English). */
@ConfigurationParameter(name = PARAM_DEFAULT_LANG, mandatory = false, defaultValue = "eng")
private String defaultLanguage;
/** The Constant DOCLANG_FILE. */
public final static String DOCLANG_FILE = "documentLanguagesFile";
/** Temporary file to keep document language information. */
@ConfigurationParameter(name = DOCLANG_FILE, mandatory = true, description = "temporary file to keep document language information")
private String documentLanguagesFile;
/** The supported languages. */
public HashSet<String> supportedLanguages;
/** The logger. */
Logger logger;
/*
* (non-Javadoc)
*
* @see org.apache.uima.fit.component.JCasAnnotator_ImplBase#initialize(org.
* apache.uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
supportedLanguages = getSupportedLanguages();
languageDetector = new LanguageDetectorME(languageDetectorResource.getModel());
logger = context.getLogger();
}
/**
* Gets the supported languages.
*
* @return the supported languages
*/
public static HashSet<String> getSupportedLanguages() {
HashSet<String> supportedLanguages = new HashSet<String>();
try (Stream<String> stream = Files.lines(Paths.get("resources/supportedLanguages.txt"))) {
stream.forEach(supportedLanguages::add);
} catch (IOException e) {
e.printStackTrace();
}
return supportedLanguages;
}
/*
	 * Infers the document language from the first 3,000 characters, updates the
	 * language statistics and sets CAS metadata.
*
* @see
* org.apache.uima.analysis_component.JCasAnnotator_ImplBase#process(org.
* apache.uima.jcas.JCas)
*/
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
String docText = jcas.getDocumentText();
Integer maxLength = Math.min(docText.length(), 3000);
String docBeginning = docText.substring(0, maxLength);
String docLang = languageDetector.predictLanguage(docBeginning).getLang();
// count languages for statistics
languageDetectorResource.addLanguage(docLang);
Metadata metadata = (Metadata) jcas.getAnnotationIndex(Metadata.type).iterator().next();
// Only set language metadata, if it is supported
if (supportedLanguages.contains(docLang)) {
jcas.setDocumentLanguage(docLang);
// append language information to metadata file
ArrayList<List<String>> langmetadata = new ArrayList<List<String>>();
langmetadata.add(metadataResource.createTextMetadata(metadata.getDocId(), "language", docLang));
metadataResource.appendMetadata(langmetadata);
}
}
/**
	 * Builds a lookup map from ISO 639-3 language codes to Locale objects.
	 *
	 * @return map from ISO 639-3 codes (e.g. "deu") to the corresponding Locale
*/
public static Map<String, Locale> localeToISO() {
String[] languages = Locale.getISOLanguages();
Map<String, Locale> localeMap = new HashMap<String, Locale>(languages.length);
for (String language : languages) {
Locale locale = new Locale(language);
localeMap.put(locale.getISO3Language(), locale);
}
return localeMap;
}
/*
* Logs language counts after process has completed.
*
* @see org.apache.uima.analysis_component.AnalysisComponent_ImplBase#
* collectionProcessComplete()
*/
@Override
public void collectionProcessComplete() throws AnalysisEngineProcessException {
languageDetectorResource.logLanguageStatistics(logger);
super.collectionProcessComplete();
}
}
| 5,579 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
DictionaryExtractor.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/annotator/DictionaryExtractor.java | package uhh_lt.newsleak.annotator;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.fit.descriptor.ExternalResource;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;
import org.apache.uima.jcas.cas.StringList;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Logger;
import opennlp.uima.Token;
import uhh_lt.newsleak.resources.DictionaryResource;
import uhh_lt.newsleak.resources.DictionaryResource.Dictionary;
import uhh_lt.newsleak.types.DictTerm;
/**
* A UIMA annotator to annotate regular expression patterns and dictionary
* lists. REs are emails, phone numbers (does not work too well), URLs, and IP
* addresses. Dictionaries are text files containing category terms. Dictionary
* files should be stored in <i>conf/dictionaries</i> and follow the naming
* convention <i>dictionarytype.langcode</i> (e.g. spam.deu for German spam
* terms). The files should contain dictionary terms one per line. Terms can be
* multi word units (MWU). MWUs are searched by regular expression patterns
 * (case-insensitive). Single-word units are stemmed and lowercased before
* comparison with tokens. Matching tokens are annotated as DictTerm type.
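 *
 * A hypothetical dictionary file <i>conf/dictionaries/spam.deu</i> (the terms
 * are invented for illustration; the second entry is a multi word unit):
 *
 * <pre>
 * gewinnspiel
 * jetzt kaufen
 * gratis
 * </pre>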
*/
@OperationalProperties(multipleDeploymentAllowed = true, modifiesCas = true)
public class DictionaryExtractor extends JCasAnnotator_ImplBase {
/** REGEX_EMAIL. */
public static final Pattern REGEX_EMAIL = Pattern.compile("[\\p{L}0-9._%+-]+@[\\p{L}0-9.-]+\\.[\\p{L}]{2,6}",
Pattern.UNICODE_CHARACTER_CLASS);
	/** REGEX_PHONE (probably needs a better pattern...). */
	public static final Pattern REGEX_PHONE = Pattern.compile(
			"\\+(9[976]\\d|8[987530]\\d|6[987]\\d|5[90]\\d|42\\d|3[875]\\d|2[98654321]\\d|9[8543210]|8[6421]|6[6543210]|5[87654321]|4[987654310]|3[9643210]|2[70]|7|1)\\d{1,14}");
/** REGEX_URL. */
public static final Pattern REGEX_URL = Pattern.compile(
"(https?|ftp|file)://[-\\p{L}0-9+&@#/%?=~_|!:,.;]*[-\\p{L}0-9+&@#/%=~_|]", Pattern.UNICODE_CHARACTER_CLASS);
/** REGEX_IP. */
public static final Pattern REGEX_IP = Pattern
.compile("(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)");
/** Dictionary Resource. */
public final static String RESOURCE_DICTIONARIES = "dictTermExtractor";
@ExternalResource(key = RESOURCE_DICTIONARIES)
private DictionaryResource dictTermExtractor;
/** PARAM_EXTRACT_EMAIL. */
public static final String PARAM_EXTRACT_EMAIL = "extractEmail";
@ConfigurationParameter(name = PARAM_EXTRACT_EMAIL, mandatory = false, defaultValue = "true")
private boolean extractEmail;
/** PARAM_EXTRACT_URL. */
public static final String PARAM_EXTRACT_URL = "extractUrl";
@ConfigurationParameter(name = PARAM_EXTRACT_URL, mandatory = false, defaultValue = "false")
private boolean extractUrl;
/** PARAM_EXTRACT_IP. */
public static final String PARAM_EXTRACT_IP = "extractIp";
@ConfigurationParameter(name = PARAM_EXTRACT_IP, mandatory = false, defaultValue = "false")
private boolean extractIp;
/** PARAM_EXTRACT_PHONE. */
public static final String PARAM_EXTRACT_PHONE = "extractPhone";
@ConfigurationParameter(name = PARAM_EXTRACT_PHONE, mandatory = false, defaultValue = "false")
private boolean extractPhone;
private Logger log;
/** The unigram dictionaries. */
private HashMap<String, Dictionary> unigramDictionaries;
/** The mwu dictionaries. */
private HashMap<String, Dictionary> mwuDictionaries;
/*
* Uima initializer fetching dictionary entries.
*
* @see org.apache.uima.fit.component.JCasAnnotator_ImplBase#initialize(org.
* apache.uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
log = context.getLogger();
unigramDictionaries = dictTermExtractor.getUnigramDictionaries();
mwuDictionaries = dictTermExtractor.getMwuDictionaries();
}
/*
* Uima process method extracting REGEX patterns for URLs, IPs, Email and
* Phone numbers as well as dictionary patterns.
*
* @see
* org.apache.uima.analysis_component.JCasAnnotator_ImplBase#process(org.
* apache.uima.jcas.JCas)
*/
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
ArrayList<DictTerm> termsToTokenList = new ArrayList<DictTerm>();
// EMAIL
if (extractEmail)
termsToTokenList.addAll(annotateRegex(jcas, REGEX_EMAIL, "EMAIL"));
// URL
if (extractUrl)
termsToTokenList.addAll(annotateRegex(jcas, REGEX_URL, "URL"));
// IP
if (extractIp)
termsToTokenList.addAll(annotateRegex(jcas, REGEX_IP, "IP"));
// PHONE
if (extractPhone)
termsToTokenList.addAll(annotateRegex(jcas, REGEX_PHONE, "PHONE"));
// Set new token and sentence boundaries for pattern matches
correctTokenBoundaries(jcas, termsToTokenList);
// Dictionary multi word units
annotateMultiWordUnits(jcas);
// Dictionary unigrams
Collection<Token> tokens = JCasUtil.select(jcas, Token.class);
for (Token t : tokens) {
annotateDictTypes(jcas, t);
}
}
/**
* Resets token boundaries for retrieved REGEX matches which span over
* multiple tokens.
*
* @param jcas
* the jcas
* @param termsToTokenList
* the terms to token list
*/
private void correctTokenBoundaries(JCas jcas, ArrayList<DictTerm> termsToTokenList) {
for (DictTerm dictTerm : termsToTokenList) {
// tokens
Collection<Token> coveredTokens = JCasUtil.selectCovered(jcas, Token.class, dictTerm);
if (coveredTokens.size() > 1) {
Token newToken = new Token(jcas);
boolean firstTok = true;
for (Token t : coveredTokens) {
if (firstTok) {
newToken.setBegin(t.getBegin());
newToken.setPos(t.getPos());
firstTok = false;
}
newToken.setEnd(t.getEnd());
t.removeFromIndexes();
}
newToken.addToIndexes();
}
}
}
/**
	 * Annotates a token with all matching unigram dictionary types. The stem of
	 * the token is looked up in each dictionary; a match only counts if the
	 * lowercased token also starts with the stored base form.
*
* @param jcas
* the jcas
* @param token
* the token to annotate
*/
public void annotateDictTypes(JCas jcas, Token token) {
String tokenStem = dictTermExtractor.stem(token.getCoveredText()).toLowerCase();
String tokenValue = token.getCoveredText().toLowerCase();
boolean dictTermFound = false;
StringList typeList = new StringList(jcas);
StringList baseFormList = new StringList(jcas);
for (String dictType : unigramDictionaries.keySet()) {
HashMap<String, String> dict = unigramDictionaries.get(dictType);
if (dict.containsKey(tokenStem)) {
String baseForm = dict.get(tokenStem);
if (tokenValue.startsWith(baseForm)) {
typeList = typeList.push(dictType);
baseFormList = baseFormList.push(baseForm);
dictTermFound = true;
}
}
}
// add to cas index
if (dictTermFound) {
DictTerm dictTerm = new DictTerm(jcas);
dictTerm.setBegin(token.getBegin());
dictTerm.setEnd(token.getEnd());
dictTerm.setDictType(typeList);
dictTerm.setDictTerm(baseFormList);
dictTerm.addToIndexes();
}
}
/**
* Annotate multi word units (with regex pattern).
*
* @param jcas
* the jcas
*/
private void annotateMultiWordUnits(JCas jcas) {
for (String dictType : mwuDictionaries.keySet()) {
HashMap<String, String> dict = mwuDictionaries.get(dictType);
for (String regexPattern : dict.keySet()) {
annotateRegex(jcas, Pattern.compile(regexPattern), dictType);
}
}
}
/**
	 * Annotates all matches of a regex pattern as DictTerm. Used for URLs, IPs,
	 * email addresses, phone numbers, and dictionary multi-word-unit patterns.
*
* @param jcas
* the jcas
* @param pattern
* the pattern
* @param type
* the type
	 * @return the list of DictTerm annotations created for the matches
*/
public ArrayList<DictTerm> annotateRegex(JCas jcas, Pattern pattern, String type) {
String docText = jcas.getDocumentText();
ArrayList<DictTerm> regexMatches = new ArrayList<DictTerm>();
Matcher matcher = pattern.matcher(docText);
// Check all occurrences
while (matcher.find()) {
DictTerm dictTerm = new DictTerm(jcas);
dictTerm.setBegin(matcher.start());
dictTerm.setEnd(matcher.end());
StringList typeList = new StringList(jcas);
StringList baseFormList = new StringList(jcas);
typeList = typeList.push(type);
baseFormList = baseFormList.push(matcher.group());
dictTerm.setDictType(typeList);
dictTerm.setDictTerm(baseFormList);
dictTerm.addToIndexes();
regexMatches.add(dictTerm);
}
return regexMatches;
}
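	/*
	 * Minimal standalone sketch (not part of the original annotator) of the
	 * offset extraction annotateRegex() performs with java.util.regex. The
	 * helper only collects begin/end spans; the real method additionally
	 * creates and indexes DictTerm annotations in the CAS.
	 */
	private static java.util.List<int[]> findSpans(String docText, Pattern pattern) {
		java.util.List<int[]> spans = new ArrayList<int[]>();
		Matcher matcher = pattern.matcher(docText);
		while (matcher.find()) {
			// record the match offsets exactly as annotateRegex() does
			spans.add(new int[] { matcher.start(), matcher.end() });
		}
		return spans;
	}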
/**
* Placeholder for no stemming (if no stemmer is available for the current
* document language)
*/
private class noStemmer extends org.tartarus.snowball.SnowballStemmer {
/*
* (non-Javadoc)
*
* @see org.tartarus.snowball.SnowballStemmer#stem()
*/
@Override
public boolean stem() {
return true;
}
}
}
| 9,150 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
SentenceCleaner.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/annotator/SentenceCleaner.java | package uhh_lt.newsleak.annotator;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import com.ibm.icu.text.DateFormatSymbols;
import opennlp.uima.Sentence;
import opennlp.uima.Token;
/**
* Annotator to remove implausible Sentences. The ICU4J sentence segmentation
* may result in unlikely sentence structures such as very long base64-encoded
* blocks from email attachments. This annotator restricts sentence annotations
* to a maximum number of tokens and tokens to a maximum character length.
* Overly long tokens are removed from the CAS index. Overly long sentences are
* split into smaller chunks. In addition, wrong sentence breaks in German dates
* (e.g. "25. September 2019") are corrected. Also, documents are flagged by
* noFulltextDocument=true in the document metadata of the CAS if a very low
* type token ratio is observed.
*/
@OperationalProperties(multipleDeploymentAllowed = true, modifiesCas = true)
public class SentenceCleaner extends JCasAnnotator_ImplBase {
/** The Constant MAX_TOKENS_PER_SENTENCE. */
public static final int MAX_TOKENS_PER_SENTENCE = 150;
/** The Constant RESIZE_TOKENS_PER_SENTENCE. */
public static final int RESIZE_TOKENS_PER_SENTENCE = 25;
/** The Constant MAX_TOKEN_LENGTH. */
private static final int MAX_TOKEN_LENGTH = 70;
	/** Date format symbols used to acquire German month names. */
DateFormatSymbols dfs;
/** The month names. */
HashSet<String> monthNames;
/** The log. */
Logger log;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.JCasAnnotator_ImplBase#initialize(org.apache.
* uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
log = context.getLogger();
log.setLevel(Level.ALL);
dfs = new DateFormatSymbols(Locale.GERMAN);
monthNames = new HashSet<String>(Arrays.asList(dfs.getMonths()));
}
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.analysis_component.JCasAnnotator_ImplBase#process(org.apache.
* uima.jcas.JCas)
*/
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
// step 1
cleanTokens(jcas);
// step 2
repairSentenceBreaks(jcas);
// step 3
restructureSentences(jcas);
}
/**
* This function removes token annotations from overly long tokens which can be
* large URLs or base64 encoded data. After token annotation removal potentially
* empty sentences are removed, too.
*
* @param jcas
* the jcas
*/
private void cleanTokens(JCas jcas) {
// remove too long tokens
Collection<Token> tokens = JCasUtil.select(jcas, Token.class);
for (Token token : tokens) {
if (token.getCoveredText().length() > MAX_TOKEN_LENGTH) {
token.removeFromIndexes();
}
}
// remove empty sentences
Collection<Sentence> sentences = JCasUtil.select(jcas, Sentence.class);
for (Sentence sentence : sentences) {
tokens = JCasUtil.selectCovered(jcas, Token.class, sentence.getBegin(), sentence.getEnd());
if (tokens.isEmpty()) {
sentence.removeFromIndexes();
}
}
}
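	/*
	 * Example: a 100-character base64 fragment from an e-mail attachment
	 * exceeds MAX_TOKEN_LENGTH (70) and loses its Token annotation; a
	 * sentence that consisted only of such fragments is left without covered
	 * tokens and its Sentence annotation is removed as well.
	 */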
/**
	 * The ICU sentence segmenter wrongly splits German (and possibly other
	 * languages') date constructions such as "25. Oktober" at the ordinal
	 * period. This function merges sentences that were falsely separated at
	 * such a date punctuation mark.
*
* @param jcas
* the jcas
*/
private void repairSentenceBreaks(JCas jcas) {
Collection<Sentence> sentences = JCasUtil.select(jcas, Sentence.class);
// merge falsely separated sentences
List<Token> lastSentenceTokens = new ArrayList<Token>();
Sentence lastSentence = null;
for (Sentence sentence : sentences) {
List<Token> tokens = JCasUtil.selectCovered(jcas, Token.class, sentence.getBegin(), sentence.getEnd());
// check pattern
if (monthNames.contains(tokens.get(0).getCoveredText()) && lastSentenceTokens.size() > 1
&& lastSentenceTokens.get(lastSentenceTokens.size() - 2).getCoveredText().matches("\\d{1,2}")
&& lastSentenceTokens.get(lastSentenceTokens.size() - 1).getCoveredText().matches("\\.")) {
lastSentence.setEnd(sentence.getEnd());
lastSentence.addToIndexes();
sentence.removeFromIndexes();
}
lastSentenceTokens = tokens;
lastSentence = sentence;
}
}
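	/*
	 * Worked example: ICU may split "Am 25. September ..." into
	 * "... Am 25." | "September ...". The pattern check fires because the
	 * previous sentence ends in a digit token ("25") followed by ".", and
	 * the current sentence starts with a German month name, so the two
	 * Sentence annotations are merged into one.
	 */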
/**
* Sentence tokenization may lead to non-sentences, e.g. very long lists or
* automatically generated text files (e.g. logs). This function splits overly
* long sentences into smaller pieces.
*
* @param jcas
* the jcas
*/
public void restructureSentences(JCas jcas) {
Collection<Sentence> sentences = JCasUtil.select(jcas, Sentence.class);
for (Sentence sentence : sentences) {
Collection<Token> tokens = JCasUtil.selectCovered(jcas, Token.class, sentence.getBegin(),
sentence.getEnd());
if (tokens.size() > MAX_TOKENS_PER_SENTENCE) {
log.log(Level.FINEST, "Restructuring long sentence: " + sentence.getBegin() + " " + sentence.getEnd());
int sStart = sentence.getBegin();
boolean startNew = true;
int nTok = 0;
for (Token token : tokens) {
nTok++;
if (startNew) {
sStart = token.getBegin();
startNew = false;
}
if (nTok % RESIZE_TOKENS_PER_SENTENCE == 0) {
Sentence s = new Sentence(jcas);
s.setBegin(sStart);
s.setEnd(token.getEnd());
s.addToIndexes();
startNew = true;
log.log(Level.FINEST, "New sentence: " + sStart + " " + token.getEnd());
}
}
if (!startNew) {
Sentence s = new Sentence(jcas);
s.setBegin(sStart);
s.setEnd(sentence.getEnd());
s.addToIndexes();
log.log(Level.FINEST, "New sentence: " + sStart + " " + sentence.getEnd());
}
sentence.removeFromIndexes();
}
}
}
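	/*
	 * Illustration (sketch, not part of the original class) of the chunking
	 * arithmetic above: a 160-token sentence with RESIZE_TOKENS_PER_SENTENCE
	 * = 25 yields new sentences after tokens 25, 50, ..., 150, plus one
	 * trailing sentence for the remaining 10 tokens via the (!startNew)
	 * branch, i.e. 7 chunks in total.
	 */
	private static int expectedChunkCount(int tokenCount, int chunkSize) {
		// full chunks plus one trailing chunk for any remainder
		return tokenCount / chunkSize + (tokenCount % chunkSize == 0 ? 0 : 1);
	}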
}
| 6,244 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
HeidelTimeOpenNLP.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/annotator/HeidelTimeOpenNLP.java | package uhh_lt.newsleak.annotator;
/* Based on: https://github.com/HeidelTime/heideltime/blob/master/src/de/unihd/dbs/uima/annotator/heideltime/HeidelTime.java
*
* HeidelTime.java
*
* Copyright (c) 2011, Database Research Group, Institute of Computer Science, Heidelberg University.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU General Public License.
*
* author: Jannik Strötgen
* email: [email protected]
*
* HeidelTime is a multilingual, cross-domain temporal tagger.
* For details, see http://dbs.ifi.uni-heidelberg.de/heideltime
*/
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.regex.MatchResult;
import java.util.regex.Pattern;
import org.apache.uima.UimaContext;
import org.apache.uima.cas.FSIterator;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import de.unihd.dbs.uima.annotator.heideltime.ProcessorManager;
import de.unihd.dbs.uima.annotator.heideltime.ProcessorManager.Priority;
import de.unihd.dbs.uima.annotator.heideltime.processors.TemponymPostprocessing;
import de.unihd.dbs.uima.annotator.heideltime.resources.Language;
import de.unihd.dbs.uima.annotator.heideltime.resources.NormalizationManager;
import de.unihd.dbs.uima.annotator.heideltime.resources.RePatternManager;
import de.unihd.dbs.uima.annotator.heideltime.resources.RegexHashMap;
import de.unihd.dbs.uima.annotator.heideltime.resources.RuleManager;
import de.unihd.dbs.uima.annotator.heideltime.utilities.DateCalculator;
import de.unihd.dbs.uima.annotator.heideltime.utilities.ContextAnalyzer;
import de.unihd.dbs.uima.annotator.heideltime.utilities.LocaleException;
import de.unihd.dbs.uima.annotator.heideltime.utilities.Logger;
import de.unihd.dbs.uima.annotator.heideltime.utilities.Toolbox;
import de.unihd.dbs.uima.types.heideltime.Dct;
import opennlp.uima.Sentence;
import de.unihd.dbs.uima.types.heideltime.Timex3;
import opennlp.uima.Token;
import uhh_lt.newsleak.types.Paragraph;
/**
* HeidelTime finds temporal expressions and normalizes them according to the
* TIMEX3 TimeML annotation standard.
*
* This class is based on:
* https://github.com/HeidelTime/heideltime/blob/master/src/de/unihd/dbs/uima/
* annotator/heideltime/HeidelTime.java
*
* We modified it to fit into the newsleak pipeline (accepting our type system
* and the absence of pos tags).
*
* @author jannik stroetgen
*
*/
@OperationalProperties(multipleDeploymentAllowed = true, modifiesCas = true)
public class HeidelTimeOpenNLP extends JCasAnnotator_ImplBase {
// TOOL NAME (may be used as componentId)
private Class<?> component = this.getClass();
// PROCESSOR MANAGER
private ProcessorManager procMan = new ProcessorManager();
	// COUNTERS (how many timexes were finally added to the CAS: per document and global)
public int timex_counter = 0;
public int timex_counter_global = 0;
// FLAG (for historic expressions referring to BC)
public Boolean flagHistoricDates = false;
// COUNTER FOR TIMEX IDS
private int timexID = 0;
// INPUT PARAMETER HANDLING WITH UIMA
// supported languages (2012-05-19): english, german, dutch, englishcoll,
// englishsci
public static final String PARAM_LANGUAGE = "requestedLanguage";
@ConfigurationParameter(name = PARAM_LANGUAGE, mandatory = true, description = "document language")
private String requestedLanguage;
private Language language;
// chosen locale parameter name
public static final String PARAM_LOCALE = "locale";
@ConfigurationParameter(name = PARAM_LOCALE, mandatory = false, defaultValue = "en_GB", description = "document locale")
private String requestedLocale;
// supported types (2012-05-19): news (english, german, dutch), narrative
// (english, german, dutch), colloquial
private String PARAM_TYPE_TO_PROCESS = "Type";
private String typeToProcess = "news";
// INPUT PARAMETER HANDLING WITH UIMA (which types shall be extracted)
private String PARAM_DATE = "Date";
private String PARAM_TIME = "Time";
private String PARAM_DURATION = "Duration";
private String PARAM_SET = "Set";
private String PARAM_TEMPONYMS = "Temponym";
private String PARAM_DEBUG = "Debugging";
private String PARAM_GROUP = "ConvertDurations";
private Boolean find_dates = true;
private Boolean find_times = false;
private Boolean find_durations = false;
private Boolean find_sets = false;
private Boolean find_temponyms = false;
private Boolean group_gran = true;
	// overlapping timexes are removed; set to false only for debugging purposes
private Boolean deleteOverlapped = true;
/**
* @see AnalysisComponent#initialize(UimaContext)
*/
public void initialize(UimaContext aContext) throws ResourceInitializationException {
super.initialize(aContext);
/////////////////////////////////
// DEBUGGING PARAMETER SETTING //
/////////////////////////////////
this.deleteOverlapped = true;
Boolean doDebug = (Boolean) aContext.getConfigParameterValue(PARAM_DEBUG);
Logger.setPrintDetails(doDebug == null ? false : doDebug);
/////////////////////////////////
// HANDLE LOCALE //
/////////////////////////////////
		if (requestedLocale == null || requestedLocale.length() == 0) {
			// if the PARAM_LOCALE setting was left empty, use the
			// ISO8601-adhering UK locale (equivalent to "en_GB") as default
			Locale.setDefault(Locale.UK);
		} else {
			// otherwise, check if the desired locale exists in the JVM's
			// available locale repertoire
			try {
				Locale locale = DateCalculator.getLocaleFromString(requestedLocale);
				Locale.setDefault(locale); // sets it for the entire JVM session
			} catch (LocaleException e) {
				Logger.printError(
						"Supplied locale parameter couldn't be resolved to a working locale. Try one of these:");
				// list all available locales
				StringBuilder localesString = new StringBuilder();
				for (Locale l : Locale.getAvailableLocales()) {
					localesString.append(l.toString()).append(" ");
				}
				Logger.printError(localesString.toString());
				System.exit(-1);
			}
		}
//////////////////////////////////
// GET CONFIGURATION PARAMETERS //
//////////////////////////////////
language = Language.getLanguageFromString(requestedLanguage);
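		// note: the parameter reads below were disabled for the newsleak
		// pipeline; the hardcoded extraction flags declared above are used instead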
// typeToProcess = (String)
// aContext.getConfigParameterValue(PARAM_TYPE_TO_PROCESS);
// find_dates = (Boolean) aContext.getConfigParameterValue(PARAM_DATE);
// find_times = (Boolean) aContext.getConfigParameterValue(PARAM_TIME);
// find_durations = (Boolean)
// aContext.getConfigParameterValue(PARAM_DURATION);
// find_sets = (Boolean) aContext.getConfigParameterValue(PARAM_SET);
// find_temponyms = (Boolean)
// aContext.getConfigParameterValue(PARAM_TEMPONYMS);
// group_gran = (Boolean) aContext.getConfigParameterValue(PARAM_GROUP);
////////////////////////////////////////////////////////////
// READ NORMALIZATION RESOURCES FROM FILES AND STORE THEM //
////////////////////////////////////////////////////////////
NormalizationManager.getInstance(language, find_temponyms);
//////////////////////////////////////////////////////
// READ PATTERN RESOURCES FROM FILES AND STORE THEM //
//////////////////////////////////////////////////////
RePatternManager.getInstance(language, find_temponyms);
///////////////////////////////////////////////////
// READ RULE RESOURCES FROM FILES AND STORE THEM //
///////////////////////////////////////////////////
RuleManager.getInstance(language, find_temponyms);
		/////////////////////////////////////////////////////////////////
		// SUBPROCESSOR CONFIGURATION.                                 //
		// REGISTER YOUR OWN PROCESSORS HERE FOR EXECUTION             //
		/////////////////////////////////////////////////////////////////
procMan.registerProcessor("de.unihd.dbs.uima.annotator.heideltime.processors.HolidayProcessor");
procMan.registerProcessor("de.unihd.dbs.uima.annotator.heideltime.processors.DecadeProcessor");
procMan.initializeAllProcessors(aContext);
/////////////////////////////
// PRINT WHAT WILL BE DONE //
/////////////////////////////
if (find_dates)
Logger.printDetail("Getting Dates...");
if (find_times)
Logger.printDetail("Getting Times...");
if (find_durations)
Logger.printDetail("Getting Durations...");
if (find_sets)
Logger.printDetail("Getting Sets...");
if (find_temponyms)
Logger.printDetail("Getting Temponyms...");
}
/**
* @see JCasAnnotator_ImplBase#process(JCas)
*/
public void process(JCas jcas) {
// check whether a given DCT (if any) is of the correct format and if
// not, skip this call
if (!isValidDCT(jcas)) {
Logger.printError(component, "The reader component of this workflow has set an incorrect DCT."
+ " HeidelTime expects either \"YYYYMMDD\" or \"YYYY-MM-DD...\". This document was skipped.");
return;
}
// run preprocessing processors
procMan.executeProcessors(jcas, Priority.PREPROCESSING);
RuleManager rulem = RuleManager.getInstance(language, find_temponyms);
timexID = 1; // reset counter once per document processing
timex_counter = 0;
flagHistoricDates = false;
////////////////////////////////////////////
// CHECK SENTENCE BY SENTENCE FOR TIMEXES //
////////////////////////////////////////////
Collection<Paragraph> paragraphs = JCasUtil.select(jcas, Paragraph.class);
for (Paragraph paragraph : paragraphs) {
if (paragraph.getIsNotFulltext()) {
Logger.printDetail("Skipping paragraph for TIMEX annotation.");
} else {
Collection<Sentence> sentences = JCasUtil.selectCovered(jcas, Sentence.class, paragraph.getBegin(),
paragraph.getEnd());
Iterator<Sentence> sentIter = sentences.iterator();
/*
* check if the pipeline has annotated any sentences. if not, heideltime can't
* do any work, will return from process() with a warning message.
*/
if (!sentIter.hasNext()) {
Logger.printError(component,
"HeidelTime has not found any sentence tokens in this document. "
+ "HeidelTime needs sentence tokens tagged by a preprocessing UIMA analysis engine to "
+ "do its work. Please check your UIMA workflow and add an analysis engine that creates "
+ "these sentence tokens.");
}
while (sentIter.hasNext()) {
Sentence s = (Sentence) sentIter.next();
Boolean debugIteration = false;
Boolean oldDebugState = Logger.getPrintDetails();
do {
try {
if (find_dates) {
findTimexes("DATE", rulem.getHmDatePattern(), rulem.getHmDateOffset(),
rulem.getHmDateNormalization(), s, jcas);
}
if (find_times) {
findTimexes("TIME", rulem.getHmTimePattern(), rulem.getHmTimeOffset(),
rulem.getHmTimeNormalization(), s, jcas);
}
/*
* check for historic dates/times starting with BC to check if post-processing
* step is required
*/
if (typeToProcess.equals("narrative") || typeToProcess.equals("narratives")) {
FSIterator iterDates = jcas.getAnnotationIndex(Timex3.type).iterator();
while (iterDates.hasNext()) {
Timex3 t = (Timex3) iterDates.next();
if (t.getTimexValue().startsWith("BC")) {
flagHistoricDates = true;
break;
}
}
}
if (find_sets) {
findTimexes("SET", rulem.getHmSetPattern(), rulem.getHmSetOffset(),
rulem.getHmSetNormalization(), s, jcas);
}
if (find_durations) {
findTimexes("DURATION", rulem.getHmDurationPattern(), rulem.getHmDurationOffset(),
rulem.getHmDurationNormalization(), s, jcas);
}
if (find_temponyms) {
findTimexes("TEMPONYM", rulem.getHmTemponymPattern(), rulem.getHmTemponymOffset(),
rulem.getHmTemponymNormalization(), s, jcas);
}
} catch (NullPointerException npe) {
if (!debugIteration) {
debugIteration = true;
Logger.setPrintDetails(true);
Logger.printError(component, "HeidelTime's execution has been interrupted by an exception that "
+ "is likely rooted in faulty normalization resource files. Please consider opening an issue "
+ "report containing the following information at our GitHub project issue tracker: "
+ "https://github.com/HeidelTime/heideltime/issues - Thanks!");
npe.printStackTrace();
Logger.printError(component,
"Sentence [" + s.getBegin() + "-" + s.getEnd() + "]: " + s.getCoveredText());
Logger.printError(component, "Language: " + language);
Logger.printError(component, "Re-running this sentence with DEBUGGING enabled...");
} else {
debugIteration = false;
Logger.setPrintDetails(oldDebugState);
Logger.printError(component, "Execution will now resume.");
}
}
} while (debugIteration);
}
			}
		}
/*
* kick out some overlapping expressions
*/
if (deleteOverlapped == true)
deleteOverlappedTimexesPreprocessing(jcas);
/*
* specify ambiguous values, e.g.: specific year for date values of format
* UNDEF-year-01-01; specific month for values of format UNDEF-last-month
*/
specifyAmbiguousValues(jcas);
// disambiguate historic dates
// check dates without explicit hints to AD or BC if they might refer to
// BC dates
if (flagHistoricDates)
try {
disambiguateHistoricDates(jcas);
} catch (Exception e) {
Logger.printError("Something went wrong disambiguating historic dates.");
e.printStackTrace();
}
if (find_temponyms) {
TemponymPostprocessing.handleIntervals(jcas);
}
/*
* kick out the rest of the overlapping expressions
*/
if (deleteOverlapped == true)
deleteOverlappedTimexesPostprocessing(jcas);
// run arbitrary processors
procMan.executeProcessors(jcas, Priority.ARBITRARY);
// remove invalid timexes
removeInvalids(jcas);
// run postprocessing processors
procMan.executeProcessors(jcas, Priority.POSTPROCESSING);
timex_counter_global = timex_counter_global + timex_counter;
		Logger.printDetail(component,
				"Number of Timexes added to CAS: " + timex_counter + " (global: " + timex_counter_global + ")");
}
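	/*
	 * Usage sketch (not part of the original file): wiring this annotator into
	 * a uimaFIT pipeline. Assumes uimaFIT's AnalysisEngineFactory is on the
	 * classpath; the parameter names are the public constants declared above.
	 *
	 *   AnalysisEngineDescription heideltime = AnalysisEngineFactory
	 *           .createEngineDescription(HeidelTimeOpenNLP.class,
	 *                   HeidelTimeOpenNLP.PARAM_LANGUAGE, "german",
	 *                   HeidelTimeOpenNLP.PARAM_LOCALE, "de_DE");
	 */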
/**
* Add timex annotation to CAS object.
*
	 * @param timexType
	 * @param begin
	 * @param end
	 * @param sentence
	 * @param timexValue
	 * @param timexQuant
	 * @param timexFreq
	 * @param timexMod
	 * @param emptyValue
	 * @param timexId
	 * @param foundByRule
	 * @param jcas
*/
public void addTimexAnnotation(String timexType, int begin, int end, Sentence sentence, String timexValue,
String timexQuant, String timexFreq, String timexMod, String emptyValue, String timexId, String foundByRule,
JCas jcas) {
Timex3 annotation = new Timex3(jcas);
annotation.setBegin(begin);
annotation.setEnd(end);
// annotation.setFilename(sentence.getFilename());
// annotation.setSentId(sentence.getSentenceId());
annotation.setEmptyValue(emptyValue);
FSIterator iterToken = jcas.getAnnotationIndex(Token.type).subiterator(sentence);
String allTokIds = "";
Integer runningTokenId = 0;
while (iterToken.hasNext()) {
Token tok = (Token) iterToken.next();
tok.setTokenId(++runningTokenId);
if (tok.getBegin() <= begin && tok.getEnd() > begin) {
annotation.setFirstTokId(tok.getTokenId());
allTokIds = "BEGIN<-->" + tok.getTokenId();
}
if ((tok.getBegin() > begin) && (tok.getEnd() <= end)) {
allTokIds = allTokIds + "<-->" + tok.getTokenId();
}
}
annotation.setAllTokIds(allTokIds);
annotation.setTimexType(timexType);
annotation.setTimexValue(timexValue);
annotation.setTimexId(timexId);
annotation.setFoundByRule(foundByRule);
if ((timexType.equals("DATE")) || (timexType.equals("TIME"))) {
if ((timexValue.startsWith("X")) || (timexValue.startsWith("UNDEF"))) {
annotation.setFoundByRule(foundByRule + "-relative");
} else {
annotation.setFoundByRule(foundByRule + "-explicit");
}
}
		if (timexQuant != null) {
			annotation.setTimexQuant(timexQuant);
		}
		if (timexFreq != null) {
			annotation.setTimexFreq(timexFreq);
		}
		if (timexMod != null) {
			annotation.setTimexMod(timexMod);
		}
annotation.addToIndexes();
this.timex_counter++;
		Logger.printDetail(annotation.getTimexId() + " EXTRACTION PHASE: " + " found by:" + annotation.getFoundByRule()
				+ " text:" + annotation.getCoveredText());
		Logger.printDetail(annotation.getTimexId() + " NORMALIZATION PHASE: " + " found by:" + annotation.getFoundByRule()
				+ " text:" + annotation.getCoveredText() + " value:" + annotation.getTimexValue());
}
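	/*
	 * Example of the allTokIds format built above: if a timex covers the
	 * third to fifth token of its sentence, the string reads
	 * "BEGIN<-->3<-->4<-->5".
	 */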
/**
	 * Postprocessing: check dates starting with "0" that were extracted without
	 * an explicit "AD" hint and, where the context makes it likely, re-interpret
	 * them as the respective BC date.
*
* @param jcas
*/
public void disambiguateHistoricDates(JCas jcas) {
// build up a list with all found TIMEX expressions
List<Timex3> linearDates = new ArrayList<Timex3>();
FSIterator iterTimex = jcas.getAnnotationIndex(Timex3.type).iterator();
// Create List of all Timexes of types "date" and "time"
while (iterTimex.hasNext()) {
Timex3 timex = (Timex3) iterTimex.next();
if (timex.getTimexType().equals("DATE") || timex.getTimexType().equals("TIME")) {
linearDates.add(timex);
}
}
//////////////////////////////////////////////
// go through list of Date and Time timexes //
//////////////////////////////////////////////
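		// start at index 1: the disambiguation looks back at earlier timexes
		// (i - offset), and the first timex has no predecessor to inherit from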
for (int i = 1; i < linearDates.size(); i++) {
Timex3 t_i = (Timex3) linearDates.get(i);
String value_i = t_i.getTimexValue();
String newValue = value_i;
Boolean change = false;
if (!(t_i.getFoundByRule().contains("-BCADhint"))) {
if (value_i.startsWith("0")) {
				int offset = 1, counter = 1;
do {
if ((i == 1 || (i > 1 && !change))
&& linearDates.get(i - offset).getTimexValue().startsWith("BC")) {
if (value_i.length() > 1) {
if ((linearDates.get(i - offset).getTimexValue()
.startsWith("BC" + value_i.substring(0, 2)))
|| (linearDates.get(i - offset).getTimexValue().startsWith("BC" + String
.format("%02d", (Integer.parseInt(value_i.substring(0, 2)) + 1))))) {
if (((value_i.startsWith("00"))
&& (linearDates.get(i - offset).getTimexValue().startsWith("BC00")))
|| ((value_i.startsWith("01")) && (linearDates.get(i - offset)
.getTimexValue().startsWith("BC01")))) {
if ((value_i.length() > 2)
&& (linearDates.get(i - offset).getTimexValue().length() > 4)) {
if (Integer.parseInt(value_i.substring(0, 3)) <= Integer.parseInt(
linearDates.get(i - offset).getTimexValue().substring(2, 5))) {
newValue = "BC" + value_i;
change = true;
Logger.printDetail("DisambiguateHistoricDates: " + value_i + " to "
+ newValue + ". Expression " + t_i.getCoveredText() + " due to "
+ linearDates.get(i - offset).getCoveredText());
}
}
} else {
newValue = "BC" + value_i;
change = true;
Logger.printDetail("DisambiguateHistoricDates: " + value_i + " to " + newValue
+ ". Expression " + t_i.getCoveredText() + " due to "
+ linearDates.get(i - offset).getCoveredText());
}
}
}
}
if ((linearDates.get(i - offset).getTimexType().equals("TIME")
|| linearDates.get(i - offset).getTimexType().equals("DATE"))
&& (linearDates.get(i - offset).getTimexValue().matches("^\\d.*"))) {
counter++;
}
} while (counter < 5 && ++offset < i);
}
}
if (!(newValue.equals(value_i))) {
t_i.removeFromIndexes();
Logger.printDetail("DisambiguateHistoricDates: value changed to BC");
t_i.setTimexValue(newValue);
t_i.addToIndexes();
linearDates.set(i, t_i);
}
}
}
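	/*
	 * Worked example: after "47 BC" has been normalized to "BC0047", a later
	 * bare "44" with value "0044" passes the prefix and magnitude checks and
	 * is rewritten to "BC0044".
	 */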
/**
* Postprocessing: Remove invalid timex expressions. These are already marked as
* invalid: timexValue().equals("REMOVE")
*
* @param jcas
*/
public void removeInvalids(JCas jcas) {
/*
* Iterate over timexes and add invalids to HashSet (invalids cannot be removed
* directly since iterator is used)
*/
FSIterator iterTimex = jcas.getAnnotationIndex(Timex3.type).iterator();
HashSet<Timex3> hsTimexToRemove = new HashSet<Timex3>();
while (iterTimex.hasNext()) {
Timex3 timex = (Timex3) iterTimex.next();
if (timex.getTimexValue().equals("REMOVE")) {
hsTimexToRemove.add(timex);
}
}
// remove invalids, finally
for (Timex3 timex3 : hsTimexToRemove) {
timex3.removeFromIndexes();
this.timex_counter--;
Logger.printDetail(timex3.getTimexId() + " REMOVING PHASE: " + "found by:" + timex3.getFoundByRule()
+ " text:" + timex3.getCoveredText() + " value:" + timex3.getTimexValue());
}
}
@SuppressWarnings("unused")
public String specifyAmbiguousValuesString(String ambigString, Timex3 t_i, Integer i, List<Timex3> linearDates,
JCas jcas) {
NormalizationManager norm = NormalizationManager.getInstance(language, find_temponyms);
// //////////////////////////////////////
// IS THERE A DOCUMENT CREATION TIME? //
// //////////////////////////////////////
boolean dctAvailable = false;
// ////////////////////////////
// DOCUMENT TYPE TO PROCESS //
// //////////////////////////
boolean documentTypeNews = false;
boolean documentTypeNarrative = false;
boolean documentTypeColloquial = false;
boolean documentTypeScientific = false;
if (typeToProcess.equals("news")) {
documentTypeNews = true;
}
if (typeToProcess.equals("narrative") || typeToProcess.equals("narratives")) {
documentTypeNarrative = true;
}
if (typeToProcess.equals("colloquial")) {
documentTypeColloquial = true;
}
if (typeToProcess.equals("scientific")) {
documentTypeScientific = true;
}
// get the dct information
String dctValue = "";
int dctCentury = 0;
int dctYear = 0;
int dctDecade = 0;
int dctMonth = 0;
int dctDay = 0;
String dctSeason = "";
String dctQuarter = "";
String dctHalf = "";
int dctWeekday = 0;
int dctWeek = 0;
// ////////////////////////////////////////////
// INFORMATION ABOUT DOCUMENT CREATION TIME //
// ////////////////////////////////////////////
FSIterator dctIter = jcas.getAnnotationIndex(Dct.type).iterator();
if (dctIter.hasNext()) {
dctAvailable = true;
Dct dct = (Dct) dctIter.next();
dctValue = dct.getValue();
// year, month, day as mentioned in the DCT
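			// two accepted DCT formats (cf. isValidDCT): compact "YYYYMMDD",
			// e.g. "20190925", or extended "YYYY-MM-DD...", e.g. "2019-09-25"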
if (dctValue.matches("\\d\\d\\d\\d\\d\\d\\d\\d")) {
dctCentury = Integer.parseInt(dctValue.substring(0, 2));
dctYear = Integer.parseInt(dctValue.substring(0, 4));
dctDecade = Integer.parseInt(dctValue.substring(2, 3));
dctMonth = Integer.parseInt(dctValue.substring(4, 6));
dctDay = Integer.parseInt(dctValue.substring(6, 8));
Logger.printDetail("dctCentury:" + dctCentury);
Logger.printDetail("dctYear:" + dctYear);
Logger.printDetail("dctDecade:" + dctDecade);
Logger.printDetail("dctMonth:" + dctMonth);
Logger.printDetail("dctDay:" + dctDay);
} else {
dctCentury = Integer.parseInt(dctValue.substring(0, 2));
dctYear = Integer.parseInt(dctValue.substring(0, 4));
dctDecade = Integer.parseInt(dctValue.substring(2, 3));
dctMonth = Integer.parseInt(dctValue.substring(5, 7));
dctDay = Integer.parseInt(dctValue.substring(8, 10));
Logger.printDetail("dctCentury:" + dctCentury);
Logger.printDetail("dctYear:" + dctYear);
Logger.printDetail("dctDecade:" + dctDecade);
Logger.printDetail("dctMonth:" + dctMonth);
Logger.printDetail("dctDay:" + dctDay);
}
dctQuarter = "Q" + norm.getFromNormMonthInQuarter(norm.getFromNormNumber(dctMonth + ""));
dctHalf = "H1";
if (dctMonth > 6) {
dctHalf = "H2";
}
// season, week, weekday, have to be calculated
dctSeason = norm.getFromNormMonthInSeason(norm.getFromNormNumber(dctMonth + "") + "");
dctWeekday = DateCalculator.getWeekdayOfDate(
dctYear + "-" + norm.getFromNormNumber(dctMonth + "") + "-" + norm.getFromNormNumber(dctDay + ""));
dctWeek = DateCalculator.getWeekOfDate(
dctYear + "-" + norm.getFromNormNumber(dctMonth + "") + "-" + norm.getFromNormNumber(dctDay + ""));
Logger.printDetail("dctQuarter:" + dctQuarter);
Logger.printDetail("dctSeason:" + dctSeason);
Logger.printDetail("dctWeekday:" + dctWeekday);
Logger.printDetail("dctWeek:" + dctWeek);
} else {
Logger.printDetail("No DCT available...");
}
// check if value_i has month, day, season, week (otherwise no
// UNDEF-year is possible)
Boolean viHasMonth = false;
Boolean viHasDay = false;
Boolean viHasSeason = false;
Boolean viHasWeek = false;
Boolean viHasQuarter = false;
Boolean viHasHalf = false;
int viThisMonth = 0;
int viThisDay = 0;
String viThisSeason = "";
String viThisQuarter = "";
String viThisHalf = "";
String[] valueParts = ambigString.split("-");
// check if UNDEF-year or UNDEF-century
if ((ambigString.startsWith("UNDEF-year")) || (ambigString.startsWith("UNDEF-century"))) {
if (valueParts.length > 2) {
// get vi month
if (valueParts[2].matches("\\d\\d")) {
viHasMonth = true;
viThisMonth = Integer.parseInt(valueParts[2]);
}
// get vi season
else if ((valueParts[2].equals("SP")) || (valueParts[2].equals("SU")) || (valueParts[2].equals("FA"))
|| (valueParts[2].equals("WI"))) {
viHasSeason = true;
viThisSeason = valueParts[2];
}
				// get vi quarter
else if ((valueParts[2].equals("Q1")) || (valueParts[2].equals("Q2")) || (valueParts[2].equals("Q3"))
|| (valueParts[2].equals("Q4"))) {
viHasQuarter = true;
viThisQuarter = valueParts[2];
}
				// get vi half
else if ((valueParts[2].equals("H1")) || (valueParts[2].equals("H2"))) {
viHasHalf = true;
viThisHalf = valueParts[2];
}
// get vi day
if ((valueParts.length > 3) && (valueParts[3].matches("\\d\\d"))) {
viHasDay = true;
viThisDay = Integer.parseInt(valueParts[3]);
}
}
} else {
if (valueParts.length > 1) {
// get vi month
if (valueParts[1].matches("\\d\\d")) {
viHasMonth = true;
viThisMonth = Integer.parseInt(valueParts[1]);
}
// get vi season
else if ((valueParts[1].equals("SP")) || (valueParts[1].equals("SU")) || (valueParts[1].equals("FA"))
|| (valueParts[1].equals("WI"))) {
viHasSeason = true;
viThisSeason = valueParts[1];
}
				// get vi quarter
else if ((valueParts[1].equals("Q1")) || (valueParts[1].equals("Q2")) || (valueParts[1].equals("Q3"))
|| (valueParts[1].equals("Q4"))) {
viHasQuarter = true;
viThisQuarter = valueParts[1];
}
				// get vi half
else if ((valueParts[1].equals("H1")) || (valueParts[1].equals("H2"))) {
viHasHalf = true;
viThisHalf = valueParts[1];
}
// get vi day
if ((valueParts.length > 2) && (valueParts[2].matches("\\d\\d"))) {
viHasDay = true;
viThisDay = Integer.parseInt(valueParts[2]);
}
}
}
// get the last tense (depending on the part of speech tags used in
// front or behind the expression)
String last_used_tense = ContextAnalyzer.getLastTense(t_i, jcas, language);
//////////////////////////
// DISAMBIGUATION PHASE //
//////////////////////////
////////////////////////////////////////////////////
// IF YEAR IS COMPLETELY UNSPECIFIED (UNDEF-year) //
////////////////////////////////////////////////////
String valueNew = ambigString;
if (ambigString.startsWith("UNDEF-year")) {
String newYearValue = dctYear + "";
// vi has month (ignore day)
if ((viHasMonth == true) && (viHasSeason == false)) {
// WITH DOCUMENT CREATION TIME
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
// Tense is FUTURE
if ((last_used_tense.equals("FUTURE")) || (last_used_tense.equals("PRESENTFUTURE"))) {
					// if dct-month is larger than vi-month, then add 1 to dct-year
if (dctMonth > viThisMonth) {
int intNewYear = dctYear + 1;
newYearValue = intNewYear + "";
}
}
// Tense is PAST
if ((last_used_tense.equals("PAST"))) {
					// if dct-month is smaller than vi-month, then subtract 1 from dct-year
if (dctMonth < viThisMonth) {
int intNewYear = dctYear - 1;
newYearValue = intNewYear + "";
}
}
}
// WITHOUT DOCUMENT CREATION TIME
else {
newYearValue = ContextAnalyzer.getLastMentionedX(linearDates, i, "year", language);
}
}
		// vi has quarter
if (viHasQuarter == true) {
// WITH DOCUMENT CREATION TIME
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
// Tense is FUTURE
if ((last_used_tense.equals("FUTURE")) || (last_used_tense.equals("PRESENTFUTURE"))) {
if (Integer.parseInt(dctQuarter.substring(1)) < Integer.parseInt(viThisQuarter.substring(1))) {
int intNewYear = dctYear + 1;
newYearValue = intNewYear + "";
}
}
// Tense is PAST
if ((last_used_tense.equals("PAST"))) {
if (Integer.parseInt(dctQuarter.substring(1)) < Integer.parseInt(viThisQuarter.substring(1))) {
int intNewYear = dctYear - 1;
newYearValue = intNewYear + "";
}
}
// IF NO TENSE IS FOUND
if (last_used_tense.equals("")) {
if (documentTypeColloquial) {
// IN COLLOQUIAL: future temporal expressions
if (Integer.parseInt(dctQuarter.substring(1)) < Integer
.parseInt(viThisQuarter.substring(1))) {
int intNewYear = dctYear + 1;
newYearValue = intNewYear + "";
}
} else {
// IN NEWS: past temporal expressions
if (Integer.parseInt(dctQuarter.substring(1)) < Integer
.parseInt(viThisQuarter.substring(1))) {
int intNewYear = dctYear - 1;
newYearValue = intNewYear + "";
}
}
}
}
// WITHOUT DOCUMENT CREATION TIME
else {
newYearValue = ContextAnalyzer.getLastMentionedX(linearDates, i, "year", language);
}
}
// vi has half
if (viHasHalf == true) {
// WITH DOCUMENT CREATION TIME
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
// Tense is FUTURE
if ((last_used_tense.equals("FUTURE")) || (last_used_tense.equals("PRESENTFUTURE"))) {
if (Integer.parseInt(dctHalf.substring(1)) < Integer.parseInt(viThisHalf.substring(1))) {
int intNewYear = dctYear + 1;
newYearValue = intNewYear + "";
}
}
// Tense is PAST
if ((last_used_tense.equals("PAST"))) {
if (Integer.parseInt(dctHalf.substring(1)) < Integer.parseInt(viThisHalf.substring(1))) {
int intNewYear = dctYear - 1;
newYearValue = intNewYear + "";
}
}
// IF NO TENSE IS FOUND
if (last_used_tense.equals("")) {
if (documentTypeColloquial) {
// IN COLLOQUIAL: future temporal expressions
if (Integer.parseInt(dctHalf.substring(1)) < Integer.parseInt(viThisHalf.substring(1))) {
int intNewYear = dctYear + 1;
newYearValue = intNewYear + "";
}
} else {
// IN NEWS: past temporal expressions
if (Integer.parseInt(dctHalf.substring(1)) < Integer.parseInt(viThisHalf.substring(1))) {
int intNewYear = dctYear - 1;
newYearValue = intNewYear + "";
}
}
}
}
// WITHOUT DOCUMENT CREATION TIME
else {
newYearValue = ContextAnalyzer.getLastMentionedX(linearDates, i, "year", language);
}
}
// vi has season
if ((viHasMonth == false) && (viHasDay == false) && (viHasSeason == true)) {
// TODO check tenses?
// WITH DOCUMENT CREATION TIME
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
newYearValue = dctYear + "";
}
// WITHOUT DOCUMENT CREATION TIME
else {
newYearValue = ContextAnalyzer.getLastMentionedX(linearDates, i, "year", language);
}
}
// vi has week
if (viHasWeek) {
// WITH DOCUMENT CREATION TIME
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
newYearValue = dctYear + "";
}
// WITHOUT DOCUMENT CREATION TIME
else {
newYearValue = ContextAnalyzer.getLastMentionedX(linearDates, i, "year", language);
}
}
// REPLACE THE UNDEF-YEAR WITH THE NEWLY CALCULATED YEAR AND ADD
// TIMEX TO INDEXES
if (newYearValue.equals("")) {
valueNew = ambigString.replaceFirst("UNDEF-year", "XXXX");
} else {
valueNew = ambigString.replaceFirst("UNDEF-year", newYearValue);
}
}
///////////////////////////////////////////////////
// just century is unspecified (UNDEF-century86) //
///////////////////////////////////////////////////
else if ((ambigString.startsWith("UNDEF-century"))) {
String newCenturyValue = dctCentury + "";
// NEWS and COLLOQUIAL DOCUMENTS
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)
&& !ambigString.equals("UNDEF-century")) {
int viThisDecade = Integer.parseInt(ambigString.substring(13, 14));
Logger.printDetail("dctCentury" + dctCentury);
newCenturyValue = dctCentury + "";
Logger.printDetail("dctCentury" + dctCentury);
// Tense is FUTURE
if ((last_used_tense.equals("FUTURE")) || (last_used_tense.equals("PRESENTFUTURE"))) {
if (viThisDecade < dctDecade) {
newCenturyValue = dctCentury + 1 + "";
} else {
newCenturyValue = dctCentury + "";
}
}
// Tense is PAST
if ((last_used_tense.equals("PAST"))) {
if (dctDecade < viThisDecade) {
newCenturyValue = dctCentury - 1 + "";
} else {
newCenturyValue = dctCentury + "";
}
}
}
// NARRATIVE DOCUMENTS
else {
newCenturyValue = ContextAnalyzer.getLastMentionedX(linearDates, i, "century", language);
if (!(newCenturyValue.startsWith("BC"))) {
if ((newCenturyValue.matches("^\\d\\d.*"))
&& (Integer.parseInt(newCenturyValue.substring(0, 2)) < 10)) {
newCenturyValue = "00";
}
} else {
newCenturyValue = "00";
}
}
if (newCenturyValue.equals("")) {
if (!(documentTypeNarrative)) {
// always assume that sixties, twenties, and so on are 19XX
// if no century found (LREC change)
valueNew = ambigString.replaceFirst("UNDEF-century", "19");
}
// LREC change: assume in narrative-style documents that if no
// other century was mentioned before, 1st century
else {
valueNew = ambigString.replaceFirst("UNDEF-century", "00");
}
} else {
valueNew = ambigString.replaceFirst("UNDEF-century", newCenturyValue);
}
// always assume that sixties, twenties, and so on are 19XX -- if
// not narrative document (LREC change)
if ((valueNew.matches("\\d\\d\\d")) && (!(documentTypeNarrative))) {
valueNew = "19" + valueNew.substring(2);
}
}
////////////////////////////////////////////////////
// CHECK IMPLICIT EXPRESSIONS STARTING WITH UNDEF //
////////////////////////////////////////////////////
else if (ambigString.startsWith("UNDEF")) {
valueNew = ambigString;
if (ambigString.matches("^UNDEF-REFDATE$")) {
if (i > 0) {
Timex3 anyDate = linearDates.get(i - 1);
String lmDate = anyDate.getTimexValue();
valueNew = lmDate;
} else {
valueNew = "XXXX-XX-XX";
}
//////////////////
// TO CALCULATE //
//////////////////
// year to calculate
} else if (ambigString.matches("^UNDEF-(this|REFUNIT|REF)-(.*?)-(MINUS|PLUS)-([0-9]+).*")) {
for (MatchResult mr : Toolbox.findMatches(
Pattern.compile("^(UNDEF-(this|REFUNIT|REF)-(.*?)-(MINUS|PLUS)-([0-9]+)).*"), ambigString)) {
String checkUndef = mr.group(1);
String ltn = mr.group(2);
String unit = mr.group(3);
String op = mr.group(4);
String sDiff = mr.group(5);
int diff = 0;
try {
diff = Integer.parseInt(sDiff);
} catch (Exception e) {
Logger.printError(component, "Expression difficult to normalize: ");
Logger.printError(component, ambigString);
Logger.printError(component, sDiff + " probably too long for parsing as integer.");
Logger.printError(component, "set normalized value as PAST_REF / FUTURE_REF:");
if (op.equals("PLUS")) {
valueNew = "FUTURE_REF";
} else {
valueNew = "PAST_REF";
}
break;
}
// do the processing for SCIENTIFIC documents (TPZ
// identification could be improved)
if ((documentTypeScientific)) {
String opSymbol = "-";
if (op.equals("PLUS")) {
opSymbol = "+";
}
if (unit.equals("year")) {
String diffString = diff + "";
if (diff < 10) {
diffString = "000" + diff;
} else if (diff < 100) {
diffString = "00" + diff;
} else if (diff < 1000) {
diffString = "0" + diff;
}
valueNew = "TPZ" + opSymbol + diffString;
} else if (unit.equals("month")) {
String diffString = diff + "";
if (diff < 10) {
diffString = "0000-0" + diff;
} else {
diffString = "0000-" + diff;
}
valueNew = "TPZ" + opSymbol + diffString;
} else if (unit.equals("week")) {
String diffString = diff + "";
if (diff < 10) {
diffString = "0000-W0" + diff;
} else {
diffString = "0000-W" + diff;
}
valueNew = "TPZ" + opSymbol + diffString;
} else if (unit.equals("day")) {
String diffString = diff + "";
if (diff < 10) {
diffString = "0000-00-0" + diff;
} else {
diffString = "0000-00-" + diff;
}
valueNew = "TPZ" + opSymbol + diffString;
} else if (unit.equals("hour")) {
String diffString = diff + "";
if (diff < 10) {
diffString = "0000-00-00T0" + diff;
} else {
diffString = "0000-00-00T" + diff;
}
valueNew = "TPZ" + opSymbol + diffString;
} else if (unit.equals("minute")) {
String diffString = diff + "";
if (diff < 10) {
diffString = "0000-00-00T00:0" + diff;
} else {
diffString = "0000-00-00T00:" + diff;
}
valueNew = "TPZ" + opSymbol + diffString;
} else if (unit.equals("second")) {
String diffString = diff + "";
if (diff < 10) {
diffString = "0000-00-00T00:00:0" + diff;
} else {
diffString = "0000-00-00T00:00:" + diff;
}
valueNew = "TPZ" + opSymbol + diffString;
}
} else {
// check for REFUNIT (only allowed for "year")
if ((ltn.equals("REFUNIT")) && (unit.equals("year"))) {
String dateWithYear = ContextAnalyzer.getLastMentionedX(linearDates, i, "dateYear",
language);
String year = dateWithYear;
if (dateWithYear.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX");
} else {
if (dateWithYear.startsWith("BC")) {
year = dateWithYear.substring(0, 6);
} else {
year = dateWithYear.substring(0, 4);
}
if (op.equals("MINUS")) {
diff = diff * (-1);
}
String yearNew = DateCalculator.getXNextYear(dateWithYear, diff);
String rest = dateWithYear.substring(4);
valueNew = valueNew.replace(checkUndef, yearNew + rest);
}
}
// REF and this are handled here
if (unit.equals("century")) {
						if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)
&& (ltn.equals("this"))) {
int century = dctCentury;
if (op.equals("MINUS")) {
century = dctCentury - diff;
} else if (op.equals("PLUS")) {
century = dctCentury + diff;
}
valueNew = valueNew.replace(checkUndef, century + "");
} else {
String lmCentury = ContextAnalyzer.getLastMentionedX(linearDates, i, "century",
language);
if (lmCentury.equals("")) {
valueNew = valueNew.replace(checkUndef, "XX");
} else {
if (op.equals("MINUS")) {
diff = (-1) * diff;
}
lmCentury = DateCalculator.getXNextCentury(lmCentury, diff);
valueNew = valueNew.replace(checkUndef, lmCentury);
}
}
} else if (unit.equals("decade")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)
&& (ltn.equals("this"))) {
int dctDecadeLong = Integer.parseInt(dctCentury + "" + dctDecade);
int decade = dctDecadeLong;
if (op.equals("MINUS")) {
decade = dctDecadeLong - diff;
} else if (op.equals("PLUS")) {
decade = dctDecadeLong + diff;
}
valueNew = valueNew.replace(checkUndef, decade + "X");
} else {
String lmDecade = ContextAnalyzer.getLastMentionedX(linearDates, i, "decade", language);
if (lmDecade.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXX");
} else {
if (op.equals("MINUS")) {
diff = (-1) * diff;
}
lmDecade = DateCalculator.getXNextDecade(lmDecade, diff);
valueNew = valueNew.replace(checkUndef, lmDecade);
}
}
} else if (unit.equals("year")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)
&& (ltn.equals("this"))) {
int intValue = dctYear;
if (op.equals("MINUS")) {
intValue = dctYear - diff;
} else if (op.equals("PLUS")) {
intValue = dctYear + diff;
}
valueNew = valueNew.replace(checkUndef, intValue + "");
} else {
String lmYear = ContextAnalyzer.getLastMentionedX(linearDates, i, "year", language);
if (lmYear.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX");
} else {
if (op.equals("MINUS")) {
diff = (-1) * diff;
}
lmYear = DateCalculator.getXNextYear(lmYear, diff);
valueNew = valueNew.replace(checkUndef, lmYear);
}
}
// TODO BC years
} else if (unit.equals("quarter")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)
&& (ltn.equals("this"))) {
int intYear = dctYear;
int intQuarter = Integer.parseInt(dctQuarter.substring(1));
int diffQuarters = diff % 4;
diff = diff - diffQuarters;
int diffYears = diff / 4;
if (op.equals("MINUS")) {
diffQuarters = diffQuarters * (-1);
diffYears = diffYears * (-1);
}
intYear = intYear + diffYears;
intQuarter = intQuarter + diffQuarters;
valueNew = valueNew.replace(checkUndef, intYear + "-Q" + intQuarter);
} else {
String lmQuarter = ContextAnalyzer.getLastMentionedX(linearDates, i, "quarter",
language);
if (lmQuarter.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
int intYear = Integer.parseInt(lmQuarter.substring(0, 4));
int intQuarter = Integer.parseInt(lmQuarter.substring(6));
int diffQuarters = diff % 4;
diff = diff - diffQuarters;
int diffYears = diff / 4;
if (op.equals("MINUS")) {
diffQuarters = diffQuarters * (-1);
diffYears = diffYears * (-1);
}
intYear = intYear + diffYears;
intQuarter = intQuarter + diffQuarters;
valueNew = valueNew.replace(checkUndef, intYear + "-Q" + intQuarter);
}
}
} else if (unit.equals("month")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)
&& (ltn.equals("this"))) {
if (op.equals("MINUS")) {
diff = diff * (-1);
}
valueNew = valueNew.replace(checkUndef, DateCalculator
.getXNextMonth(dctYear + "-" + norm.getFromNormNumber(dctMonth + ""), diff));
} else {
String lmMonth = ContextAnalyzer.getLastMentionedX(linearDates, i, "month", language);
if (lmMonth.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
if (op.equals("MINUS")) {
diff = diff * (-1);
}
valueNew = valueNew.replace(checkUndef,
DateCalculator.getXNextMonth(lmMonth, diff));
}
}
} else if (unit.equals("week")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)
&& (ltn.equals("this"))) {
if (op.equals("MINUS")) {
diff = diff * (-1);
						}
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextWeek(
dctYear + "-W" + norm.getFromNormNumber(dctWeek + ""), diff, language));
} else {
String lmDay = ContextAnalyzer.getLastMentionedX(linearDates, i, "day", language);
if (lmDay.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX-XX");
} else {
if (op.equals("MINUS")) {
diff = diff * 7 * (-1);
} else if (op.equals("PLUS")) {
diff = diff * 7;
}
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextDay(lmDay, diff));
}
}
} else if (unit.equals("day")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)
&& (ltn.equals("this"))) {
if (op.equals("MINUS")) {
diff = diff * (-1);
}
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextDay(
dctYear + "-" + norm.getFromNormNumber(dctMonth + "") + "-" + dctDay, diff));
} else {
String lmDay = ContextAnalyzer.getLastMentionedX(linearDates, i, "day", language);
if (lmDay.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX-XX");
} else {
if (op.equals("MINUS")) {
diff = diff * (-1);
}
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextDay(lmDay, diff));
}
}
}
}
}
}
// century
else if (ambigString.startsWith("UNDEF-last-century")) {
String checkUndef = "UNDEF-last-century";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, norm.getFromNormNumber(dctCentury - 1 + ""));
} else {
String lmCentury = ContextAnalyzer.getLastMentionedX(linearDates, i, "century", language);
if (lmCentury.equals("")) {
valueNew = valueNew.replace(checkUndef, "XX");
} else {
lmCentury = DateCalculator.getXNextCentury(lmCentury, -1);
valueNew = valueNew.replace(checkUndef, lmCentury);
}
}
} else if (ambigString.startsWith("UNDEF-this-century")) {
String checkUndef = "UNDEF-this-century";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, norm.getFromNormNumber(dctCentury + ""));
} else {
String lmCentury = ContextAnalyzer.getLastMentionedX(linearDates, i, "century", language);
if (lmCentury.equals("")) {
valueNew = valueNew.replace(checkUndef, "XX");
} else {
valueNew = valueNew.replace(checkUndef, lmCentury);
}
}
} else if (ambigString.startsWith("UNDEF-next-century")) {
String checkUndef = "UNDEF-next-century";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, norm.getFromNormNumber(dctCentury + 1 + ""));
} else {
String lmCentury = ContextAnalyzer.getLastMentionedX(linearDates, i, "century", language);
if (lmCentury.equals("")) {
valueNew = valueNew.replace(checkUndef, "XX");
} else {
lmCentury = DateCalculator.getXNextCentury(lmCentury, +1);
valueNew = valueNew.replace(checkUndef, lmCentury);
}
}
}
// decade
else if (ambigString.startsWith("UNDEF-last-decade")) {
String checkUndef = "UNDEF-last-decade";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, (dctYear - 10 + "").substring(0, 3));
} else {
String lmDecade = ContextAnalyzer.getLastMentionedX(linearDates, i, "decade", language);
if (lmDecade.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX");
} else {
lmDecade = DateCalculator.getXNextDecade(lmDecade, -1);
valueNew = valueNew.replace(checkUndef, lmDecade);
}
}
} else if (ambigString.startsWith("UNDEF-this-decade")) {
String checkUndef = "UNDEF-this-decade";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, (dctYear + "").substring(0, 3));
} else {
String lmDecade = ContextAnalyzer.getLastMentionedX(linearDates, i, "decade", language);
if (lmDecade.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX");
} else {
valueNew = valueNew.replace(checkUndef, lmDecade);
}
}
} else if (ambigString.startsWith("UNDEF-next-decade")) {
String checkUndef = "UNDEF-next-decade";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, (dctYear + 10 + "").substring(0, 3));
} else {
String lmDecade = ContextAnalyzer.getLastMentionedX(linearDates, i, "decade", language);
if (lmDecade.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX");
} else {
lmDecade = DateCalculator.getXNextDecade(lmDecade, 1);
valueNew = valueNew.replace(checkUndef, lmDecade);
}
}
}
// year
else if (ambigString.startsWith("UNDEF-last-year")) {
String checkUndef = "UNDEF-last-year";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, dctYear - 1 + "");
} else {
String lmYear = ContextAnalyzer.getLastMentionedX(linearDates, i, "year", language);
if (lmYear.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX");
} else {
lmYear = DateCalculator.getXNextYear(lmYear, -1);
valueNew = valueNew.replace(checkUndef, lmYear);
}
}
if (valueNew.endsWith("-FY")) {
valueNew = "FY" + valueNew.substring(0, Math.min(valueNew.length(), 4));
}
} else if (ambigString.startsWith("UNDEF-this-year")) {
String checkUndef = "UNDEF-this-year";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, dctYear + "");
} else {
String lmYear = ContextAnalyzer.getLastMentionedX(linearDates, i, "year", language);
if (lmYear.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX");
} else {
valueNew = valueNew.replace(checkUndef, lmYear);
}
}
if (valueNew.endsWith("-FY")) {
valueNew = "FY" + valueNew.substring(0, Math.min(valueNew.length(), 4));
}
} else if (ambigString.startsWith("UNDEF-next-year")) {
String checkUndef = "UNDEF-next-year";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, dctYear + 1 + "");
} else {
String lmYear = ContextAnalyzer.getLastMentionedX(linearDates, i, "year", language);
if (lmYear.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX");
} else {
lmYear = DateCalculator.getXNextYear(lmYear, 1);
valueNew = valueNew.replace(checkUndef, lmYear);
}
}
if (valueNew.endsWith("-FY")) {
valueNew = "FY" + valueNew.substring(0, Math.min(valueNew.length(), 4));
}
}
// month
else if (ambigString.startsWith("UNDEF-last-month")) {
String checkUndef = "UNDEF-last-month";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef,
DateCalculator.getXNextMonth(dctYear + "-" + norm.getFromNormNumber(dctMonth + ""), -1));
} else {
String lmMonth = ContextAnalyzer.getLastMentionedX(linearDates, i, "month", language);
if (lmMonth.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextMonth(lmMonth, -1));
}
}
} else if (ambigString.startsWith("UNDEF-this-month")) {
String checkUndef = "UNDEF-this-month";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + norm.getFromNormNumber(dctMonth + ""));
} else {
String lmMonth = ContextAnalyzer.getLastMentionedX(linearDates, i, "month", language);
if (lmMonth.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
valueNew = valueNew.replace(checkUndef, lmMonth);
}
}
} else if (ambigString.startsWith("UNDEF-next-month")) {
String checkUndef = "UNDEF-next-month";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef,
DateCalculator.getXNextMonth(dctYear + "-" + norm.getFromNormNumber(dctMonth + ""), 1));
} else {
String lmMonth = ContextAnalyzer.getLastMentionedX(linearDates, i, "month", language);
if (lmMonth.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextMonth(lmMonth, 1));
}
}
}
// day
else if (ambigString.startsWith("UNDEF-last-day")) {
String checkUndef = "UNDEF-last-day";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, DateCalculator
.getXNextDay(dctYear + "-" + norm.getFromNormNumber(dctMonth + "") + "-" + dctDay, -1));
} else {
String lmDay = ContextAnalyzer.getLastMentionedX(linearDates, i, "day", language);
if (lmDay.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX-XX");
} else {
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextDay(lmDay, -1));
}
}
} else if (ambigString.startsWith("UNDEF-this-day")) {
String checkUndef = "UNDEF-this-day";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + norm.getFromNormNumber(dctMonth + "") + "-"
+ norm.getFromNormNumber(dctDay + ""));
} else {
String lmDay = ContextAnalyzer.getLastMentionedX(linearDates, i, "day", language);
if (lmDay.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX-XX");
} else {
valueNew = valueNew.replace(checkUndef, lmDay);
}
if (ambigString.equals("UNDEF-this-day")) {
valueNew = "PRESENT_REF";
}
}
} else if (ambigString.startsWith("UNDEF-next-day")) {
String checkUndef = "UNDEF-next-day";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, DateCalculator
.getXNextDay(dctYear + "-" + norm.getFromNormNumber(dctMonth + "") + "-" + dctDay, 1));
} else {
String lmDay = ContextAnalyzer.getLastMentionedX(linearDates, i, "day", language);
if (lmDay.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX-XX");
} else {
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextDay(lmDay, 1));
}
}
}
// week
else if (ambigString.startsWith("UNDEF-last-week")) {
String checkUndef = "UNDEF-last-week";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, DateCalculator
.getXNextWeek(dctYear + "-W" + norm.getFromNormNumber(dctWeek + ""), -1, language));
} else {
String lmWeek = ContextAnalyzer.getLastMentionedX(linearDates, i, "week", language);
if (lmWeek.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-WXX");
} else {
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextWeek(lmWeek, -1, language));
}
}
} else if (ambigString.startsWith("UNDEF-this-week")) {
String checkUndef = "UNDEF-this-week";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, dctYear + "-W" + norm.getFromNormNumber(dctWeek + ""));
} else {
String lmWeek = ContextAnalyzer.getLastMentionedX(linearDates, i, "week", language);
if (lmWeek.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-WXX");
} else {
valueNew = valueNew.replace(checkUndef, lmWeek);
}
}
} else if (ambigString.startsWith("UNDEF-next-week")) {
String checkUndef = "UNDEF-next-week";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, DateCalculator
.getXNextWeek(dctYear + "-W" + norm.getFromNormNumber(dctWeek + ""), 1, language));
} else {
String lmWeek = ContextAnalyzer.getLastMentionedX(linearDates, i, "week", language);
if (lmWeek.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-WXX");
} else {
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextWeek(lmWeek, 1, language));
}
}
}
// quarter
else if (ambigString.startsWith("UNDEF-last-quarter")) {
String checkUndef = "UNDEF-last-quarter";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
if (dctQuarter.equals("Q1")) {
valueNew = valueNew.replace(checkUndef, dctYear - 1 + "-Q4");
} else {
int newQuarter = Integer.parseInt(dctQuarter.substring(1, 2)) - 1;
valueNew = valueNew.replace(checkUndef, dctYear + "-Q" + newQuarter);
}
} else {
String lmQuarter = ContextAnalyzer.getLastMentionedX(linearDates, i, "quarter", language);
if (lmQuarter.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-QX");
} else {
int lmQuarterOnly = Integer.parseInt(lmQuarter.substring(6, 7));
int lmYearOnly = Integer.parseInt(lmQuarter.substring(0, 4));
if (lmQuarterOnly == 1) {
valueNew = valueNew.replace(checkUndef, lmYearOnly - 1 + "-Q4");
} else {
int newQuarter = lmQuarterOnly - 1;
valueNew = valueNew.replace(checkUndef, lmYearOnly + "-Q" + newQuarter);
}
}
}
} else if (ambigString.startsWith("UNDEF-this-quarter")) {
String checkUndef = "UNDEF-this-quarter";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + dctQuarter);
} else {
String lmQuarter = ContextAnalyzer.getLastMentionedX(linearDates, i, "quarter", language);
if (lmQuarter.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-QX");
} else {
valueNew = valueNew.replace(checkUndef, lmQuarter);
}
}
} else if (ambigString.startsWith("UNDEF-next-quarter")) {
String checkUndef = "UNDEF-next-quarter";
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
if (dctQuarter.equals("Q4")) {
valueNew = valueNew.replace(checkUndef, dctYear + 1 + "-Q1");
} else {
int newQuarter = Integer.parseInt(dctQuarter.substring(1, 2)) + 1;
valueNew = valueNew.replace(checkUndef, dctYear + "-Q" + newQuarter);
}
} else {
String lmQuarter = ContextAnalyzer.getLastMentionedX(linearDates, i, "quarter", language);
if (lmQuarter.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-QX");
} else {
int lmQuarterOnly = Integer.parseInt(lmQuarter.substring(6, 7));
int lmYearOnly = Integer.parseInt(lmQuarter.substring(0, 4));
if (lmQuarterOnly == 4) {
valueNew = valueNew.replace(checkUndef, lmYearOnly + 1 + "-Q1");
} else {
int newQuarter = lmQuarterOnly + 1;
valueNew = valueNew.replace(checkUndef, lmYearOnly + "-Q" + newQuarter);
}
}
}
}
// MONTH NAMES
else if (ambigString.matches(
"UNDEF-(last|this|next)-(january|february|march|april|may|june|july|august|september|october|november|december).*")) {
for (MatchResult mr : Toolbox.findMatches(Pattern.compile(
"(UNDEF-(last|this|next)-(january|february|march|april|may|june|july|august|september|october|november|december))(.*)"),
ambigString)) {
String rest = mr.group(4);
int day = 0;
for (MatchResult mr_rest : Toolbox.findMatches(Pattern.compile("-([0-9][0-9])"), rest)) {
day = Integer.parseInt(mr_rest.group(1));
}
String checkUndef = mr.group(1);
String ltn = mr.group(2);
String newMonth = norm.getFromNormMonthName((mr.group(3)));
int newMonthInt = Integer.parseInt(newMonth);
if (ltn.equals("last")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
// check day if dct-month and newMonth are equal
if ((dctMonth == newMonthInt) && (!(day == 0))) {
if (dctDay > day) {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newMonth);
} else {
valueNew = valueNew.replace(checkUndef, dctYear - 1 + "-" + newMonth);
}
} else if (dctMonth <= newMonthInt) {
valueNew = valueNew.replace(checkUndef, dctYear - 1 + "-" + newMonth);
} else {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newMonth);
}
} else {
String lmMonth = ContextAnalyzer.getLastMentionedX(linearDates, i, "month-with-details",
language);
if (lmMonth.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
int lmMonthInt = Integer.parseInt(lmMonth.substring(5, 7));
//
int lmDayInt = 0;
if ((lmMonth.length() > 9)
&& (lmMonth.subSequence(8, 10).toString().matches("\\d\\d"))) {
lmDayInt = Integer.parseInt(lmMonth.subSequence(8, 10) + "");
}
if ((lmMonthInt == newMonthInt) && (!(lmDayInt == 0)) && (!(day == 0))) {
if (lmDayInt > day) {
valueNew = valueNew.replace(checkUndef,
lmMonth.substring(0, 4) + "-" + newMonth);
} else {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmMonth.substring(0, 4)) - 1 + "-" + newMonth);
}
}
if (lmMonthInt <= newMonthInt) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmMonth.substring(0, 4)) - 1 + "-" + newMonth);
} else {
valueNew = valueNew.replace(checkUndef, lmMonth.substring(0, 4) + "-" + newMonth);
}
}
}
} else if (ltn.equals("this")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newMonth);
} else {
String lmMonth = ContextAnalyzer.getLastMentionedX(linearDates, i, "month-with-details",
language);
if (lmMonth.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
valueNew = valueNew.replace(checkUndef, lmMonth.substring(0, 4) + "-" + newMonth);
}
}
} else if (ltn.equals("next")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
// check day if dct-month and newMonth are equal
if ((dctMonth == newMonthInt) && (!(day == 0))) {
if (dctDay < day) {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newMonth);
} else {
valueNew = valueNew.replace(checkUndef, dctYear + 1 + "-" + newMonth);
}
} else if (dctMonth >= newMonthInt) {
valueNew = valueNew.replace(checkUndef, dctYear + 1 + "-" + newMonth);
} else {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newMonth);
}
} else {
String lmMonth = ContextAnalyzer.getLastMentionedX(linearDates, i, "month-with-details",
language);
if (lmMonth.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
int lmMonthInt = Integer.parseInt(lmMonth.substring(5, 7));
if (lmMonthInt >= newMonthInt) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmMonth.substring(0, 4)) + 1 + "-" + newMonth);
} else {
valueNew = valueNew.replace(checkUndef, lmMonth.substring(0, 4) + "-" + newMonth);
}
}
}
}
}
}
// SEASONS NAMES
else if (ambigString.matches("^UNDEF-(last|this|next)-(SP|SU|FA|WI).*")) {
for (MatchResult mr : Toolbox.findMatches(Pattern.compile("(UNDEF-(last|this|next)-(SP|SU|FA|WI)).*"),
ambigString)) {
String checkUndef = mr.group(1);
String ltn = mr.group(2);
String newSeason = mr.group(3);
if (ltn.equals("last")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
if (dctSeason.equals("SP")) {
valueNew = valueNew.replace(checkUndef, dctYear - 1 + "-" + newSeason);
} else if (dctSeason.equals("SU")) {
if (newSeason.equals("SP")) {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef, dctYear - 1 + "-" + newSeason);
}
} else if (dctSeason.equals("FA")) {
if ((newSeason.equals("SP")) || (newSeason.equals("SU"))) {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef, dctYear - 1 + "-" + newSeason);
}
} else if (dctSeason.equals("WI")) {
if (newSeason.equals("WI")) {
valueNew = valueNew.replace(checkUndef, dctYear - 1 + "-" + newSeason);
} else {
if (dctMonth < 12) {
valueNew = valueNew.replace(checkUndef, dctYear - 1 + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newSeason);
}
}
}
                } else { // NARRATIVE DOCUMENT
String lmSeason = ContextAnalyzer.getLastMentionedX(linearDates, i, "season", language);
if (lmSeason.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
if (lmSeason.substring(5, 7).equals("SP")) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) - 1 + "-" + newSeason);
} else if (lmSeason.substring(5, 7).equals("SU")) {
if (lmSeason.substring(5, 7).equals("SP")) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) - 1 + "-" + newSeason);
}
} else if (lmSeason.substring(5, 7).equals("FA")) {
if ((newSeason.equals("SP")) || (newSeason.equals("SU"))) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) - 1 + "-" + newSeason);
}
} else if (lmSeason.substring(5, 7).equals("WI")) {
if (newSeason.equals("WI")) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) - 1 + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + "-" + newSeason);
}
}
}
}
} else if (ltn.equals("this")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
// TODO include tense of sentence?
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newSeason);
} else {
// TODO include tense of sentence?
String lmSeason = ContextAnalyzer.getLastMentionedX(linearDates, i, "season", language);
if (lmSeason.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
valueNew = valueNew.replace(checkUndef, lmSeason.substring(0, 4) + "-" + newSeason);
}
}
} else if (ltn.equals("next")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
if (dctSeason.equals("SP")) {
if (newSeason.equals("SP")) {
valueNew = valueNew.replace(checkUndef, dctYear + 1 + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newSeason);
}
} else if (dctSeason.equals("SU")) {
if ((newSeason.equals("SP")) || (newSeason.equals("SU"))) {
valueNew = valueNew.replace(checkUndef, dctYear + 1 + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newSeason);
}
} else if (dctSeason.equals("FA")) {
if (newSeason.equals("WI")) {
valueNew = valueNew.replace(checkUndef, dctYear + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef, dctYear + 1 + "-" + newSeason);
}
} else if (dctSeason.equals("WI")) {
valueNew = valueNew.replace(checkUndef, dctYear + 1 + "-" + newSeason);
}
} else { // NARRATIVE DOCUMENT
String lmSeason = ContextAnalyzer.getLastMentionedX(linearDates, i, "season", language);
if (lmSeason.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX");
} else {
if (lmSeason.substring(5, 7).equals("SP")) {
if (newSeason.equals("SP")) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + 1 + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + "-" + newSeason);
}
} else if (lmSeason.substring(5, 7).equals("SU")) {
if ((newSeason.equals("SP")) || (newSeason.equals("SU"))) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + 1 + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + "-" + newSeason);
}
} else if (lmSeason.substring(5, 7).equals("FA")) {
if (newSeason.equals("WI")) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + "-" + newSeason);
} else {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + 1 + "-" + newSeason);
}
} else if (lmSeason.substring(5, 7).equals("WI")) {
valueNew = valueNew.replace(checkUndef,
Integer.parseInt(lmSeason.substring(0, 4)) + 1 + "-" + newSeason);
}
}
}
}
}
}
// WEEKDAY NAMES
// TODO the calculation is strange, but works
// TODO tense should be included?!
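        // Sketch of the arithmetic used below (illustrative; the concrete
        // weekday numbers come from norm.getFromNormDayInWeek): for "last",
        // diff = -(weekdayOfReference - weekdayRequested) is forced into
        // [-7, -1], so the result is always the closest strictly earlier
        // occurrence of the requested weekday; "next" forces the difference
        // into [1, 7] analogously.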
else if (ambigString.matches(
"^UNDEF-(last|this|next|day)-(monday|tuesday|wednesday|thursday|friday|saturday|sunday).*")) {
for (MatchResult mr : Toolbox.findMatches(Pattern.compile(
"(UNDEF-(last|this|next|day)-(monday|tuesday|wednesday|thursday|friday|saturday|sunday)).*"),
ambigString)) {
String checkUndef = mr.group(1);
String ltnd = mr.group(2);
String newWeekday = mr.group(3);
int newWeekdayInt = Integer.parseInt(norm.getFromNormDayInWeek(newWeekday));
if (ltnd.equals("last")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
int diff = (-1) * (dctWeekday - newWeekdayInt);
if (diff >= 0) {
diff = diff - 7;
}
valueNew = valueNew.replace(checkUndef,
DateCalculator.getXNextDay(dctYear + "-" + dctMonth + "-" + dctDay, diff));
} else {
String lmDay = ContextAnalyzer.getLastMentionedX(linearDates, i, "day", language);
if (lmDay.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX-XX");
} else {
int lmWeekdayInt = DateCalculator.getWeekdayOfDate(lmDay);
int diff = (-1) * (lmWeekdayInt - newWeekdayInt);
if (diff >= 0) {
diff = diff - 7;
}
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextDay(lmDay, diff));
}
}
} else if (ltnd.equals("this")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
// TODO tense should be included?!
int diff = (-1) * (dctWeekday - newWeekdayInt);
if (diff >= 0) {
diff = diff - 7;
}
if (diff == -7) {
diff = 0;
}
valueNew = valueNew.replace(checkUndef,
DateCalculator.getXNextDay(dctYear + "-" + dctMonth + "-" + dctDay, diff));
} else {
// TODO tense should be included?!
String lmDay = ContextAnalyzer.getLastMentionedX(linearDates, i, "day", language);
if (lmDay.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX-XX");
} else {
int lmWeekdayInt = DateCalculator.getWeekdayOfDate(lmDay);
int diff = (-1) * (lmWeekdayInt - newWeekdayInt);
if (diff >= 0) {
diff = diff - 7;
}
if (diff == -7) {
diff = 0;
}
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextDay(lmDay, diff));
}
}
} else if (ltnd.equals("next")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
int diff = newWeekdayInt - dctWeekday;
if (diff <= 0) {
diff = diff + 7;
}
valueNew = valueNew.replace(checkUndef,
DateCalculator.getXNextDay(dctYear + "-" + dctMonth + "-" + dctDay, diff));
} else {
String lmDay = ContextAnalyzer.getLastMentionedX(linearDates, i, "day", language);
if (lmDay.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX-XX");
} else {
int lmWeekdayInt = DateCalculator.getWeekdayOfDate(lmDay);
int diff = newWeekdayInt - lmWeekdayInt;
if (diff <= 0) {
diff = diff + 7;
}
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextDay(lmDay, diff));
}
}
} else if (ltnd.equals("day")) {
if ((documentTypeNews || documentTypeColloquial || documentTypeScientific) && (dctAvailable)) {
// TODO tense should be included?!
int diff = (-1) * (dctWeekday - newWeekdayInt);
if (diff >= 0) {
diff = diff - 7;
}
if (diff == -7) {
diff = 0;
}
// Tense is FUTURE
if ((last_used_tense.equals("FUTURE")) && diff != 0) {
diff = diff + 7;
}
                        // Tense is PAST: no shift needed, keep the closest past occurrence
valueNew = valueNew.replace(checkUndef,
DateCalculator.getXNextDay(dctYear + "-" + dctMonth + "-" + dctDay, diff));
} else {
// TODO tense should be included?!
String lmDay = ContextAnalyzer.getLastMentionedX(linearDates, i, "day", language);
if (lmDay.equals("")) {
valueNew = valueNew.replace(checkUndef, "XXXX-XX-XX");
} else {
int lmWeekdayInt = DateCalculator.getWeekdayOfDate(lmDay);
int diff = (-1) * (lmWeekdayInt - newWeekdayInt);
if (diff >= 0) {
diff = diff - 7;
}
if (diff == -7) {
diff = 0;
}
valueNew = valueNew.replace(checkUndef, DateCalculator.getXNextDay(lmDay, diff));
}
}
}
}
} else {
Logger.printDetail(component,
"ATTENTION: UNDEF value for: " + valueNew + " is not handled in disambiguation phase!");
}
}
return valueNew;
}
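    // Illustrative examples for the disambiguation above (dates invented):
    // with DCT 2016-06-24, "UNDEF-last-month" resolves to "2016-05" and
    // "UNDEF-next-quarter" to "2016-Q3"; in a narrative document without a
    // DCT, the last mentioned month/quarter is used instead, falling back to
    // "XXXX-XX" / "XXXX-QX" if nothing was mentioned before.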
/**
* Under-specified values are disambiguated here. Only Timexes of types "date"
* and "time" can be under-specified.
*
* @param jcas
*/
public void specifyAmbiguousValues(JCas jcas) {
// build up a list with all found TIMEX expressions
List<Timex3> linearDates = new ArrayList<Timex3>();
FSIterator iterTimex = jcas.getAnnotationIndex(Timex3.type).iterator();
// Create List of all Timexes of types "date" and "time"
while (iterTimex.hasNext()) {
Timex3 timex = (Timex3) iterTimex.next();
if (timex.getTimexType().equals("DATE") || timex.getTimexType().equals("TIME")) {
linearDates.add(timex);
}
if (timex.getTimexType().equals("DURATION") && !timex.getEmptyValue().equals("")) {
linearDates.add(timex);
}
}
//////////////////////////////////////////////
// go through list of Date and Time timexes //
//////////////////////////////////////////////
for (int i = 0; i < linearDates.size(); i++) {
Timex3 t_i = (Timex3) linearDates.get(i);
String value_i = t_i.getTimexValue();
String valueNew = value_i;
// handle the value attribute only if we have a TIME or DATE
if (t_i.getTimexType().equals("TIME") || t_i.getTimexType().equals("DATE"))
valueNew = specifyAmbiguousValuesString(value_i, t_i, i, linearDates, jcas);
// handle the emptyValue attribute for any type
if (t_i.getEmptyValue() != null && t_i.getEmptyValue().length() > 0) {
String emptyValueNew = specifyAmbiguousValuesString(t_i.getEmptyValue(), t_i, i, linearDates, jcas);
t_i.setEmptyValue(emptyValueNew);
}
t_i.removeFromIndexes();
Logger.printDetail(t_i.getTimexId() + " DISAMBIGUATION PHASE: foundBy:" + t_i.getFoundByRule() + " text:"
+ t_i.getCoveredText() + " value:" + t_i.getTimexValue() + " NEW value:" + valueNew);
t_i.setTimexValue(valueNew);
t_i.addToIndexes();
linearDates.set(i, t_i);
}
}
/**
* @param jcas
*/
private void deleteOverlappedTimexesPreprocessing(JCas jcas) {
FSIterator timexIter1 = jcas.getAnnotationIndex(Timex3.type).iterator();
HashSet<Timex3> hsTimexesToRemove = new HashSet<Timex3>();
while (timexIter1.hasNext()) {
Timex3 t1 = (Timex3) timexIter1.next();
FSIterator timexIter2 = jcas.getAnnotationIndex(Timex3.type).iterator();
while (timexIter2.hasNext()) {
Timex3 t2 = (Timex3) timexIter2.next();
                // t1 starts inside or with t2 and ends before t2, or t1 starts
                // inside t2 and ends with or before t2 -> remove t1
                if (((t1.getBegin() >= t2.getBegin()) && (t1.getEnd() < t2.getEnd()))
                        || ((t1.getBegin() > t2.getBegin()) && (t1.getEnd() <= t2.getEnd()))) {
hsTimexesToRemove.add(t1);
                } else if (((t2.getBegin() >= t1.getBegin()) && (t2.getEnd() < t1.getEnd()))
                        || ((t2.getBegin() > t1.getBegin()) && (t2.getEnd() <= t1.getEnd()))) {
                    // t2 starts inside or with t1 and ends before t1, or t2 starts
                    // inside t1 and ends with or before t1 -> remove t2
hsTimexesToRemove.add(t2);
}
// identical length
if (!t1.equals(t2) && (t1.getBegin() == t2.getBegin()) && (t1.getEnd() == t2.getEnd())) {
if ((t1.getTimexValue().startsWith("UNDEF")) && (!(t2.getTimexValue().startsWith("UNDEF")))) {
hsTimexesToRemove.add(t1);
} else if ((!(t1.getTimexValue().startsWith("UNDEF")))
&& (t2.getTimexValue().startsWith("UNDEF"))) {
hsTimexesToRemove.add(t2);
}
// t1 is explicit, but t2 is not
else if ((t1.getFoundByRule().endsWith("explicit"))
&& (!(t2.getFoundByRule().endsWith("explicit")))) {
hsTimexesToRemove.add(t2);
}
// remove timexes that are identical, but one has an
// emptyvalue
else if (t2.getEmptyValue().equals("") && !t1.getEmptyValue().equals("")) {
hsTimexesToRemove.add(t2);
}
// REMOVE REAL DUPLICATES (the one with the lower timexID)
else if ((Integer.parseInt(t1.getTimexId().substring(1)) < Integer
.parseInt(t2.getTimexId().substring(1)))) {
hsTimexesToRemove.add(t1);
}
}
}
}
// remove, finally
for (Timex3 t : hsTimexesToRemove) {
Logger.printDetail("REMOVE DUPLICATE: " + t.getCoveredText() + "(id:" + t.getTimexId() + " value:"
+ t.getTimexValue() + " found by:" + t.getFoundByRule() + ")");
t.removeFromIndexes();
timex_counter--;
}
}
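    // Illustrative effect of the rules above: of two annotations covering the
    // same span, one with value "UNDEF-..." loses against one with a resolved
    // value, implicit matches lose against rules ending in "explicit", and
    // for exact duplicates the annotation with the lower timexId is removed.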
private void deleteOverlappedTimexesPostprocessing(JCas jcas) {
FSIterator timexIter = jcas.getAnnotationIndex(Timex3.type).iterator();
FSIterator innerTimexIter = timexIter.copy();
HashSet<ArrayList<Timex3>> effectivelyToInspect = new HashSet<ArrayList<Timex3>>();
ArrayList<Timex3> allTimexesToInspect = new ArrayList<Timex3>();
while (timexIter.hasNext()) {
Timex3 myTimex = (Timex3) timexIter.next();
ArrayList<Timex3> timexSet = new ArrayList<Timex3>();
if (!(myTimex.getTimexType().equals("TEMPONYM"))) {
timexSet.add(myTimex);
}
// compare this timex to all other timexes and mark those that have
// an overlap
while (innerTimexIter.hasNext()) {
Timex3 myInnerTimex = (Timex3) innerTimexIter.next();
if (!(myTimex.getTimexType().equals("TEMPONYM"))) {
                    // myTimex starts first and myInnerTimex partially overlaps it;
                    // the same in reverse; myTimex is contained within or identical
                    // to myInnerTimex; or the same in reverse
                    if ((myTimex.getBegin() <= myInnerTimex.getBegin() && myTimex.getEnd() > myInnerTimex.getBegin())
                            || (myInnerTimex.getBegin() <= myTimex.getBegin()
                                    && myInnerTimex.getEnd() > myTimex.getBegin())
                            || (myInnerTimex.getBegin() <= myTimex.getBegin()
                                    && myTimex.getEnd() <= myInnerTimex.getEnd())
                            || (myTimex.getBegin() <= myInnerTimex.getBegin()
                                    && myInnerTimex.getEnd() <= myTimex.getEnd())) {
                        timexSet.add(myInnerTimex); // increase the set
                        // note that these timexes are being looked at
                        allTimexesToInspect.add(myTimex);
                        allTimexesToInspect.add(myInnerTimex);
}
}
}
// if overlaps with myTimex were detected, memorize them
if (timexSet.size() > 1)
effectivelyToInspect.add(timexSet);
// reset the inner iterator
innerTimexIter.moveToFirst();
}
/*
* prune those sets of overlapping timexes that are subsets of others (i.e.
* leave only the largest union of overlapping timexes)
*/
HashSet<ArrayList<Timex3>> newEffectivelyToInspect = new HashSet<ArrayList<Timex3>>();
for (Timex3 t : allTimexesToInspect) {
ArrayList<Timex3> setToKeep = new ArrayList<Timex3>();
// determine the largest set that contains this timex
for (ArrayList<Timex3> tSet : effectivelyToInspect) {
if (tSet.contains(t) && tSet.size() > setToKeep.size())
setToKeep = tSet;
}
newEffectivelyToInspect.add(setToKeep);
}
// overwrite previous list of sets
effectivelyToInspect = newEffectivelyToInspect;
// iterate over the selected sets and merge information, remove old
// timexes
for (ArrayList<Timex3> tSet : effectivelyToInspect) {
Timex3 newTimex = new Timex3(jcas);
// if a timex has the timex value REMOVE, remove it from
// consideration
@SuppressWarnings("unchecked")
ArrayList<Timex3> newTSet = (ArrayList<Timex3>) tSet.clone();
for (Timex3 t : tSet) {
if (t.getTimexValue().equals("REMOVE")) { // remove timexes with
// value "REMOVE"
newTSet.remove(t);
}
}
tSet = newTSet;
// iteration is done if all the timexes have been removed, i.e. the
// set is empty
if (tSet.size() == 0)
continue;
/*
* check - whether all timexes of this set have the same timex type attribute, -
* which one in the set has the longest value attribute string length, - what
* the combined extents are
*/
Boolean allSameTypes = true;
String timexType = null;
Timex3 longestTimex = null;
Integer combinedBegin = Integer.MAX_VALUE, combinedEnd = Integer.MIN_VALUE;
ArrayList<Integer> tokenIds = new ArrayList<Integer>();
for (Timex3 t : tSet) {
// check whether the types are identical and either all DATE or
// TIME
if (timexType == null) {
timexType = t.getTimexType();
} else {
if (allSameTypes && !timexType.equals(t.getTimexType())
|| !(timexType.equals("DATE") || timexType.equals("TIME"))) {
allSameTypes = false;
}
}
Logger.printDetail("Are these overlapping timexes of same type? => " + allSameTypes);
// check timex value attribute string length
if (longestTimex == null) {
longestTimex = t;
} else if (allSameTypes && t.getFoundByRule().indexOf("-BCADhint") != -1) {
longestTimex = t;
} else if (allSameTypes && t.getFoundByRule().indexOf("relative") == -1
&& longestTimex.getFoundByRule().indexOf("relative") != -1) {
longestTimex = t;
} else if (longestTimex.getTimexValue().length() == t.getTimexValue().length()) {
if (t.getBegin() < longestTimex.getBegin())
longestTimex = t;
} else if (longestTimex.getTimexValue().length() < t.getTimexValue().length()) {
longestTimex = t;
}
Logger.printDetail("Selected " + longestTimex.getTimexId() + ": " + longestTimex.getCoveredText() + "["
+ longestTimex.getTimexValue() + "] as the longest-valued timex.");
// check combined beginning/end
if (combinedBegin > t.getBegin())
combinedBegin = t.getBegin();
if (combinedEnd < t.getEnd())
combinedEnd = t.getEnd();
Logger.printDetail("Selected combined constraints: " + combinedBegin + ":" + combinedEnd);
// disassemble and remember the token ids
String[] tokenizedTokenIds = t.getAllTokIds().split("<-->");
for (Integer i = 1; i < tokenizedTokenIds.length; i++) {
if (!tokenIds.contains(Integer.parseInt(tokenizedTokenIds[i]))) {
tokenIds.add(Integer.parseInt(tokenizedTokenIds[i]));
}
}
}
/*
* types are equal => merge constraints, use the longer, "more granular" value.
* if types are not equal, just take the longest value.
*/
Collections.sort(tokenIds);
newTimex = longestTimex;
if (allSameTypes) {
newTimex.setBegin(combinedBegin);
newTimex.setEnd(combinedEnd);
if (tokenIds.size() > 0)
newTimex.setFirstTokId(tokenIds.get(0));
String tokenIdText = "BEGIN";
for (Integer tokenId : tokenIds) {
tokenIdText += "<-->" + tokenId;
}
newTimex.setAllTokIds(tokenIdText);
}
// remove old overlaps.
for (Timex3 t : tSet) {
t.removeFromIndexes();
}
// add the single constructed/chosen timex to the indexes.
newTimex.addToIndexes();
}
}
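    // Illustrative merge (values invented): overlapping DATE annotations
    // "March" [2016-03] and "March 5" [2016-03-05] collapse into a single
    // annotation over the combined span carrying the longer, more granular
    // value "2016-03-05".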
/**
     * Identify the part of speech (POS) of a MatchResult.
*
* @param tokBegin
* @param tokEnd
* @param s
* @param jcas
* @return
*/
public String getPosFromMatchResult(int tokBegin, int tokEnd, Sentence s, JCas jcas) {
// get all tokens in sentence
HashMap<Integer, Token> hmTokens = new HashMap<Integer, Token>();
FSIterator iterTok = jcas.getAnnotationIndex(Token.type).subiterator(s);
while (iterTok.hasNext()) {
Token token = (Token) iterTok.next();
hmTokens.put(token.getBegin(), token);
}
// get correct token
String pos = "";
if (hmTokens.containsKey(tokBegin)) {
Token tokenToCheck = hmTokens.get(tokBegin);
pos = tokenToCheck.getPos() == null ? "" : tokenToCheck.getPos();
}
return pos;
}
/**
* Apply the extraction rules, normalization rules
*
* @param timexType
* @param hmPattern
* @param hmOffset
* @param hmNormalization
* @param s
* @param jcas
*/
public void findTimexes(String timexType, HashMap<Pattern, String> hmPattern, HashMap<String, String> hmOffset,
HashMap<String, String> hmNormalization, Sentence s, JCas jcas) {
RuleManager rm = RuleManager.getInstance(language, find_temponyms);
HashMap<String, String> hmDatePosConstraint = rm.getHmDatePosConstraint();
HashMap<String, String> hmDurationPosConstraint = rm.getHmDurationPosConstraint();
HashMap<String, String> hmTimePosConstraint = rm.getHmTimePosConstraint();
HashMap<String, String> hmSetPosConstraint = rm.getHmSetPosConstraint();
HashMap<String, String> hmTemponymPosConstraint = rm.getHmTemponymPosConstraint();
// get fast check patterns first
HashMap<String, Pattern> hmDateFastCheck = rm.getHmDateFastCheck();
HashMap<String, Pattern> hmDurationFastCheck = rm.getHmDurationFastCheck();
HashMap<String, Pattern> hmTimeFastCheck = rm.getHmTimeFastCheck();
HashMap<String, Pattern> hmSetFastCheck = rm.getHmSetFastCheck();
HashMap<String, Pattern> hmTemponymFastCheck = rm.getHmTemponymFastCheck();
Pattern f = null;
Boolean fastCheckOK = true;
// Iterator over the rules by sorted by the name of the rules
// this is important since later, the timexId will be used to
// decide which of two expressions shall be removed if both
// have the same offset
for (Iterator<Pattern> i = Toolbox.sortByValue(hmPattern).iterator(); i.hasNext();) {
Pattern p = (Pattern) i.next();
            // validate the fast check first; if it does not match, the full
            // pattern cannot match either
if (timexType.equals("DATE")) {
f = hmDateFastCheck.get(hmPattern.get(p));
} else if (timexType.equals("Time")) {
f = hmTimeFastCheck.get(hmPattern.get(p));
} else if (timexType.equals("DURATION")) {
f = hmDurationFastCheck.get(hmPattern.get(p));
} else if (timexType.equals("SET")) {
f = hmSetFastCheck.get(hmPattern.get(p));
} else if (timexType.equals("TEMPONYM")) {
f = hmTemponymFastCheck.get(hmPattern.get(p));
}
if (!(f == null)) {
fastCheckOK = false;
if (f.matcher(s.getCoveredText()).find()) {
fastCheckOK = true;
}
}
if (fastCheckOK) {
for (MatchResult r : Toolbox.findMatches(p, s.getCoveredText())) {
                    // improved token boundary checking
                    boolean infrontBehindOK = checkTokenBoundaries(r, s, jcas) && checkInfrontBehind(r, s);
// CHECK POS CONSTRAINTS
boolean posConstraintOK = true;
if (timexType.equals("DATE")) {
if (hmDatePosConstraint.containsKey(hmPattern.get(p))) {
posConstraintOK = checkPosConstraint(s, hmDatePosConstraint.get(hmPattern.get(p)), r, jcas);
}
} else if (timexType.equals("DURATION")) {
if (hmDurationPosConstraint.containsKey(hmPattern.get(p))) {
posConstraintOK = checkPosConstraint(s, hmDurationPosConstraint.get(hmPattern.get(p)), r,
jcas);
}
} else if (timexType.equals("TIME")) {
if (hmTimePosConstraint.containsKey(hmPattern.get(p))) {
posConstraintOK = checkPosConstraint(s, hmTimePosConstraint.get(hmPattern.get(p)), r, jcas);
}
} else if (timexType.equals("SET")) {
if (hmSetPosConstraint.containsKey(hmPattern.get(p))) {
posConstraintOK = checkPosConstraint(s, hmSetPosConstraint.get(hmPattern.get(p)), r, jcas);
}
} else if (timexType.equals("TEMPONYM")) {
if (hmTemponymPosConstraint.containsKey(hmPattern.get(p))) {
                            posConstraintOK = checkPosConstraint(s, hmTemponymPosConstraint.get(hmPattern.get(p)), r,
                                    jcas); // fixed: was hmSetPosConstraint (copy-paste error)
}
}
                    if (infrontBehindOK && posConstraintOK) {
// Offset of timex expression (in the checked sentence)
int timexStart = r.start();
int timexEnd = r.end();
// Normalization from Files:
// Any offset parameter?
if (hmOffset.containsKey(hmPattern.get(p))) {
String offset = hmOffset.get(hmPattern.get(p));
// pattern for offset information
Pattern paOffset = Pattern.compile("group\\(([0-9]+)\\)-group\\(([0-9]+)\\)");
for (MatchResult mr : Toolbox.findMatches(paOffset, offset)) {
int startOffset = Integer.parseInt(mr.group(1));
int endOffset = Integer.parseInt(mr.group(2));
timexStart = r.start(startOffset);
timexEnd = r.end(endOffset);
}
}
// Normalization Parameter
if (hmNormalization.containsKey(hmPattern.get(p))) {
String[] attributes = new String[5];
if (timexType.equals("DATE")) {
attributes = getAttributesForTimexFromFile(hmPattern.get(p),
rm.getHmDateNormalization(), rm.getHmDateQuant(), rm.getHmDateFreq(),
rm.getHmDateMod(), rm.getHmDateEmptyValue(), r, jcas);
} else if (timexType.equals("DURATION")) {
attributes = getAttributesForTimexFromFile(hmPattern.get(p),
rm.getHmDurationNormalization(), rm.getHmDurationQuant(),
rm.getHmDurationFreq(), rm.getHmDurationMod(), rm.getHmDurationEmptyValue(), r,
jcas);
} else if (timexType.equals("TIME")) {
attributes = getAttributesForTimexFromFile(hmPattern.get(p),
rm.getHmTimeNormalization(), rm.getHmTimeQuant(), rm.getHmTimeFreq(),
rm.getHmTimeMod(), rm.getHmTimeEmptyValue(), r, jcas);
} else if (timexType.equals("SET")) {
attributes = getAttributesForTimexFromFile(hmPattern.get(p), rm.getHmSetNormalization(),
rm.getHmSetQuant(), rm.getHmSetFreq(), rm.getHmSetMod(),
rm.getHmSetEmptyValue(), r, jcas);
} else if (timexType.equals("TEMPONYM")) {
attributes = getAttributesForTimexFromFile(hmPattern.get(p),
rm.getHmTemponymNormalization(), rm.getHmTemponymQuant(),
rm.getHmTemponymFreq(), rm.getHmTemponymMod(), rm.getHmTemponymEmptyValue(), r,
jcas);
}
if (!(attributes == null)) {
addTimexAnnotation(timexType, timexStart + s.getBegin(), timexEnd + s.getBegin(), s,
attributes[0], attributes[1], attributes[2], attributes[3], attributes[4],
"t" + timexID++, hmPattern.get(p), jcas);
}
} else {
Logger.printError("SOMETHING REALLY WRONG HERE: " + hmPattern.get(p));
}
}
}
}
fastCheckOK = true;
}
}
/**
* Check whether the part of speech constraint defined in a rule is satisfied.
*
* @param s
* @param posConstraint
* @param m
* @param jcas
* @return
*/
public boolean checkPosConstraint(Sentence s, String posConstraint, MatchResult m, JCas jcas) {
Pattern paConstraint = Pattern.compile("group\\(([0-9]+)\\):(.*?):");
for (MatchResult mr : Toolbox.findMatches(paConstraint, posConstraint)) {
int groupNumber = Integer.parseInt(mr.group(1));
int tokenBegin = s.getBegin() + m.start(groupNumber);
int tokenEnd = s.getBegin() + m.end(groupNumber);
String pos = mr.group(2);
String pos_as_is = getPosFromMatchResult(tokenBegin, tokenEnd, s, jcas);
if (pos_as_is.matches(pos)) {
Logger.printDetail("POS CONSTRAINT IS VALID: pos should be " + pos + " and is " + pos_as_is);
} else {
return false;
}
}
return true;
}
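    // Example constraint string (format inferred from the regex above):
    // "group(2):NN.*:" requires the token at the start of match group 2 to
    // carry a POS tag matching "NN.*"; several "group(n):POS:" pairs may be
    // concatenated, and all of them must hold.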
public String applyRuleFunctions(String tonormalize, MatchResult m) {
NormalizationManager norm = NormalizationManager.getInstance(language, find_temponyms);
String normalized = "";
// pattern for normalization functions + group information
// pattern for group information
Pattern paNorm = Pattern.compile("%([A-Za-z0-9]+?)\\(group\\(([0-9]+)\\)\\)");
Pattern paGroup = Pattern.compile("group\\(([0-9]+)\\)");
while ((tonormalize.contains("%")) || (tonormalize.contains("group"))) {
// replace normalization functions
for (MatchResult mr : Toolbox.findMatches(paNorm, tonormalize)) {
Logger.printDetail("-----------------------------------");
Logger.printDetail("DEBUGGING: tonormalize:" + tonormalize);
Logger.printDetail("DEBUGGING: mr.group():" + mr.group());
Logger.printDetail("DEBUGGING: mr.group(1):" + mr.group(1));
Logger.printDetail("DEBUGGING: mr.group(2):" + mr.group(2));
Logger.printDetail("DEBUGGING: m.group():" + m.group());
Logger.printDetail("DEBUGGING: m.group(" + Integer.parseInt(mr.group(2)) + "):"
+ m.group(Integer.parseInt(mr.group(2))));
Logger.printDetail("DEBUGGING: hmR...:"
+ norm.getFromHmAllNormalization(mr.group(1)).get(m.group(Integer.parseInt(mr.group(2)))));
Logger.printDetail("-----------------------------------");
if (!(m.group(Integer.parseInt(mr.group(2))) == null)) {
String partToReplace = m.group(Integer.parseInt(mr.group(2))).replaceAll("[\n\\s]+", " ");
if (!(norm.getFromHmAllNormalization(mr.group(1)).containsKey(partToReplace))) {
Logger.printDetail("Maybe problem with normalization of the resource: " + mr.group(1));
Logger.printDetail("Maybe problem with part to replace? " + partToReplace);
if (mr.group(1).contains("Temponym")) {
Logger.printDetail("Should be ok, as it's a temponym.");
return null;
}
} else {
tonormalize = tonormalize.replace(mr.group(),
norm.getFromHmAllNormalization(mr.group(1)).get(partToReplace));
}
} else {
Logger.printDetail("Empty part to normalize in " + mr.group(1));
tonormalize = tonormalize.replace(mr.group(), "");
}
}
// replace other groups
for (MatchResult mr : Toolbox.findMatches(paGroup, tonormalize)) {
Logger.printDetail("-----------------------------------");
Logger.printDetail("DEBUGGING: tonormalize:" + tonormalize);
Logger.printDetail("DEBUGGING: mr.group():" + mr.group());
Logger.printDetail("DEBUGGING: mr.group(1):" + mr.group(1));
Logger.printDetail("DEBUGGING: m.group():" + m.group());
Logger.printDetail("DEBUGGING: m.group(" + Integer.parseInt(mr.group(1)) + "):"
+ m.group(Integer.parseInt(mr.group(1))));
Logger.printDetail("-----------------------------------");
tonormalize = tonormalize.replace(mr.group(), m.group(Integer.parseInt(mr.group(1))));
}
// replace substrings
Pattern paSubstring = Pattern.compile("%SUBSTRING%\\((.*?),([0-9]+),([0-9]+)\\)");
for (MatchResult mr : Toolbox.findMatches(paSubstring, tonormalize)) {
String substring = mr.group(1).substring(Integer.parseInt(mr.group(2)), Integer.parseInt(mr.group(3)));
tonormalize = tonormalize.replace(mr.group(), substring);
}
if (language.getName().compareTo("arabic") != 0) {
// replace lowercase
Pattern paLowercase = Pattern.compile("%LOWERCASE%\\((.*?)\\)");
for (MatchResult mr : Toolbox.findMatches(paLowercase, tonormalize)) {
String substring = mr.group(1).toLowerCase();
tonormalize = tonormalize.replace(mr.group(), substring);
}
// replace uppercase
Pattern paUppercase = Pattern.compile("%UPPERCASE%\\((.*?)\\)");
for (MatchResult mr : Toolbox.findMatches(paUppercase, tonormalize)) {
String substring = mr.group(1).toUpperCase();
tonormalize = tonormalize.replace(mr.group(), substring);
}
}
// replace sum, concatenation
Pattern paSum = Pattern.compile("%SUM%\\((.*?),(.*?)\\)");
for (MatchResult mr : Toolbox.findMatches(paSum, tonormalize)) {
int newValue = Integer.parseInt(mr.group(1)) + Integer.parseInt(mr.group(2));
tonormalize = tonormalize.replace(mr.group(), newValue + "");
}
// replace normalization function without group
Pattern paNormNoGroup = Pattern.compile("%([A-Za-z0-9]+?)\\((.*?)\\)");
for (MatchResult mr : Toolbox.findMatches(paNormNoGroup, tonormalize)) {
tonormalize = tonormalize.replace(mr.group(),
norm.getFromHmAllNormalization(mr.group(1)).get(mr.group(2)));
}
// replace Chinese with Arabic numerals
Pattern paChineseNorm = Pattern.compile("%CHINESENUMBERS%\\((.*?)\\)");
for (MatchResult mr : Toolbox.findMatches(paChineseNorm, tonormalize)) {
RegexHashMap<String> chineseNumerals = new RegexHashMap<String>();
chineseNumerals.put("[零00]", "0");
chineseNumerals.put("[一11]", "1");
chineseNumerals.put("[二22]", "2");
chineseNumerals.put("[三33]", "3");
chineseNumerals.put("[四44]", "4");
chineseNumerals.put("[五55]", "5");
chineseNumerals.put("[六66]", "6");
chineseNumerals.put("[七77]", "7");
chineseNumerals.put("[八88]", "8");
chineseNumerals.put("[九99]", "9");
String outString = "";
for (Integer i = 0; i < mr.group(1).length(); i++) {
String thisChar = mr.group(1).substring(i, i + 1);
if (chineseNumerals.containsKey(thisChar)) {
outString += chineseNumerals.get(thisChar);
} else {
System.out.println(chineseNumerals.entrySet());
Logger.printError(component, "Found an error in the resources: " + mr.group(1) + " contains "
+ "a character that is not defined in the Chinese numerals map. Normalization may be mangled.");
outString += thisChar;
}
}
tonormalize = tonormalize.replace(mr.group(), outString);
}
}
normalized = tonormalize;
return normalized;
}
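    // Illustrative walk-through (resource contents assumed): for a rule value
    // "UNDEF-year-%normMonth(group(1))" where group 1 matched "March", the
    // %normMonth lookup turns the string into "UNDEF-year-03"; plain
    // "group(2)" references are substituted verbatim, "%SUBSTRING%(2016,0,2)"
    // yields "20", and "%SUM%(19,81)" yields "100".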
public String[] getAttributesForTimexFromFile(String rule, HashMap<String, String> hmNormalization,
HashMap<String, String> hmQuant, HashMap<String, String> hmFreq, HashMap<String, String> hmMod,
HashMap<String, String> hmEmptyValue, MatchResult m, JCas jcas) {
String[] attributes = new String[5];
String value = "";
String quant = "";
String freq = "";
String mod = "";
String emptyValue = "";
// Normalize Value
String value_normalization_pattern = hmNormalization.get(rule);
value = applyRuleFunctions(value_normalization_pattern, m);
if (value == null)
return null;
// get quant
if (hmQuant.containsKey(rule)) {
String quant_normalization_pattern = hmQuant.get(rule);
quant = applyRuleFunctions(quant_normalization_pattern, m);
}
// get freq
if (hmFreq.containsKey(rule)) {
String freq_normalization_pattern = hmFreq.get(rule);
freq = applyRuleFunctions(freq_normalization_pattern, m);
}
// get mod
if (hmMod.containsKey(rule)) {
String mod_normalization_pattern = hmMod.get(rule);
mod = applyRuleFunctions(mod_normalization_pattern, m);
}
// get emptyValue
if (hmEmptyValue.containsKey(rule)) {
String emptyValue_normalization_pattern = hmEmptyValue.get(rule);
emptyValue = applyRuleFunctions(emptyValue_normalization_pattern, m);
emptyValue = correctDurationValue(emptyValue);
}
// For example "PT24H" -> "P1D"
if (group_gran)
value = correctDurationValue(value);
attributes[0] = value;
attributes[1] = quant;
attributes[2] = freq;
attributes[3] = mod;
attributes[4] = emptyValue;
return attributes;
}
/**
* Durations of a finer granularity are mapped to a coarser one if possible,
* e.g., "PT24H" -> "P1D". One may add several further corrections.
*
* @param value
* @return
*/
public String correctDurationValue(String value) {
if (value.matches("PT[0-9]+H")) {
for (MatchResult mr : Toolbox.findMatches(Pattern.compile("PT([0-9]+)H"), value)) {
try {
int hours = Integer.parseInt(mr.group(1));
if ((hours % 24) == 0) {
int days = hours / 24;
value = "P" + days + "D";
}
} catch (NumberFormatException e) {
Logger.printDetail(component, "Couldn't do granularity conversion for " + value);
}
}
} else if (value.matches("PT[0-9]+M")) {
for (MatchResult mr : Toolbox.findMatches(Pattern.compile("PT([0-9]+)M"), value)) {
try {
int minutes = Integer.parseInt(mr.group(1));
if ((minutes % 60) == 0) {
int hours = minutes / 60;
value = "PT" + hours + "H";
}
} catch (NumberFormatException e) {
Logger.printDetail(component, "Couldn't do granularity conversion for " + value);
}
}
} else if (value.matches("P[0-9]+M")) {
for (MatchResult mr : Toolbox.findMatches(Pattern.compile("P([0-9]+)M"), value)) {
try {
int months = Integer.parseInt(mr.group(1));
if ((months % 12) == 0) {
int years = months / 12;
value = "P" + years + "Y";
}
} catch (NumberFormatException e) {
Logger.printDetail(component, "Couldn't do granularity conversion for " + value);
}
}
}
return value;
}
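    // Examples derived from the rules above: "PT24H" -> "P1D", "PT48H" ->
    // "P2D", "PT120M" -> "PT2H", "P24M" -> "P2Y"; "PT90M" stays unchanged
    // because 90 is not a multiple of 60.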
/**
* Check whether or not a jcas object has a correct DCT value. If there is no
* DCT present, we canonically return true since fallback calculation takes care
* of that scenario.
*
* @param jcas
* @return Whether or not the given jcas contains a valid DCT
*/
private Boolean isValidDCT(JCas jcas) {
FSIterator dctIter = jcas.getAnnotationIndex(Dct.type).iterator();
if (!dctIter.hasNext()) {
return true;
} else {
Dct dct = (Dct) dctIter.next();
String dctVal = dct.getValue();
if (dctVal == null)
return false;
if (dctVal.matches("\\d{8}") // Something like 20041224
|| dctVal.matches("\\d{4}.\\d{2}.\\d{2}.*")) { // Something
// like
// 2004-12-24
return true;
} else {
return false;
}
}
}
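    // e.g. DCT values "20041224" and "2004-12-24" are accepted, while
    // "Dec 24, 2004" is rejected by both patterns.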
/**
* Check token boundaries using token information
*
* @param r
* MatchResult
* @param s
* respective Sentence
* @param jcas
* current CAS object
* @return whether or not the MatchResult is a clean one
*/
public static Boolean checkTokenBoundaries(MatchResult r, Sentence s, JCas jcas) {
Boolean beginOK = false;
Boolean endOK = false;
// whole expression is marked as a sentence
if ((r.end() - r.start()) == (s.getEnd() - s.getBegin())) {
return true;
}
        // If the match is directly surrounded by white-space, its boundaries
        // are clean; only otherwise do we check token boundaries below
if ((r.start() > 0) && ((s.getCoveredText().subSequence(r.start() - 1, r.start()).equals(" ")))
&& ((r.end() < s.getCoveredText().length())
&& ((s.getCoveredText().subSequence(r.end(), r.end() + 1).equals(" "))))) {
return true;
}
// other token boundaries than white-spaces
else {
FSIterator iterToken = jcas.getAnnotationIndex(Token.type).subiterator(s);
while (iterToken.hasNext()) {
Token t = (Token) iterToken.next();
// Check begin
if ((r.start() + s.getBegin()) == t.getBegin()) {
beginOK = true;
}
                // Tokenizer does not split numbers from some symbols (".", "/", "-", "–"),
                // e.g., "...12 August-24 August..."
else if ((r.start() > 0) && ((s.getCoveredText().subSequence(r.start() - 1, r.start()).equals("."))
|| (s.getCoveredText().subSequence(r.start() - 1, r.start()).equals("/"))
|| (s.getCoveredText().subSequence(r.start() - 1, r.start()).equals("–"))
|| (s.getCoveredText().subSequence(r.start() - 1, r.start()).equals("-")))) {
beginOK = true;
}
// Check end
if ((r.end() + s.getBegin()) == t.getEnd()) {
endOK = true;
}
                // Tokenizer does not split numbers from some symbols (".", "/", "-", "–"),
                // e.g., "... in 1990. New Sentence ..."
else if ((r.end() < s.getCoveredText().length())
&& ((s.getCoveredText().subSequence(r.end(), r.end() + 1).equals("."))
|| (s.getCoveredText().subSequence(r.end(), r.end() + 1).equals("/"))
|| (s.getCoveredText().subSequence(r.end(), r.end() + 1).equals("–"))
|| (s.getCoveredText().subSequence(r.end(), r.end() + 1).equals("-")))) {
endOK = true;
}
if (beginOK && endOK)
return true;
}
}
return false;
}
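    // e.g. in "12 August-24 August", the match starting at "24" is accepted
    // although no white-space precedes it, because "-" counts as a token
    // boundary (illustrative).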
/**
* Check token boundaries of expressions.
*
* @param r
* MatchResult
* @param s
* Respective sentence
* @return whether or not the MatchResult is a clean one
*/
public static Boolean checkInfrontBehind(MatchResult r, Sentence s) {
Boolean ok = true;
// get rid of expressions such as "1999" in 53453.1999
if (r.start() > 1) {
if ((s.getCoveredText().substring(r.start() - 2, r.start()).matches("\\d\\."))) {
ok = false;
}
}
// get rid of expressions if there is a character or symbol ($+)
// directly in front of the expression
if (r.start() > 0) {
if (((s.getCoveredText().substring(r.start() - 1, r.start()).matches("[\\w\\$\\+]")))
&& (!(s.getCoveredText().substring(r.start() - 1, r.start()).matches("\\(")))) {
ok = false;
}
}
if (r.end() < s.getCoveredText().length()) {
if ((s.getCoveredText().substring(r.end(), r.end() + 1).matches("[°\\w]"))
&& (!(s.getCoveredText().substring(r.end(), r.end() + 1).matches("\\)")))) {
ok = false;
}
if (r.end() + 1 < s.getCoveredText().length()) {
if (s.getCoveredText().substring(r.end(), r.end() + 2).matches("[\\.,]\\d")) {
ok = false;
}
}
}
return ok;
}
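    // e.g. "1999" inside "53453.1999" is rejected (digit plus "." directly in
    // front), while "(1999)" is accepted because parentheses are explicitly
    // exempted.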
}
| 109,969 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
KeytermExtractor.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/annotator/KeytermExtractor.java | package uhh_lt.newsleak.annotator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.fit.descriptor.OperationalProperties;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import opennlp.uima.Location;
import opennlp.uima.Organization;
import opennlp.uima.Person;
import opennlp.uima.Sentence;
import opennlp.uima.Token;
import uhh_lt.keyterms.Extractor;
import uhh_lt.newsleak.types.Metadata;
import uhh_lt.newsleak.types.Paragraph;
/**
* UIMA annotator for key term extraction. Uses the uhh-lt/lt-keyterms maven
* package providing statistical keyness measurement based on log likelihood
* comparison with a reference corpus.
*/
@OperationalProperties(multipleDeploymentAllowed = true, modifiesCas = true)
public class KeytermExtractor extends JCasAnnotator_ImplBase {
/** PARAM_LANGUAGE_CODE to initialize the keyterm extractor */
public static final String PARAM_LANGUAGE_CODE = "languageCode";
@ConfigurationParameter(name = PARAM_LANGUAGE_CODE)
private String languageCode;
/** PARAM_N_KEYTERMS Number of keyterms to extract per document. */
public static final String PARAM_N_KEYTERMS = "nKeyterms";
@ConfigurationParameter(name = PARAM_N_KEYTERMS)
private Integer nKeyterms;
private Extractor extractor;
private Logger log;
/*
* UIMA initializer
*
* @see org.apache.uima.fit.component.JCasAnnotator_ImplBase#initialize(org.
* apache.uima.UimaContext)
*/
@Override
public void initialize(UimaContext context) throws ResourceInitializationException {
super.initialize(context);
log = context.getLogger();
try {
extractor = new Extractor(languageCode, 100);
} catch (IOException e) {
throw new ResourceInitializationException(e.getMessage(), null);
}
}
/*
     * Extracts keyterms and named entities. Removes named entities from the
     * keyterm list so that keyterm and entity annotations do not overlap.
*
* @see org.apache.uima.analysis_component.JCasAnnotator_ImplBase#process(org.
* apache.uima.jcas.JCas)
*/
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
Set<String> keytermSet = getKeyWords(jcas);
HashSet<String> namedEntities = getNamedEntities(jcas);
// generate decreasing count value for each keyterm
int pseudoCount = keytermSet.size();
int n = 0;
StringBuilder keyterms = new StringBuilder();
String text;
for (String term : keytermSet) {
pseudoCount--;
// do not extract NEs as keyterms too
if (namedEntities.contains(term))
continue;
text = keyterms.length() > 0 ? "\t" : "";
text += term.replaceAll(":", "");
keyterms.append(text).append(":").append(pseudoCount);
n++;
// extract only top n keyterms
if (n >= nKeyterms)
break;
}
Metadata metadata = (Metadata) jcas.getAnnotationIndex(Metadata.type).iterator().next();
metadata.setKeyterms(keyterms.toString());
metadata.addToIndexes();
}
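    // Resulting metadata format (terms invented for illustration):
    // "merkel:12\tbundestag:11\tcoalition:10" - tab-separated keyterms, each
    // followed by a decreasing pseudo count used for ranking.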
/**
* Gets the key words from a document using the lt-keyterms library. Keyterms
* are only extracted from paragraphs which are supposed to contain full texts,
* i.e. are not marked as dubious "not fulltext" (such as log files).
*
* @param jcas
* the jcas
* @return the key words
*/
public Set<String> getKeyWords(JCas jcas) {
Collection<Paragraph> paragraphs = JCasUtil.select(jcas, Paragraph.class);
List<String> tokens = new ArrayList<String>();
for (Paragraph paragraph : paragraphs) {
if (paragraph.getIsNotFulltext()) {
log.log(Level.FINEST, "Skipping paragraph for keyterm extraction.");
} else {
Collection<Token> parTokens = JCasUtil.selectCovered(jcas, Token.class, paragraph.getBegin(),
paragraph.getEnd());
for (Token token : parTokens) {
tokens.add(token.getCoveredText());
}
}
}
return extractor.extractKeyTerms(tokens);
}
/**
* Gets the named entities.
*
* @param jcas
* the jcas
* @return the named entities
*/
public HashSet<String> getNamedEntities(JCas jcas) {
Collection<Person> persons = JCasUtil.select(jcas, Person.class);
Collection<Organization> organizations = JCasUtil.select(jcas, Organization.class);
Collection<Location> locations = JCasUtil.select(jcas, Location.class);
HashSet<String> nes = new HashSet<String>();
for (Person ne : persons) {
nes.add(ne.getCoveredText());
}
for (Location ne : locations) {
nes.add(ne.getCoveredText());
}
for (Organization ne : organizations) {
nes.add(ne.getCoveredText());
}
return nes;
}
}
| 4,951 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
DictionaryResource.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/resources/DictionaryResource.java | package uhh_lt.newsleak.resources;
import java.io.File;
import java.io.IOException;
import java.text.BreakIterator;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
import org.apache.uima.fit.component.Resource_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.resource.ResourceSpecifier;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import org.tartarus.snowball.SnowballStemmer;
import org.tartarus.snowball.ext.danishStemmer;
import org.tartarus.snowball.ext.dutchStemmer;
import org.tartarus.snowball.ext.englishStemmer;
import org.tartarus.snowball.ext.finnishStemmer;
import org.tartarus.snowball.ext.frenchStemmer;
import org.tartarus.snowball.ext.germanStemmer;
import org.tartarus.snowball.ext.hungarianStemmer;
import org.tartarus.snowball.ext.italianStemmer;
import org.tartarus.snowball.ext.norwegianStemmer;
import org.tartarus.snowball.ext.portugueseStemmer;
import org.tartarus.snowball.ext.romanianStemmer;
import org.tartarus.snowball.ext.russianStemmer;
import org.tartarus.snowball.ext.spanishStemmer;
import org.tartarus.snowball.ext.swedishStemmer;
import org.tartarus.snowball.ext.turkishStemmer;
import uhh_lt.newsleak.annotator.LanguageDetector;
/**
* Provides shared functionality and data for the @see
* uhh_lt.newsleak.annotator.DictionaryExtractor such as reading in dictionary
* files for each language, and perform stemming of dictionary entries.
*
 * Dictionaries should follow the convention of file names providing the
* dictionary type as main name and ISO-639-3 language code as file extension.
* Dictionaries containing entries for all languages may have the file extension
* 'all'. Dictionary files should be placed in <i>conf/dictionaries</i>.
*
* Dictionary files should contain one entry per line. Entries can be single
* terms, which then are stemmed before comparison with the target data (if a
 * stemmer for the selected language is available). Entries can also be
 * multi-word units (MWUs). For MWUs, no stemming is performed; MWUs are
 * matched via regex instead.
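 *
 * An example dictionary file <i>conf/dictionaries/spam.eng</i> might look as
 * follows (file name and entries invented for illustration):
 *
 * <pre>
 * viagra
 * lottery winner
 * wire transfer
 * </pre>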
*/
public class DictionaryResource extends Resource_ImplBase {
/** The logger. */
private Logger logger;
/** The Constant PARAM_DATADIR. */
public static final String PARAM_DATADIR = "dictionaryDir";
/** The dictionary dir. */
@ConfigurationParameter(name = PARAM_DATADIR, mandatory = true)
private String dictionaryDir;
/** The Constant PARAM_DICTIONARY_FILES. */
public static final String PARAM_DICTIONARY_FILES = "dictionaryFilesString";
/** The dictionary files string. */
@ConfigurationParameter(name = PARAM_DICTIONARY_FILES)
private String dictionaryFilesString;
/** The dictionary files. */
private List<File> dictionaryFiles;
/** The Constant PARAM_LANGUAGE_CODE. */
public static final String PARAM_LANGUAGE_CODE = "languageCode";
/** The language code. */
@ConfigurationParameter(name = PARAM_LANGUAGE_CODE)
private String languageCode;
/** The stemmer. */
private SnowballStemmer stemmer;
/** The locale. */
private Locale locale;
/** The unigram dictionaries. */
private HashMap<String, Dictionary> unigramDictionaries;
/** The mwu dictionaries. */
private HashMap<String, Dictionary> mwuDictionaries;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.Resource_ImplBase#initialize(org.apache.uima.
* resource.ResourceSpecifier, java.util.Map)
*/
@Override
public boolean initialize(ResourceSpecifier aSpecifier, Map<String, Object> aAdditionalParams)
throws ResourceInitializationException {
if (!super.initialize(aSpecifier, aAdditionalParams)) {
return false;
}
this.logger = this.getLogger();
locale = LanguageDetector.localeToISO().get(languageCode);
dictionaryFiles = getDictionaryFiles(dictionaryFilesString);
// select stemmer
switch (languageCode) {
case "eng":
stemmer = new englishStemmer();
break;
case "dan":
stemmer = new danishStemmer();
break;
case "deu":
stemmer = new germanStemmer();
break;
case "nld":
stemmer = new dutchStemmer();
break;
case "fin":
stemmer = new finnishStemmer();
break;
case "fra":
stemmer = new frenchStemmer();
break;
case "hun":
stemmer = new hungarianStemmer();
break;
case "ita":
stemmer = new italianStemmer();
break;
case "nor":
stemmer = new norwegianStemmer();
break;
case "por":
stemmer = new portugueseStemmer();
break;
case "ron":
stemmer = new romanianStemmer();
break;
case "rus":
stemmer = new russianStemmer();
break;
case "spa":
stemmer = new spanishStemmer();
break;
case "swe":
stemmer = new swedishStemmer();
break;
case "tur":
stemmer = new turkishStemmer();
break;
default:
stemmer = new noStemmer();
}
// populate dictionary objects from files
unigramDictionaries = new HashMap<String, Dictionary>();
mwuDictionaries = new HashMap<String, Dictionary>();
for (File f : dictionaryFiles) {
try {
String dictType = f.getName().replaceAll("\\..*", "").toUpperCase();
List<String> dictTermList = FileUtils.readLines(f);
Dictionary dictUnigrams = new Dictionary();
Dictionary dictMwu = new Dictionary();
for (String term : dictTermList) {
String t = term.trim();
if (!t.isEmpty()) {
if (isMultiWord(t)) {
// handle dictionary entry as multiword unit
dictMwu.put("(?i)" + Pattern.quote(t), t);
} else {
// handle dictionary entry as unigram
String stem;
synchronized (stemmer) {
stemmer.setCurrent(t);
stemmer.stem();
stem = stemmer.getCurrent().toLowerCase();
}
// map stems to shortest original type
String shortestType;
if (dictUnigrams.containsKey(stem) && dictUnigrams.get(stem).length() < t.length()) {
shortestType = dictUnigrams.get(stem);
} else {
shortestType = t;
}
dictUnigrams.put(stem, shortestType);
}
}
}
unigramDictionaries.put(dictType, dictUnigrams);
mwuDictionaries.put(dictType, dictMwu);
} catch (IOException e) {
throw new ResourceInitializationException(e.getMessage(), null);
}
}
return true;
}
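	/*
	 * Illustration only (not part of the original source): assuming a
	 * hypothetical dictionary file conf/dictionaries/countries.eng containing
	 * the lines "Germany" and "United States", initialize() would stem the
	 * unigram "Germany" (e.g. to "germani" with the English stemmer) and store
	 * it in the unigram dictionary mapped to its shortest original form, while
	 * the multi-word entry "United States" would be stored in the MWU
	 * dictionary under the case-insensitive regex key "(?i)\QUnited States\E".
	 */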
/**
* Checks if a String is a multi word unit.
*
* @param t
* the t
* @return true, if is multi word
*/
private boolean isMultiWord(String t) {
BreakIterator tokenBreaker = BreakIterator.getWordInstance(locale);
tokenBreaker.setText(t);
// count tokens
int pos = tokenBreaker.first();
int nTokens = 0;
while (pos != BreakIterator.DONE) {
nTokens++;
pos = tokenBreaker.next();
}
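		// the word BreakIterator yields a boundary for every token *and* every
		// separator segment, so a text of n whitespace-separated words produces
		// 2n boundary positions; halving recovers the approximate token count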
nTokens = nTokens / 2;
return nTokens > 1;
}
/**
* Retrieves the dictionary files as configured in the preprocessing configuration.
*
* @param list
* the list
* @return the dictionary files
*/
private List<File> getDictionaryFiles(String list) {
List<File> files = new ArrayList<File>();
for (String f : list.split(", +?")) {
String[] args = f.split(":");
if (args.length > 2) {
logger.log(Level.SEVERE,
"Could not parse dictionary files configuration: '" + list + "'\n"
+ "Expecting format 'dictionaryfiles = langcode:filename1, langcode:filename2, ...'.\n"
+ "You can also omit 'langcode:' to apply dictionary to all languages.");
System.exit(1);
}
if (args.length == 1 || (args.length == 2 && args[0].equals(languageCode))) {
String fname = args.length == 1 ? args[0] : args[1];
files.add(new File(dictionaryDir, fname));
logger.log(Level.INFO, "Applying dictionary file " + f + " to language " + languageCode);
}
}
return files;
}
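	/*
	 * Illustration only (not part of the original source): a hypothetical
	 * configuration value such as
	 *
	 *   dictionaryfiles = eng:companies.eng, deu:companies.deu, cities.all
	 *
	 * would, for languageCode "eng", select conf/dictionaries/companies.eng
	 * and conf/dictionaries/cities.all (entries without a language prefix
	 * apply to all languages).
	 */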
/**
* Gets the unigram dictionaries.
*
* @return the unigram dictionaries
*/
public HashMap<String, Dictionary> getUnigramDictionaries() {
return unigramDictionaries;
}
/**
* A do nothing stemmer.
*/
private class noStemmer extends org.tartarus.snowball.SnowballStemmer {
/*
* (non-Javadoc)
*
* @see org.tartarus.snowball.SnowballStemmer#stem()
*/
@Override
public boolean stem() {
return true;
}
}
/**
* Stems an input token.
*
* @param token
* the token
* @return the string
*/
public synchronized String stem(String token) {
stemmer.setCurrent(token);
stemmer.stem();
return stemmer.getCurrent();
}
/**
* The Class Dictionary.
*/
public class Dictionary extends HashMap<String, String> {
/** Serial ID. */
private static final long serialVersionUID = -4395683941205467020L;
/**
* Instantiates a new dictionary.
*/
public Dictionary() {
super();
}
}
/**
* Gets the mwu dictionaries.
*
* @return the mwu dictionaries
*/
public HashMap<String, Dictionary> getMwuDictionaries() {
return mwuDictionaries;
}
}
| 9,025 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
PostgresResource.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/resources/PostgresResource.java | package uhh_lt.newsleak.resources;
import java.io.File;
import java.io.IOException;
import java.sql.Connection;
import java.sql.Date;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.apache.uima.fit.component.Resource_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.resource.ResourceSpecifier;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
/**
* Provides shared functionality and data for the @see
* uhh_lt.newsleak.writer.PostgresDbWriter. A shared client is used to
* insert/update entries for each document as queried by the writer. For this,
* the class uses prepared insert and upsert statements.
*/
public class PostgresResource extends Resource_ImplBase {
/** The logger. */
private Logger logger;
/** The Constant TABLE_DOCUMENT. */
public static final String TABLE_DOCUMENT = "document";
/** The Constant TABLE_METADATA. */
public static final String TABLE_METADATA = "metadata";
/** The Constant TABLE_ENTITY. */
public static final String TABLE_ENTITY = "entity";
/** The Constant TABLE_ENTITYOFFSET. */
public static final String TABLE_ENTITYOFFSET = "entityoffset";
/** The Constant TABLE_EVENTTIME. */
public static final String TABLE_EVENTTIME = "eventtime";
/** The Constant TABLE_KEYTERMS. */
public static final String TABLE_KEYTERMS = "terms";
/** The Constant PARAM_DBNAME. */
public static final String PARAM_DBNAME = "dbName";
/** The db name. */
@ConfigurationParameter(name = PARAM_DBNAME)
private String dbName;
/** The Constant PARAM_DBUSER. */
public static final String PARAM_DBUSER = "dbUser";
/** The db user. */
@ConfigurationParameter(name = PARAM_DBUSER)
private String dbUser;
/** The Constant PARAM_DBURL. */
public static final String PARAM_DBURL = "dbUrl";
/** The db url. */
@ConfigurationParameter(name = PARAM_DBURL)
private String dbUrl;
/** The Constant PARAM_DBPASS. */
public static final String PARAM_DBPASS = "dbPass";
/** The db pass. */
@ConfigurationParameter(name = PARAM_DBPASS)
private String dbPass;
/** The Constant PARAM_INDEX_SCHEMA. */
public static final String PARAM_INDEX_SCHEMA = "indexSqlFile";
/** The index sql file. */
@ConfigurationParameter(name = PARAM_INDEX_SCHEMA)
private String indexSqlFile;
/** The Constant PARAM_TABLE_SCHEMA. */
public static final String PARAM_TABLE_SCHEMA = "tableSchemaFile";
/** The table schema file. */
@ConfigurationParameter(name = PARAM_TABLE_SCHEMA)
private String tableSchemaFile;
/** The Constant PARAM_CREATE_DB. */
public final static String PARAM_CREATE_DB = "createDb";
/** The create db. */
@ConfigurationParameter(name = PARAM_CREATE_DB, mandatory = false, defaultValue = "false", description = "If true, an new db will be created (existing db will be removed).")
private boolean createDb;
/** The db connection. */
private Connection dbConnection;
/** The db statement. */
private Statement dbStatement;
/** The prepared statement document. */
private PreparedStatement preparedStatementDocument;
/** The prepared statement entity upsert. */
private PreparedStatement preparedStatementEntityUpsert;
/** The prepared statement entityoffset. */
private PreparedStatement preparedStatementEntityoffset;
/** The prepared statement eventtime. */
private PreparedStatement preparedStatementEventtime;
/** The prepared statement keyterms. */
private PreparedStatement preparedStatementKeyterms;
/** The document counter. */
private int documentCounter = 0;
/** The internal batch size. */
	private static final int INTERNAL_BATCH_SIZE = 100;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.Resource_ImplBase#initialize(org.apache.uima.
* resource.ResourceSpecifier, java.util.Map)
*/
@Override
public boolean initialize(ResourceSpecifier aSpecifier, Map<String, Object> aAdditionalParams)
throws ResourceInitializationException {
if (!super.initialize(aSpecifier, aAdditionalParams)) {
return false;
}
this.logger = this.getLogger();
// init db
try {
if (createDb) {
createDb(dbUrl, dbName, dbUser, dbPass);
logger.log(Level.INFO, "DB " + dbName + " created");
} else {
initDb(dbUrl, dbName, dbUser, dbPass);
}
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
prepareStatements();
return true;
}
/**
* Gets the db statement.
*
* @return the db statement
*/
public Statement getDbStatement() {
return dbStatement;
}
/**
* Execute insert.
*
* @param sql
* the sql
* @return true, if successful
* @throws SQLException
* the SQL exception
*/
public synchronized boolean executeInsert(String sql) throws SQLException {
return dbStatement.execute(sql);
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.resource.Resource_ImplBase#destroy()
*/
@Override
public void destroy() {
super.destroy();
try {
dbConnection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
/**
* Creates the db.
*
* @param dbUrl
* the db url
* @param dbName
* the db name
* @param dbUser
* the db user
* @param dbPass
* the db pass
* @throws Exception
* the exception
*/
private void createDb(String dbUrl, String dbName, String dbUser, String dbPass) throws Exception {
Connection rootConnection = DriverManager.getConnection("jdbc:postgresql://" + dbUrl + "/postgres", dbUser,
dbPass);
Statement dbStatement = rootConnection.createStatement();
dbStatement.executeUpdate("DROP DATABASE IF EXISTS " + dbName + ";");
dbStatement.executeUpdate("CREATE DATABASE " + dbName
+ " WITH ENCODING='UTF8' LC_CTYPE='en_US.UTF-8' LC_COLLATE='en_US.UTF-8'"
+ " TEMPLATE=template0 CONNECTION LIMIT=-1; GRANT ALL ON DATABASE " + dbName + " TO " + dbUser + ";");
initDb(dbUrl, dbName, dbUser, dbPass);
/**
* create SQL schema (SQL indexes will be created later by @see
* uhh_lt.newsleak.preprocessing.InformationExtraction2Postgres)
*/
createSchema(tableSchemaFile);
}
/**
* Inits the db.
*
* @param dbUrl
* the db url
* @param dbName
* the db name
* @param dbUser
* the db user
* @param dbPass
* the db pass
* @throws InstantiationException
* the instantiation exception
* @throws IllegalAccessException
* the illegal access exception
* @throws ClassNotFoundException
* the class not found exception
* @throws SQLException
* the SQL exception
*/
public void initDb(String dbUrl, String dbName, String dbUser, String dbPass)
throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException {
String url = "jdbc:postgresql://" + dbUrl + "/";
dbConnection = DriverManager.getConnection(url + dbName, dbUser, dbPass);
dbStatement = dbConnection.createStatement();
dbConnection.setAutoCommit(false);
}
/**
* Commit.
*/
public void commit() {
try {
dbConnection.commit();
logger.log(Level.INFO,
"Another " + INTERNAL_BATCH_SIZE + " documents committed (total: " + documentCounter + ")");
} catch (SQLException e) {
e.printStackTrace();
}
}
/**
* Creates the schema.
*
* @param tableSchemaFile
* the table schema file
*/
private void createSchema(String tableSchemaFile) {
try {
String schemaSql = FileUtils.readFileToString(new File(tableSchemaFile)).replace("\n", " ");
dbStatement.executeUpdate(schemaSql);
logger.log(Level.INFO, "Schema created");
} catch (IOException e1) {
logger.log(Level.SEVERE, "Could not read DB schema file " + tableSchemaFile);
System.exit(1);
} catch (SQLException e) {
logger.log(Level.SEVERE, "Could create DB schema.");
e.printStackTrace();
System.exit(1);
}
}
/**
* Prepare statements.
*/
private void prepareStatements() {
try {
preparedStatementDocument = dbConnection
.prepareStatement("INSERT INTO " + TABLE_DOCUMENT + " (id, content, created) VALUES (?, ?, ?)");
preparedStatementEntityUpsert = dbConnection
.prepareStatement("INSERT INTO " + TABLE_ENTITY + " as e (name, type, frequency) VALUES (?, ?, ?) "
+ "ON CONFLICT ON CONSTRAINT unique_name_type DO "
+ "UPDATE SET frequency = e.frequency + ? " + "RETURNING id");
preparedStatementEntityoffset = dbConnection.prepareStatement("INSERT INTO " + TABLE_ENTITYOFFSET
+ " (docid, entid, entitystart, entityend) VALUES (?, ?, ?, ?)");
preparedStatementEventtime = dbConnection.prepareStatement("INSERT INTO " + TABLE_EVENTTIME
+ " (docid, beginoffset, endoffset, timex, type, timexvalue) VALUES (?, ?, ?, ?, ?, ?)");
preparedStatementKeyterms = dbConnection
.prepareStatement("INSERT INTO " + TABLE_KEYTERMS + " (docid, term, frequency) VALUES (?, ?, ?)");
} catch (SQLException e) {
e.printStackTrace();
System.exit(1);
}
}
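	/*
	 * Note on the entity statement above: it is an upsert. A conflict on the
	 * unique (name, type) constraint (unique_name_type) increments the stored
	 * frequency instead of inserting a duplicate row, and RETURNING id lets
	 * insertEntity() fetch the entity id in the same round trip via
	 * executeQuery().
	 */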
/**
* Insert document.
*
* @param id
* the id
* @param content
* the content
* @param created
* the created
* @return true, if successful
* @throws SQLException
* the SQL exception
*/
public synchronized boolean insertDocument(Integer id, String content, String created) throws SQLException {
documentCounter++;
preparedStatementDocument.setInt(1, id);
preparedStatementDocument.setString(2, content.replaceAll("\u0000", ""));
preparedStatementDocument.setDate(3, Date.valueOf(created));
return preparedStatementDocument.execute();
}
/**
* Insert entity.
*
* @param name
* the name
* @param type
* the type
* @param frequency
* the frequency
* @return the integer
* @throws SQLException
* the SQL exception
*/
public synchronized Integer insertEntity(String name, String type, Integer frequency) throws SQLException {
Integer entityId;
preparedStatementEntityUpsert.setString(1, name.replaceAll("\u0000", ""));
preparedStatementEntityUpsert.setString(2, type);
preparedStatementEntityUpsert.setInt(3, frequency);
preparedStatementEntityUpsert.setInt(4, frequency);
ResultSet rs = preparedStatementEntityUpsert.executeQuery();
rs.next();
entityId = rs.getInt(1);
return entityId;
}
/**
* Insert entityoffset.
*
* @param docid
* the docid
* @param entid
* the entid
* @param entitystart
* the entitystart
* @param entityend
* the entityend
* @throws SQLException
* the SQL exception
*/
public synchronized void insertEntityoffset(Integer docid, Integer entid, Integer entitystart, Integer entityend)
throws SQLException {
preparedStatementEntityoffset.setInt(1, docid);
preparedStatementEntityoffset.setInt(2, entid);
preparedStatementEntityoffset.setInt(3, entitystart);
preparedStatementEntityoffset.setInt(4, entityend);
preparedStatementEntityoffset.addBatch();
}
/**
* Insert eventtime.
*
* @param docid
* the docid
* @param beginoffset
* the beginoffset
* @param endoffset
* the endoffset
* @param timex
* the timex
* @param type
* the type
* @param timexvalue
* the timexvalue
* @throws SQLException
* the SQL exception
*/
public synchronized void insertEventtime(Integer docid, Integer beginoffset, Integer endoffset, String timex,
String type, String timexvalue) throws SQLException {
preparedStatementEventtime.setInt(1, docid);
preparedStatementEventtime.setInt(2, beginoffset);
preparedStatementEventtime.setInt(3, endoffset);
preparedStatementEventtime.setString(4, timex.replaceAll("\u0000", ""));
preparedStatementEventtime.setString(5, type);
preparedStatementEventtime.setString(6, timexvalue.replaceAll("\u0000", ""));
preparedStatementEventtime.addBatch();
}
/**
* Insert keyterms.
*
* @param docid
* the docid
* @param term
* the term
* @param frequency
* the frequency
* @throws SQLException
* the SQL exception
*/
public synchronized void insertKeyterms(Integer docid, String term, Integer frequency) throws SQLException {
preparedStatementKeyterms.setInt(1, docid);
preparedStatementKeyterms.setString(2, term.replaceAll("\u0000", ""));
preparedStatementKeyterms.setInt(3, frequency);
preparedStatementKeyterms.addBatch();
}
/**
* Execute batches.
*
* @throws SQLException
* the SQL exception
*/
public synchronized void executeBatches() throws SQLException {
preparedStatementEntityoffset.executeBatch();
preparedStatementEntityoffset.clearBatch();
preparedStatementEventtime.executeBatch();
preparedStatementEventtime.clearBatch();
preparedStatementKeyterms.executeBatch();
preparedStatementKeyterms.clearBatch();
if (documentCounter % INTERNAL_BATCH_SIZE == 0) {
this.commit();
}
}
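	/*
	 * A minimal usage sketch (illustration only, not part of the original
	 * source), assuming a writer thread processing one document:
	 *
	 *   postgresResource.insertDocument(docId, text, "2018-01-01");
	 *   int entId = postgresResource.insertEntity("Alice", "PER", 2);
	 *   postgresResource.insertEntityoffset(docId, entId, 10, 15);
	 *   postgresResource.executeBatches(); // flushes batches; commits every
	 *                                      // INTERNAL_BATCH_SIZE documents
	 */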
}
| 13,258 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
HooverResource.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/resources/HooverResource.java | package uhh_lt.newsleak.resources;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Map;
import org.apache.uima.fit.component.Resource_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.resource.ResourceSpecifier;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import io.searchbox.client.JestClient;
import io.searchbox.client.JestClientFactory;
import io.searchbox.client.config.HttpClientConfig;
/**
* Provides shared functionality and data for the @see
* uhh_lt.newsleak.reader.HooverElasticsearchReader. This resource connects
* directly to Hoover's elasticsearch index via a Jest client.
*/
public class HooverResource extends Resource_ImplBase {
/** The logger. */
private Logger logger;
/** The Constant HOOVER_DOCUMENT_TYPE. */
public static final String HOOVER_DOCUMENT_TYPE = "doc";
/** The Constant PARAM_HOST. */
public static final String PARAM_HOST = "mHost";
/** The m host. */
@ConfigurationParameter(name = PARAM_HOST)
private String mHost;
/** The Constant PARAM_PORT. */
public static final String PARAM_PORT = "mPort";
/** The m port. */
@ConfigurationParameter(name = PARAM_PORT)
private Integer mPort;
/** The Constant PARAM_INDEX. */
public static final String PARAM_INDEX = "mIndex";
/** The m index. */
@ConfigurationParameter(name = PARAM_INDEX)
private String mIndex;
/** The Constant PARAM_CLUSTERNAME. */
public static final String PARAM_CLUSTERNAME = "mClustername";
/** The m clustername. */
@ConfigurationParameter(name = PARAM_CLUSTERNAME)
private String mClustername;
/** The Constant PARAM_SEARCHURL. */
public static final String PARAM_SEARCHURL = "mSearchUrl";
/** The m search url. */
@ConfigurationParameter(name = PARAM_SEARCHURL)
private String mSearchUrl;
/** The hoover search relative base path. */
private String indexPath;
/** The client. */
private JestClient client;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.Resource_ImplBase#initialize(org.apache.uima.
* resource.ResourceSpecifier, java.util.Map)
*/
@Override
public boolean initialize(ResourceSpecifier aSpecifier, Map<String, Object> aAdditionalParams)
throws ResourceInitializationException {
if (!super.initialize(aSpecifier, aAdditionalParams)) {
return false;
}
this.logger = this.getLogger();
// Construct a new Jest client according to configuration via factory
JestClientFactory factory = new JestClientFactory();
factory.setHttpClientConfig(new HttpClientConfig.Builder(mHost + ":" + mPort).multiThreaded(false).build());
client = factory.getObject();
indexPath = mIndex + "/";
return true;
}
/**
* Gets the elasticsearch client.
*
* @return the client
*/
public JestClient getClient() {
return client;
}
/**
* Gets the elasticsearch index.
*
* @return the index
*/
public String getIndex() {
return mIndex;
}
/**
* Sets the elasticsearch index.
*
* @param mIndex
* the new index
*/
public void setIndex(String mIndex) {
this.mIndex = mIndex;
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.resource.Resource_ImplBase#destroy()
*/
@Override
public void destroy() {
super.destroy();
try {
client.close();
logger.log(Level.INFO, "Hoover connection closed.");
} catch (IOException e) {
logger.log(Level.SEVERE, "Error closing Hoover connection.");
e.printStackTrace();
}
}
/**
* Gets the elasticsearch base URL (can be useful to link from newsleak to the
* original source document in Hoover).
*
* @return the client url
*/
public String getHooverBasePath() {
return indexPath;
}
/**
* Extracts the document ids from a Jest request on Hoover's elasticsearch
* index.
*
* @param hits
* the hits
* @return the ids
*/
public ArrayList<String> getIds(JsonArray hits) {
ArrayList<String> idList = new ArrayList<String>();
for (JsonElement hit : hits) {
idList.add(hit.getAsJsonObject().get("_id").getAsString());
}
return idList;
}
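	/*
	 * Illustration only (not part of the original source): getIds() assumes
	 * each element of the passed JsonArray is an elasticsearch hit object of
	 * the (abbreviated) form
	 *
	 *   { "_index": "hoover", "_type": "doc", "_id": "abc123", "_source": {...} }
	 *
	 * from which only the "_id" field is extracted.
	 */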
}
| 4,226 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
ElasticsearchResource.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/resources/ElasticsearchResource.java | package uhh_lt.newsleak.resources;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.net.InetAddress;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import org.apache.uima.fit.component.Resource_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.resource.ResourceSpecifier;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import uhh_lt.newsleak.util.AtomicCounter;
/**
* Provides shared functionality and data for the @see
* uhh_lt.newsleak.writer.ElasticsearchDocumentWriter which writes fulltexts
* extracted by a reader process temporarily into the newsleak elasticsearch
* index.
*
 * The class takes care of assigning unique document ids (as auto-incremented
 * integers) to documents indexed by newsleak. When long documents are split
 * (if the parameter paragraphsasdocuments = true in the pre-processing
 * configuration), a new document id needs to be created for each document
 * split. To correctly assign metadata of the original document to its splits,
 * the class keeps a record of the mapping from original document ids to
 * document split ids. @see
 * uhh_lt.newsleak.preprocessing.InformationExtraction2Postgres duplicates the
 * metadata according to this record later on.
*/
public class ElasticsearchResource extends Resource_ImplBase {
/** The logger. */
private Logger logger;
/** The Constant DOCUMENT_TYPE. */
private static final String DOCUMENT_TYPE = "document";
/** The Constant PARAM_HOST. */
public static final String PARAM_HOST = "mHost";
/** The m host. */
@ConfigurationParameter(name = PARAM_HOST)
private String mHost;
/** The Constant PARAM_PORT. */
public static final String PARAM_PORT = "mPort";
/** The m port. */
@ConfigurationParameter(name = PARAM_PORT)
private Integer mPort;
/** The Constant PARAM_INDEX. */
public static final String PARAM_INDEX = "mIndex";
/** The m index. */
@ConfigurationParameter(name = PARAM_INDEX)
private String mIndex;
/** The Constant PARAM_CLUSTERNAME. */
public static final String PARAM_CLUSTERNAME = "mClustername";
/** The m clustername. */
@ConfigurationParameter(name = PARAM_CLUSTERNAME)
private String mClustername;
/** The Constant PARAM_DOCUMENT_MAPPING_FILE. */
public static final String PARAM_DOCUMENT_MAPPING_FILE = "documentMappingFile";
/** The document mapping file. */
@ConfigurationParameter(name = PARAM_DOCUMENT_MAPPING_FILE)
private String documentMappingFile;
/** The Constant PARAM_CREATE_INDEX. */
public final static String PARAM_CREATE_INDEX = "createIndex";
/** The create index. */
@ConfigurationParameter(name = PARAM_CREATE_INDEX, mandatory = false, defaultValue = "false", description = "If true, an new index will be created (existing index will be removed).")
private boolean createIndex;
/** The Constant PARAM_METADATA_FILE. */
public static final String PARAM_METADATA_FILE = "mMetadata";
/** The m metadata. */
@ConfigurationParameter(name = PARAM_METADATA_FILE)
private String mMetadata;
/** The metadata file. */
private File metadataFile;
/** The elasticsearch client. */
private TransportClient client;
/** The autoincrement value for generating unique document ids. */
private AtomicCounter autoincrementValue;
/**
* Mapping of temporary document ids to new document ids (necessary for
* paragraph splitting and correct metadata association)
*/
private HashMap<Integer, ArrayList<Integer>> documentIdMapping;
/**
* Memorize, if document id mapping has been written already (for parallel
* execution of CPEs).
*/
private boolean documentIdMappingWritten = false;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.Resource_ImplBase#initialize(org.apache.uima.
* resource.ResourceSpecifier, java.util.Map)
*/
@Override
public boolean initialize(ResourceSpecifier aSpecifier, Map<String, Object> aAdditionalParams)
throws ResourceInitializationException {
if (!super.initialize(aSpecifier, aAdditionalParams)) {
return false;
}
// setup elasticsearch connection
this.logger = this.getLogger();
Settings settings = Settings.builder().put("cluster.name", mClustername).build();
try {
client = TransportClient.builder().settings(settings).build()
.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(mHost), mPort));
if (createIndex) {
createIndex();
}
} catch (Exception e) {
e.printStackTrace();
			System.exit(1);
}
// initialize fields
autoincrementValue = new AtomicCounter();
documentIdMapping = new HashMap<Integer, ArrayList<Integer>>();
metadataFile = new File(mMetadata + ".id-map");
return true;
}
/**
* Gets the elasticsearch client.
*
* @return the client
*/
public TransportClient getClient() {
return client;
}
/**
* Gets the elasticsearch index.
*
* @return the index
*/
public String getIndex() {
return mIndex;
}
/**
* Sets the elasticsearch index.
*
* @param mIndex
* the new index
*/
public void setIndex(String mIndex) {
this.mIndex = mIndex;
}
/*
* (non-Javadoc)
*
* @see org.apache.uima.resource.Resource_ImplBase#destroy()
*/
@Override
public void destroy() {
super.destroy();
client.close();
}
/**
* Creates a new elasticsearch index and adds a mapping for the document type.
* Previously existing indexes will be removed.
*
* @throws Exception
* the exception
*/
private void createIndex() throws Exception {
boolean exists = client.admin().indices().prepareExists(mIndex).execute().actionGet().isExists();
// remove preexisting index
if (exists) {
logger.log(Level.INFO, "Preexisting index " + mIndex + " will be removed.");
DeleteIndexResponse deleteResponse = client.admin().indices().delete(new DeleteIndexRequest(mIndex))
.actionGet();
if (deleteResponse.isAcknowledged()) {
logger.log(Level.INFO, "Preexisting index " + mIndex + " successfully removed.");
exists = false;
}
}
// create schema mapping from file
logger.log(Level.INFO, "Index " + mIndex + " will be created.");
String docMapping = new String(Files.readAllBytes(Paths.get(documentMappingFile)));
XContentBuilder builder = XContentFactory.jsonBuilder();
XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(docMapping.getBytes());
		builder.copyCurrentStructure(parser);
		parser.close();
CreateIndexRequestBuilder createIndexRequestBuilder = client.admin().indices().prepareCreate(mIndex);
createIndexRequestBuilder.addMapping(DOCUMENT_TYPE, builder);
createIndexRequestBuilder.execute().actionGet();
}
/**
* Generates a new document id.
*
* @return the next document id
*/
public synchronized int getNextDocumentId() {
autoincrementValue.increment();
return autoincrementValue.value();
}
/**
* Adds a new mapping between temporary and final document ids
*
* @param tmpId
* the tmp id
* @param newIds
* the new ids (can be more than one due to splitting of long
* documents)
*/
public synchronized void addDocumentIdMapping(Integer tmpId, ArrayList<Integer> newIds) {
documentIdMapping.put(tmpId, newIds);
}
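	/*
	 * Illustration only (not part of the original source): if a temporary
	 * document with id 17 is split into three paragraphs, the writer might
	 * register the mapping 17 -> [345, 346, 347]; the metadata of document 17
	 * is later duplicated for each of the three split ids.
	 */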
/**
* Write document id mapping to disk.
*
* @throws IOException
* Signals that an I/O exception has occurred.
*/
public synchronized void writeDocumentIdMapping() throws IOException {
if (!documentIdMappingWritten) {
logger.log(Level.INFO, "Writing document id mapping file " + metadataFile);
FileOutputStream fos = new FileOutputStream(metadataFile);
ObjectOutputStream oos = new ObjectOutputStream(fos);
oos.writeObject(documentIdMapping);
oos.close();
/* run only once even in parallel execution of a CPE */
documentIdMappingWritten = true;
}
}
}
| 8,723 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
MetadataResource.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/resources/MetadataResource.java | package uhh_lt.newsleak.resources;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.lang3.StringUtils;
import org.apache.uima.fit.component.Resource_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.resource.ResourceSpecifier;
/**
* Provides shared functionality and data for the @see
* uhh_lt.newsleak.reader.HooverElasticsearchReader and @see
* uhh_lt.newsleak.reader.HooverElasticsearchApiReader. Metadata is written
* temporarily to disk in CSV format for later import into the newsleak postgres
* database.
*/
public class MetadataResource extends Resource_ImplBase {
/** The Constant PARAM_METADATA_FILE. */
public static final String PARAM_METADATA_FILE = "mMetadata";
/** The m metadata. */
@ConfigurationParameter(name = PARAM_METADATA_FILE)
private String mMetadata;
/** The Constant PARAM_RESET_METADATA_FILE. */
public static final String PARAM_RESET_METADATA_FILE = "resetMetadata";
/** The reset metadata. */
@ConfigurationParameter(name = PARAM_RESET_METADATA_FILE)
private boolean resetMetadata;
/** The metadata file. */
private File metadataFile;
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.Resource_ImplBase#initialize(org.apache.uima.
* resource.ResourceSpecifier, java.util.Map)
*/
@Override
public boolean initialize(ResourceSpecifier aSpecifier, Map<String, Object> aAdditionalParams)
throws ResourceInitializationException {
if (!super.initialize(aSpecifier, aAdditionalParams)) {
return false;
}
metadataFile = new File(mMetadata);
if (resetMetadata) {
try {
// reset metadata file
new FileOutputStream(metadataFile).close();
} catch (IOException e) {
e.printStackTrace();
System.exit(1);
}
}
return true;
}
/**
* Append a list of metadata entries for one document to the temporary metadata
* file.
*
* @param metadata
* the metadata
*/
public synchronized void appendMetadata(List<List<String>> metadata) {
try {
BufferedWriter writer = new BufferedWriter(new FileWriter(metadataFile, true));
CSVPrinter csvPrinter = new CSVPrinter(writer, CSVFormat.RFC4180);
csvPrinter.printRecords(metadata);
csvPrinter.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Create a metadata entry with type text.
*
* @param docId
* the doc id
* @param key
* the key
* @param value
* the value
* @return the array list
*/
public ArrayList<String> createTextMetadata(String docId, String key, String value) {
ArrayList<String> meta = new ArrayList<String>();
meta.add(docId);
meta.add(StringUtils.capitalize(key));
meta.add(value.replaceAll("\\r|\\n", " "));
meta.add("Text");
return meta;
}
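	/*
	 * Illustration only (not part of the original source): a call such as
	 * createTextMetadata("42", "subject", "Re: budget\nplan") would produce
	 * the CSV record ["42", "Subject", "Re: budget plan", "Text"]; the key is
	 * capitalized and line breaks in the value are flattened to spaces.
	 */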
}
| 3,123 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
TextLineWriterResource.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/resources/TextLineWriterResource.java | package uhh_lt.newsleak.resources;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import org.apache.uima.fit.component.Resource_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
/**
* Provides shared functionality and data for the @see
 * uhh_lt.newsleak.writer.TextLineWriter. It allows for synchronized writing of
 * text by any annotator class to a given output file.
*
* This writer is used for debug purposes only.
*/
public class TextLineWriterResource extends Resource_ImplBase {
/** The Constant PARAM_OUTPUT_FILE. */
public static final String PARAM_OUTPUT_FILE = "outputFile";
/** The outfile. */
@ConfigurationParameter(name = PARAM_OUTPUT_FILE, mandatory = true, description = "Output dir for writing")
private File outfile;
/**
* Append text to the output file.
*
* @param text
* the text
*/
public synchronized void append(String text) {
FileWriter fileWriter;
try {
fileWriter = new FileWriter(outfile, true);
fileWriter.write(text + "\n");
fileWriter.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
| 1,141 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
LanguageDetectorResource.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/resources/LanguageDetectorResource.java | package uhh_lt.newsleak.resources;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import org.apache.uima.fit.component.Resource_ImplBase;
import org.apache.uima.fit.descriptor.ConfigurationParameter;
import org.apache.uima.resource.ResourceInitializationException;
import org.apache.uima.resource.ResourceSpecifier;
import org.apache.uima.util.Level;
import org.apache.uima.util.Logger;
import opennlp.tools.cmdline.langdetect.LanguageDetectorModelLoader;
import opennlp.tools.langdetect.LanguageDetectorModel;
/**
* Provides shared functionality and data for the @see
 * uhh_lt.newsleak.annotator.LanguageDetector, such as loading an openNLP
 * language detection model and tracking simple statistics of language counts.
*/
public class LanguageDetectorResource extends Resource_ImplBase {
/** The Constant PARAM_MODEL_FILE. */
public static final String PARAM_MODEL_FILE = "mModelfile";
/** The m modelfile. */
@ConfigurationParameter(name = PARAM_MODEL_FILE)
private String mModelfile;
/** The language counter. */
private HashMap<String, Integer> languageCounter;
/** The model. */
private LanguageDetectorModel model;
/** The statistics logged. */
private boolean statisticsLogged = false;
/**
* Gets the openNLP language detection model.
*
* @return the model
*/
public LanguageDetectorModel getModel() {
return model;
}
/*
* (non-Javadoc)
*
* @see
* org.apache.uima.fit.component.Resource_ImplBase#initialize(org.apache.uima.
* resource.ResourceSpecifier, java.util.Map)
*/
@Override
public boolean initialize(ResourceSpecifier aSpecifier, Map<String, Object> aAdditionalParams)
throws ResourceInitializationException {
if (!super.initialize(aSpecifier, aAdditionalParams)) {
return false;
}
model = new LanguageDetectorModelLoader().load(new File(mModelfile));
languageCounter = new HashMap<String, Integer>();
return true;
}
/**
* Increments the counter for a specific language.
*
* @param language
* the language
*/
public synchronized void addLanguage(String language) {
languageCounter.put(language, languageCounter.containsKey(language) ? languageCounter.get(language) + 1 : 1);
}
/**
* Log language statistics.
*
* @param logger
* the logger
*/
public synchronized void logLanguageStatistics(Logger logger) {
if (!statisticsLogged) {
StringBuilder sb = new StringBuilder();
sb.append("Languages detected in current collection\n");
sb.append("-------------------------------------\n");
for (String language : languageCounter.keySet()) {
sb.append(language + ": " + languageCounter.get(language) + "\n");
}
logger.log(Level.INFO, sb.toString());
statisticsLogged = true;
}
}
}
| 2,771 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
AtomicCounter.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/util/AtomicCounter.java | package uhh_lt.newsleak.util;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * A thread-safe counter to concurrently count up indexes.
*/
public class AtomicCounter {
/** A concurrent integer. */
private AtomicInteger c = new AtomicInteger(0);
/**
* Increment.
*/
public void increment() {
c.incrementAndGet();
}
/**
* Decrement.
*/
public void decrement() {
c.decrementAndGet();
}
/**
* The value of the counter.
*
* @return the int
*/
public int value() {
return c.get();
}
} | 517 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
MapUtil.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/util/MapUtil.java | package uhh_lt.newsleak.util;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
/**
 * Allows for sorting of maps by their contained values in increasing and
 * decreasing order.
*/
public class MapUtil {
/**
* Sort by value (increasing).
*
* @param <K>
* the key type
* @param <V>
* the value type
* @param map
* the map
* @return the map
*/
public static <K, V extends Comparable<? super V>> Map<K, V> sortByValue(Map<K, V> map) {
return sortByValue(map, 1);
}
/**
* Sort by value (decreasing).
*
* @param <K>
* the key type
* @param <V>
* the value type
* @param map
* the map
* @return the map
*/
public static <K, V extends Comparable<? super V>> Map<K, V> sortByValueDecreasing(Map<K, V> map) {
return sortByValue(map, -1);
}
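	/*
	 * A minimal usage sketch (illustration only, not part of the original
	 * source), assuming a hypothetical term frequency map:
	 *
	 *   Map<String, Integer> counts = Map.of("data", 3, "leak", 7);
	 *   Map<String, Integer> ranked = MapUtil.sortByValueDecreasing(counts);
	 *   // iteration order of 'ranked' is now: leak=7, data=3
	 */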
/**
* Internal sorting mechanism.
*
* @param <K>
* the key type
* @param <V>
* the value type
* @param map
* the map
* @param multiplier
* the multiplier
* @return the map
*/
private static <K, V extends Comparable<? super V>> Map<K, V> sortByValue(Map<K, V> map, Integer multiplier) {
List<Map.Entry<K, V>> list = new LinkedList<Map.Entry<K, V>>(map.entrySet());
Collections.sort(list, new Comparator<Map.Entry<K, V>>() {
public int compare(Map.Entry<K, V> o1, Map.Entry<K, V> o2) {
return (o1.getValue()).compareTo(o2.getValue()) * multiplier;
}
});
Map<K, V> result = new LinkedHashMap<K, V>();
for (Map.Entry<K, V> entry : list) {
result.put(entry.getKey(), entry.getValue());
}
return result;
}
} | 1,767 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
ResultSetIterable.java | /FileExtraction/Java_unseen/uhh-lt_newsleak/preprocessing/src/main/java/uhh_lt/newsleak/util/ResultSetIterable.java | package uhh_lt.newsleak.util;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Iterator;
import java.util.function.Function;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
/**
* The Class ResultSetIterable provides stream processing of result sets.
*
* @param <T> the generic type
*/
public class ResultSetIterable<T> implements Iterable<T> {
/** The rs. */
private final ResultSet rs;
/** The process next. */
private final Function<ResultSet, T> processNext;
/**
* Instantiates a new result set iterable.
*
* @param rs the rs
* @param processNext the process next
*/
public ResultSetIterable(ResultSet rs, Function<ResultSet, T> processNext){
this.rs = rs;
// processNext is the mapper function to handle the fetched resultSet
this.processNext = processNext;
}
/* (non-Javadoc)
* @see java.lang.Iterable#iterator()
*/
@Override
public Iterator<T> iterator() {
try {
return new Iterator<T>() {
// the iterator state is initialized by calling next() to
// know whether there are elements to iterate
boolean hasNext = rs.next();
@Override
public boolean hasNext() {
return hasNext;
}
@Override
public T next() {
T result = processNext.apply(rs);
          // after each get, we need to update the hasNext info
try {
hasNext = rs.next();
} catch (SQLException e) {
throw new RuntimeException(e);
}
return result;
}
};
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Stream.
*
* @return the stream
*/
public Stream<T> stream() {
return StreamSupport.stream(this.spliterator(), false);
}
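  /*
   * A minimal usage sketch (illustration only, not part of the original
   * source), assuming an open ResultSet 'rs' with a single text column:
   *
   *   List<String> values = new ResultSetIterable<>(rs, r -> {
   *       try {
   *           return r.getString(1);
   *       } catch (SQLException e) {
   *           throw new RuntimeException(e);
   *       }
   *   }).stream().collect(Collectors.toList());
   */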
} | 1,701 | Java | .java | uhh-lt/newsleak | 52 | 15 | 16 | 2016-06-24T11:44:24Z | 2022-12-03T06:22:01Z |
MPDExceptionTest.java | /FileExtraction/Java_unseen/finnyb_javampd/src/test/java/org/bff/javampd/MPDExceptionTest.java | package org.bff.javampd;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import org.junit.jupiter.api.Test;
class MPDExceptionTest {
@Test
void testDefaultConstructorCommand() {
MPDException exception = new MPDException();
assertNull(exception.getCommand());
assertNull(exception.getMessage());
assertNull(exception.getCause());
}
@Test
void testMessageConstructorCommand() {
String message = "message";
MPDException exception = new MPDException(message);
assertNull(exception.getCommand());
assertEquals(exception.getMessage(), message);
assertNull(exception.getCause());
}
@Test
void testCauseConstructorCommand() {
String message = "message";
Exception cause = new Exception(message);
MPDException exception = new MPDException(cause);
assertNull(exception.getCommand());
assertEquals("java.lang.Exception: message", exception.getMessage());
assertEquals(exception.getCause(), cause);
}
@Test
void testMessageCauseConstructorCommand() {
Exception cause = new Exception();
String message = "message";
MPDException exception = new MPDException(message, cause);
assertNull(exception.getCommand());
assertEquals(exception.getMessage(), message);
assertEquals(exception.getCause(), cause);
}
@Test
void testMessageCauseCommandConstructorCommand() {
Exception cause = new Exception();
String message = "message";
String command = "command";
MPDException exception = new MPDException(message, command, cause);
assertEquals(exception.getCommand(), command);
assertEquals(exception.getMessage(), message);
assertEquals(exception.getCause(), cause);
}
}
| 1,767 | Java | .java | finnyb/javampd | 35 | 21 | 13 | 2013-11-21T15:24:25Z | 2024-04-27T11:03:48Z |
MPDSystemClockTest.java | /FileExtraction/Java_unseen/finnyb_javampd/src/test/java/org/bff/javampd/MPDSystemClockTest.java | package org.bff.javampd;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.time.LocalDateTime;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
class MPDSystemClockTest {
private Clock clock;
@BeforeEach
void before() {
clock = new MPDSystemClock();
}
@Test
void now() {
LocalDateTime systemTime = LocalDateTime.now();
LocalDateTime clockTime = clock.now();
int deviation = 1;
assertTrue(
systemTime.minusSeconds(deviation).isBefore(clockTime)
&& systemTime.plusSeconds(deviation).isAfter(clockTime));
}
@Test
void min() {
assertEquals(LocalDateTime.MIN, clock.min());
}
}
| 753 | Java | .java | finnyb/javampd | 35 | 21 | 13 | 2013-11-21T15:24:25Z | 2024-04-27T11:03:48Z |
MPDSongConverterTest.java | /FileExtraction/Java_unseen/finnyb_javampd/src/test/java/org/bff/javampd/song/MPDSongConverterTest.java | package org.bff.javampd.song;
import static org.junit.jupiter.api.Assertions.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
class MPDSongConverterTest {
private SongConverter converter;
@BeforeEach
void before() {
converter = new MPDSongConverter();
}
@Test
void defaults() {
var s =
converter
.convertResponseToSongs(List.of("file: Tool/10,000 Days/01 Vicarious.flac"))
.getFirst();
assertAll(
() -> assertNull(s.getArtistName()),
() -> assertNull(s.getAlbumArtist()),
() -> assertNull(s.getAlbumName()),
() -> assertNull(s.getTrack()),
() -> assertNull(s.getName()),
() -> assertNull(s.getTitle()),
() -> assertNull(s.getDate()),
() -> assertNull(s.getGenre()),
() -> assertNull(s.getComment()),
() -> assertEquals(-1, s.getLength()),
() -> assertNull(s.getDiscNumber()),
() -> assertEquals("Tool/10,000 Days/01 Vicarious.flac", s.getFile()));
}
@Test
void terminationNull() {
assertEquals(
"Tool/10,000 Days/01 Vicarious.flac",
converter
.convertResponseToSongs(List.of("file: Tool/10,000 Days/01 Vicarious.flac"))
.getFirst()
.getFile());
}
@Test
void single() {
var s = converter.convertResponseToSongs(createSingleResponse()).getFirst();
assertAll(
() -> assertEquals("Tool", s.getArtistName()),
() -> assertEquals("Tool", s.getAlbumArtist()),
() -> assertEquals("10,000 Days", s.getAlbumName()),
() -> assertEquals("1", s.getTrack()),
() -> assertEquals("Vicarious", s.getName()),
() -> assertEquals("Vicarious", s.getTitle()),
() -> assertEquals("2006-04-28", s.getDate()),
() -> assertEquals("Hard Rock", s.getGenre()),
() -> assertEquals("JavaMPD comment", s.getComment()),
() -> assertEquals(427, s.getLength()),
() -> assertEquals("1", s.getDiscNumber()),
() -> assertEquals("Tool/10,000 Days/01 Vicarious.flac", s.getFile()));
}
@Test
void clearAttributes() {
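    // tags seen for the first song (Genre, Date, AlbumArtist) must not carry
    // over to the second song parsed from the same response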
var songs =
converter.convertResponseToSongs(
Arrays.asList(
"file: Tool/10,000 Days/01 Vicarious.flac",
"Album: 10,000 Days",
"AlbumArtist: Tool",
"Genre: Hard Rock",
"Date: 2006",
"Track: 1",
"""
file: Greta Van Fleet/Anthem of the Peaceful Army/03-When the\
Curtain Falls.flac\
""",
"Artist: Greta Van Fleet",
"Album: Anthem of the Peaceful Army",
"Track: 3"));
var s = songs.get(1);
assertAll(
() -> assertNull(s.getDate()),
() -> assertNull(s.getGenre()),
() -> assertNull(s.getTagMap().get("Genre")));
}
@Test
void multipleFirst() {
var s = new ArrayList<>(converter.convertResponseToSongs(createMultipleResponse()));
var s1 = s.getFirst();
assertAll(
() -> assertEquals("Breaking Benjamin", s1.getArtistName()),
() -> assertEquals("Breaking Benjamin", s1.getAlbumArtist()),
() -> assertEquals("Ember", s1.getAlbumName()),
() -> assertEquals("3", s1.getTrack()),
() -> assertEquals("Red Cold River", s1.getName()),
() -> assertEquals("Red Cold River", s1.getTitle()),
() -> assertEquals("2018", s1.getDate()),
() -> assertEquals("Alternative Metal", s1.getGenre()),
() -> assertEquals(201, s1.getLength()),
() -> assertEquals("1", s1.getDiscNumber()),
() -> assertEquals("Breaking Benjamin/Ember/03. Red Cold River.flac", s1.getFile()));
}
@Test
void multipleMiddle() {
var s = new ArrayList<>(converter.convertResponseToSongs(createMultipleResponse()));
var s2 = s.get(1);
assertAll(
() -> assertEquals("Breaking Benjamin", s2.getArtistName()),
() -> assertNull(s2.getAlbumArtist()),
() -> assertEquals("We Are Not Alone", s2.getAlbumName()),
() -> assertEquals("1", s2.getTrack()),
() -> assertEquals("So Cold", s2.getName()),
() -> assertEquals("So Cold", s2.getTitle()),
() -> assertEquals("2004", s2.getDate()),
() -> assertNull(s2.getGenre()),
() -> assertEquals(273, s2.getLength()),
() -> assertNull(s2.getDiscNumber()),
() -> assertEquals("Breaking Benjamin/We Are Not Alone/01-So Cold.flac", s2.getFile()));
}
@Test
void multipleLast() {
var s = new ArrayList<>(converter.convertResponseToSongs(createMultipleResponse()));
var s3 = s.get(2);
assertAll(
() -> assertEquals("Greta Van Fleet", s3.getArtistName()),
() -> assertEquals("Greta Van Fleet", s3.getAlbumArtist()),
() -> assertEquals("Anthem of the Peaceful Army", s3.getAlbumName()),
() -> assertEquals("2", s3.getTrack()),
() -> assertEquals("The Cold Wind", s3.getName()),
() -> assertEquals("The Cold Wind", s3.getTitle()),
() -> assertEquals("2018", s3.getDate()),
() -> assertEquals("Rock", s3.getGenre()),
() -> assertEquals(197, s3.getLength()),
() -> assertEquals("1", s3.getDiscNumber()),
() ->
assertEquals(
"Greta Van Fleet/Anthem of the Peaceful Army/02 The Cold Wind.flac", s3.getFile()));
}
@Test
void tagMap() {
var s = converter.convertResponseToSongs(createSingleResponse()).getFirst();
var m = s.getTagMap();
assertAll(
() -> assertEquals(24, m.size()),
() -> assertEquals("Tool", m.get("AlbumArtist").getFirst()));
}
@Test
@DisplayName("multiple instances of the same tag")
void tagMapMultipleTags() {
var s = converter.convertResponseToSongs(createSingleResponse()).getFirst();
var p = s.getTagMap().get("Performer");
assertAll(
() -> assertEquals(4, p.size()),
() -> assertEquals("Danny Carey (membranophone)", p.getFirst()),
() -> assertEquals("Justin Chancellor (bass guitar)", p.get(1)),
() -> assertEquals("Adam Jones (guitar)", p.get(2)),
() -> assertEquals("Maynard James Keenan (lead vocals)", p.get(3)));
}
@Test
void tagMapNotTagLine() {
var songs =
converter.convertResponseToSongs(
Arrays.asList("file: Tool/10,000 Days/01 Vicarious.flac", "NotATag"));
var s = songs.getFirst();
assertEquals(0, s.getTagMap().size());
}
@Test
void fileNameList() {
var files =
this.converter.getSongFileNameList(
Arrays.asList(
"file: Tool/10,000 Days/01 Vicarious.flac",
"""
File: Greta Van Fleet/Anthem of the Peaceful Army/02 The Cold\
Wind.flac\
"""));
assertAll(
() -> assertEquals("Tool/10,000 Days/01 Vicarious.flac", files.getFirst()),
() ->
assertEquals(
"Greta Van Fleet/Anthem of the Peaceful Army/02 The Cold Wind.flac", files.get(1)));
}
@Test
void singleSongUnknownResponse() {
var response = new ArrayList<>(createSingleResponse());
response.add("unknown: I dont know");
assertDoesNotThrow(() -> this.converter.convertResponseToSongs(response));
}
@Test
void emptyResponse() {
assertEquals(0, this.converter.convertResponseToSongs(new ArrayList<>()).size());
}
private List<String> createSingleResponse() {
return Arrays.asList(
"file: Tool/10,000 Days/01 Vicarious.flac",
"Last-Modified: 2022-02-19T12:52:00Z",
"Format: 44100:16:2",
"Time: 427",
"duration: 426.680",
"Performer: Danny Carey (membranophone)",
"Performer: Justin Chancellor (bass guitar)",
"Performer: Adam Jones (guitar)",
"Performer: Maynard James Keenan (lead vocals)",
"MUSICBRAINZ_RELEASETRACKID: 73735e2e-5d72-4453-8545-d1e55f2c17ae",
"MUSICBRAINZ_WORKID: 1a1872d9-04cb-4c35-8b83-33eb76d8a45a",
"Album: 10,000 Days",
"AlbumArtist: Tool",
"AlbumArtistSort: Tool",
"Artist: Tool",
"ArtistSort: Tool",
"Disc: 1",
"Genre: Hard Rock",
"Label: Tool Dissectional",
"MUSICBRAINZ_ALBUMARTISTID: 66fc5bf8-daa4-4241-b378-9bc9077939d2",
"MUSICBRAINZ_ALBUMID: 287a7dee-5c59-4bae-9972-b806d8fcb8ed",
"MUSICBRAINZ_ARTISTID: 66fc5bf8-daa4-4241-b378-9bc9077939d2",
"MUSICBRAINZ_TRACKID: a48c9643-d98b-4043-9b24-be04eee0e807",
"OriginalDate: 2006-04-28",
"Title: Vicarious",
"Date: 2006",
"Date: 2006-04-28",
"Comment: JavaMPD comment",
"Track: 1",
"ok");
}
private List<String> createMultipleResponse() {
return Arrays.asList(
"file: Breaking Benjamin/Ember/03. Red Cold River.flac",
"Last-Modified: 2022-02-19T12:50:00Z",
"Format: 44100:16:2",
"Time: 201",
"duration: 200.960",
"Album: Ember",
"Artist: Breaking Benjamin",
"AlbumArtist: Breaking Benjamin",
"Disc: 1",
"Genre: Alternative Metal",
"Title: Red Cold River",
"Date: 2018",
"Track: 3",
"file: Breaking Benjamin/We Are Not Alone/01-So Cold.flac",
"Last-Modified: 2022-02-19T12:50:00Z",
"Format: 44100:16:2",
"Time: 273",
"duration: 273.293",
"Album: We Are Not Alone",
"Artist: Breaking Benjamin",
"Title: So Cold",
"Date: 2004",
"Track: 1",
"file: Greta Van Fleet/Anthem of the Peaceful Army/02 The Cold Wind.flac",
"Last-Modified: 2022-02-19T12:56:00Z",
"Format: 44100:16:2",
"Time: 197",
"duration: 196.546",
"MUSICBRAINZ_RELEASETRACKID: 0081de95-3d88-43b3-9bb4-ff0fde825556",
"AlbumArtistSort: Greta Van Fleet",
"ArtistSort: Greta Van Fleet",
"Disc: 1",
"Label: Republic Records",
"MUSICBRAINZ_ALBUMARTISTID: 0be22557-d8c7-4706-a531-625c4c570162",
"MUSICBRAINZ_ALBUMID: 87fc4a33-8cea-4d1f-a00b-ca02791cc288",
"MUSICBRAINZ_ARTISTID: 0be22557-d8c7-4706-a531-625c4c570162",
"MUSICBRAINZ_TRACKID: f4872a53-bf91-47ff-8115-b9af8e0d9398",
"OriginalDate: 2018-10-19",
"Title: The Cold Wind",
"Artist: Greta Van Fleet",
"Album: Anthem of the Peaceful Army",
"Genre: Rock",
"AlbumArtist: Greta Van Fleet",
"Disc: 1",
"Date: 2018",
"Track: 2",
"OK");
}
}
| 10,711 | Java | .java | finnyb/javampd | 35 | 21 | 13 | 2013-11-21T15:24:25Z | 2024-04-27T11:03:48Z |
MPDSongDatabaseTest.java | /FileExtraction/Java_unseen/finnyb_javampd/src/test/java/org/bff/javampd/song/MPDSongDatabaseTest.java | package org.bff.javampd.song;
import static org.junit.jupiter.api.Assertions.assertAll;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.bff.javampd.album.MPDAlbum;
import org.bff.javampd.artist.MPDArtist;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
class MPDSongDatabaseTest {
private SongDatabase songDatabase;
private SongSearcher mockedSongSearcher;
@Captor private ArgumentCaptor<String> argumentCaptor;
@Captor private ArgumentCaptor<SongSearcher.ScopeType> scopeCaptor;
@BeforeEach
void setup() {
mockedSongSearcher = mock(SongSearcher.class);
songDatabase = new MPDSongDatabase(mockedSongSearcher);
}
@Test
@DisplayName("Finding album from album object")
void testFindAlbum() {
String testAlbumName = "testAlbumName";
when(mockedSongSearcher.find(SongSearcher.ScopeType.ALBUM, testAlbumName))
.thenReturn(Collections.singletonList(MPDSong.builder().build()));
songDatabase.findAlbum(MPDAlbum.builder(testAlbumName).build());
verify(mockedSongSearcher).find(scopeCaptor.capture(), argumentCaptor.capture());
assertAll(
() -> assertEquals(testAlbumName, argumentCaptor.getValue()),
() -> assertEquals(SongSearcher.ScopeType.ALBUM, scopeCaptor.getValue()));
}
@Test
@DisplayName("Finding album directly by name")
void testFindAlbumByName() {
String testAlbumName = "testAlbumName";
when(mockedSongSearcher.find(SongSearcher.ScopeType.ALBUM, testAlbumName))
.thenReturn(Collections.singletonList(MPDSong.builder().build()));
songDatabase.findAlbum(testAlbumName);
verify(mockedSongSearcher).find(scopeCaptor.capture(), argumentCaptor.capture());
assertAll(
() -> assertEquals(testAlbumName, argumentCaptor.getValue()),
() -> assertEquals(SongSearcher.ScopeType.ALBUM, scopeCaptor.getValue()));
}
@Test
@DisplayName("Finding songs from album and artist")
void testFindAlbumByArtist() {
String testAlbumName = "testAlbumName";
String testArtistName = "testArtistName";
when(mockedSongSearcher.find(SongSearcher.ScopeType.ALBUM, testAlbumName))
.thenReturn(
List.of(
MPDSong.builder().artistName(testArtistName).build(), MPDSong.builder().build()));
var songs =
new ArrayList<>(
songDatabase.findAlbumByArtist(
new MPDArtist(testArtistName), MPDAlbum.builder(testAlbumName).build()));
verify(mockedSongSearcher).find(scopeCaptor.capture(), argumentCaptor.capture());
assertAll(
() -> assertEquals(testAlbumName, argumentCaptor.getValue()),
() -> assertEquals(SongSearcher.ScopeType.ALBUM, scopeCaptor.getValue()),
() -> assertEquals(1, songs.size()),
() -> assertEquals(testArtistName, songs.getFirst().getArtistName()));
}
}
| 3,221 | Java | .java | finnyb/javampd | 35 | 21 | 13 | 2013-11-21T15:24:25Z | 2024-04-27T11:03:48Z |
MPDSongTest.java | /FileExtraction/Java_unseen/finnyb_javampd/src/test/java/org/bff/javampd/song/MPDSongTest.java | package org.bff.javampd.song;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import nl.jqno.equalsverifier.EqualsVerifier;
import org.junit.jupiter.api.Test;
class MPDSongTest {
@Test
void equalsContract() {
EqualsVerifier.simple().forClass(MPDSong.class).verify();
}
@Test
void testCompareToLessThanZero() {
MPDSong song1 = MPDSong.builder().file("file1").title("song1").build();
MPDSong song2 = MPDSong.builder().file("file2").title("song2").build();
assertTrue(song1.compareTo(song2) < 0);
}
@Test
void testCompareToGreaterThanZero() {
MPDSong song1 = MPDSong.builder().file("file2").title("song2").build();
MPDSong song2 = MPDSong.builder().file("file1").title("song1").build();
assertTrue(song1.compareTo(song2) > 0);
}
@Test
void testCompareToEquals() {
MPDSong song1 = MPDSong.builder().file("file1").title("song1").build();
MPDSong song2 = MPDSong.builder().file("file1").title("song1").build();
assertEquals(0, song1.compareTo(song2));
}
@Test
void testToString() {
String file = "file1";
MPDSong song = MPDSong.builder().file("file1").title("song1").build();
assertThat(
song.toString(),
is(
equalTo(
"""
MPDSong(name=song1, title=song1, albumArtist=null,\
artistName=null, albumName=null, file=file1, genre=null,\
comment=null, date=null, discNumber=null, track=null,\
length=0, tagMap=null)\
""")));
}
@Test
void testGetName() {
MPDSong song = MPDSong.builder().file("file1").title("song1").name("name1").build();
assertEquals("name1", song.getName());
}
@Test
void testGetNameNullName() {
MPDSong song = MPDSong.builder().file("file1").title("song1").build();
assertEquals("song1", song.getName());
}
@Test
void testGetNameEmptyName() {
MPDSong song = MPDSong.builder().file("file1").title("song1").name("").build();
assertEquals("song1", song.getName());
}
}
// File: src/test/java/org/bff/javampd/song/MPDSongSearcherTest.java
package org.bff.javampd.song;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.assertAll;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.*;
import java.util.ArrayList;
import org.bff.javampd.command.CommandExecutor;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.junit.jupiter.MockitoExtension;
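/**
 * Unit tests for {@link MPDSongSearcher}, checking the filter expressions built for the MPD
 * "search" (contains) and "find" (exact match) commands, for single and combined criteria,
 * including the escaping of embedded single quotes.
 */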
@ExtendWith(MockitoExtension.class)
class MPDSongSearcherTest {
private SongSearcher songSearcher;
private CommandExecutor mockedCommandExecuter;
private SearchProperties searchProperties;
@Captor private ArgumentCaptor<String> commandArgumentCaptor;
@Captor private ArgumentCaptor<String> paramArgumentCaptor;
@BeforeEach
void setup() {
searchProperties = new SearchProperties();
SongConverter mockedSongConverter = mock(SongConverter.class);
mockedCommandExecuter = mock(CommandExecutor.class);
songSearcher =
new MPDSongSearcher(searchProperties, mockedCommandExecuter, mockedSongConverter);
}
@Test
void searchAny() {
var search = "test";
when(mockedCommandExecuter.sendCommand(any(), anyString())).thenReturn(new ArrayList<>());
this.songSearcher.searchAny(search);
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertEquals("search", commandArgumentCaptor.getValue()),
() ->
assertEquals("(any contains '%s')".formatted(search), paramArgumentCaptor.getValue()));
}
@Test
void searchByScopeAndString() {
var search = "test";
when(mockedCommandExecuter.sendCommand(anyString(), anyString())).thenReturn(new ArrayList<>());
this.songSearcher.search(SongSearcher.ScopeType.ALBUM, search);
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertEquals("search", commandArgumentCaptor.getValue()),
() ->
assertEquals(
"(album contains '%s')".formatted(search), paramArgumentCaptor.getValue()));
}
@Test
void searchBySingleCriteria() {
var search = "test";
when(mockedCommandExecuter.sendCommand(anyString(), anyString())).thenReturn(new ArrayList<>());
this.songSearcher.search(new SearchCriteria(SongSearcher.ScopeType.TITLE, search));
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertEquals("search", commandArgumentCaptor.getValue()),
() ->
assertEquals(
"(title contains '%s')".formatted(search), paramArgumentCaptor.getValue()));
}
@Test
void searchByMultipleCriteria() {
var searchArtist = "Tool";
var searchTitle = "Vic";
when(mockedCommandExecuter.sendCommand(anyString(), anyString())).thenReturn(new ArrayList<>());
this.songSearcher.search(
new SearchCriteria(SongSearcher.ScopeType.TITLE, searchTitle),
new SearchCriteria(SongSearcher.ScopeType.ARTIST, searchArtist));
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertEquals("search", commandArgumentCaptor.getValue()),
() ->
assertEquals(
"((title contains '%s') AND (artist contains '%s'))"
.formatted(searchTitle, searchArtist),
paramArgumentCaptor.getValue()));
}
@Test
void findAny() {
var find = "test";
when(mockedCommandExecuter.sendCommand(
searchProperties.getFind(), generateParams(SongSearcher.ScopeType.ANY, find)))
.thenReturn(new ArrayList<>());
this.songSearcher.findAny(find);
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertEquals("find", commandArgumentCaptor.getValue()),
() -> assertEquals("(any == '%s')".formatted(find), paramArgumentCaptor.getValue()));
}
@Test
void findByScopeAndString() {
var find = "test";
when(mockedCommandExecuter.sendCommand(anyString(), anyString())).thenReturn(new ArrayList<>());
this.songSearcher.find(SongSearcher.ScopeType.ALBUM, find);
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertEquals("find", commandArgumentCaptor.getValue()),
() -> assertEquals("(album == '%s')".formatted(find), paramArgumentCaptor.getValue()));
}
@Test
void findBySingleCriteria() {
var find = "test";
when(mockedCommandExecuter.sendCommand(anyString(), anyString())).thenReturn(new ArrayList<>());
this.songSearcher.find(new SearchCriteria(SongSearcher.ScopeType.TITLE, find));
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertEquals("find", commandArgumentCaptor.getValue()),
() -> assertEquals("(title == '%s')".formatted(find), paramArgumentCaptor.getValue()));
}
@Test
void findByMultipleCriteria() {
var findArtist = "Tool";
var findTitle = "Vicarious";
when(mockedCommandExecuter.sendCommand(anyString(), anyString())).thenReturn(new ArrayList<>());
this.songSearcher.find(
new SearchCriteria(SongSearcher.ScopeType.TITLE, findTitle),
new SearchCriteria(SongSearcher.ScopeType.ARTIST, findArtist));
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertEquals("find", commandArgumentCaptor.getValue()),
() ->
assertEquals(
"((title == '%s') AND (artist == '%s'))".formatted(findTitle, findArtist),
paramArgumentCaptor.getValue()));
}
@Test
void testFindEscapeSingleQuote() {
var find = "I Ain't Mad at Cha";
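    // Filters are sent as quoted strings, so MPD requires the quote escaped twice: the Java
    // literal \\\\ is two runtime backslashes, giving \\' on the wire.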
when(mockedCommandExecuter.sendCommand(anyString(), anyString())).thenReturn(new ArrayList<>());
this.songSearcher.find(new SearchCriteria(SongSearcher.ScopeType.TITLE, find));
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertEquals("find", commandArgumentCaptor.getValue()),
() ->
assertThat(
paramArgumentCaptor.getValue(),
is(equalTo("(title == 'I Ain\\\\'t Mad at Cha')"))));
}
@Test
void testSearchEscapeSingleQuote() {
var search = "Mama's Just a Little Girl";
when(mockedCommandExecuter.sendCommand(anyString(), anyString())).thenReturn(new ArrayList<>());
this.songSearcher.search(new SearchCriteria(SongSearcher.ScopeType.TITLE, search));
verify(mockedCommandExecuter)
.sendCommand(commandArgumentCaptor.capture(), paramArgumentCaptor.capture());
assertAll(
() -> assertThat(commandArgumentCaptor.getValue(), is(equalTo("search"))),
() ->
assertThat(
paramArgumentCaptor.getValue(),
is(equalTo("(title contains 'Mama\\\\'s Just a Little Girl')"))));
}
private String generateParams(SongSearcher.ScopeType scopeType, String criteria) {
return "(%s == '%s')".formatted(scopeType.getType(), criteria);
}
}
// File: src/test/java/org/bff/javampd/song/SearchCriteriaTest.java
package org.bff.javampd.song;
import nl.jqno.equalsverifier.EqualsVerifier;
import org.junit.jupiter.api.Test;
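/** Verifies the equals and hashCode contract of {@link SearchCriteria}. */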
class SearchCriteriaTest {
@Test
void testEqualsAndHash() {
EqualsVerifier.simple().forClass(SearchCriteria.class).verify();
}
}
// File: src/test/java/org/bff/javampd/song/SongProcessorTest.java
package org.bff.javampd.song;
import static org.bff.javampd.song.SongProcessor.ALBUM_ARTIST;
import static org.junit.jupiter.api.Assertions.assertAll;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
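/** Verifies that {@link SongProcessor} lookups match response prefixes case-insensitively. */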
class SongProcessorTest {
@Test
@DisplayName("looks up processor for different cases")
void lookupLine() {
assertAll(
() -> assertEquals(ALBUM_ARTIST, SongProcessor.lookup("albumartist: Tool")),
() -> assertEquals(ALBUM_ARTIST, SongProcessor.lookup("ALBUMARTIST: Tool")),
() -> assertEquals(ALBUM_ARTIST, SongProcessor.lookup("AlbumArtist: Tool")));
}
}
// File: src/test/java/org/bff/javampd/monitor/MonitorPropertiesTest.java
package org.bff.javampd.monitor;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
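/** Verifies the default delay values exposed by {@link MonitorProperties}. */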
class MonitorPropertiesTest {
private MonitorProperties monitorProperties;
@BeforeEach
void setUp() {
monitorProperties = new MonitorProperties();
}
@Test
void testGetOutputDelay() {
assertEquals(60, monitorProperties.getOutputDelay());
}
@Test
void testGetConnectionDelay() {
assertEquals(5, monitorProperties.getConnectionDelay());
}
@Test
void testGetPlaylistDelay() {
assertEquals(2, monitorProperties.getPlaylistDelay());
}
@Test
void testGetPlayerDelay() {
assertEquals(0, monitorProperties.getPlayerDelay());
}
@Test
void testGetErrorDelay() {
assertEquals(0, monitorProperties.getErrorDelay());
}
@Test
void testGetTrackDelay() {
assertEquals(0, monitorProperties.getTrackDelay());
}
@Test
void testGetMonitorDelay() {
assertEquals(1, monitorProperties.getMonitorDelay());
}
@Test
void testGetExceptionDelay() {
assertEquals(5, monitorProperties.getExceptionDelay());
}
}
// File: src/test/java/org/bff/javampd/monitor/MPDVolumeMonitorTest.java
package org.bff.javampd.monitor;
import static org.junit.jupiter.api.Assertions.*;
import org.bff.javampd.player.VolumeChangeListener;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
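/**
 * Unit tests for {@link MPDVolumeMonitor}: a volume change fires an event, repeated values do
 * not, unrelated status lines are ignored, and listener removal and reset work as expected.
 */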
class MPDVolumeMonitorTest {
private VolumeMonitor volumeMonitor;
@BeforeEach
void setUp() {
volumeMonitor = new MPDVolumeMonitor();
}
@Test
void testProcessResponseStatus() {
final int[] volume = {0};
volumeMonitor.addVolumeChangeListener(event -> volume[0] = event.getVolume());
volumeMonitor.processResponseStatus("volume: 1");
volumeMonitor.checkStatus();
assertEquals(1, volume[0]);
}
@Test
void testProcessResponseStatusSameVolume() {
final boolean[] eventFired = {false};
volumeMonitor.addVolumeChangeListener(event -> eventFired[0] = true);
volumeMonitor.processResponseStatus("volume: 1");
volumeMonitor.checkStatus();
assertTrue(eventFired[0]);
eventFired[0] = false;
volumeMonitor.processResponseStatus("volume: 1");
volumeMonitor.checkStatus();
assertFalse(eventFired[0]);
}
@Test
void testProcessResponseStatusNotVolume() {
final boolean[] eventFired = {false};
volumeMonitor.addVolumeChangeListener(event -> eventFired[0] = true);
volumeMonitor.processResponseStatus("bogus: 1");
volumeMonitor.checkStatus();
assertFalse(eventFired[0]);
}
@Test
void testRemoveVolumeChangeListener() {
final int[] volume = {0};
VolumeChangeListener volumeChangeListener = event -> volume[0] = event.getVolume();
volumeMonitor.addVolumeChangeListener(volumeChangeListener);
volumeMonitor.processResponseStatus("volume: 1");
volumeMonitor.checkStatus();
assertEquals(1, volume[0]);
volumeMonitor.removeVolumeChangeListener(volumeChangeListener);
    volumeMonitor.processResponseStatus("volume: 2");
    volumeMonitor.checkStatus();
    assertEquals(1, volume[0]);
}
@Test
void testResetVolume() {
String line = "volume: 1";
final boolean[] eventFired = {false};
volumeMonitor.addVolumeChangeListener(event -> eventFired[0] = true);
volumeMonitor.processResponseStatus(line);
volumeMonitor.checkStatus();
assertTrue(eventFired[0]);
volumeMonitor.reset();
eventFired[0] = false;
volumeMonitor.processResponseStatus(line);
volumeMonitor.checkStatus();
assertTrue(eventFired[0]);
}
}
// File: src/test/java/org/bff/javampd/monitor/MPDPlayerMonitorTest.java
package org.bff.javampd.monitor;
import static org.junit.jupiter.api.Assertions.*;
import org.bff.javampd.player.PlayerBasicChangeEvent;
import org.bff.javampd.player.PlayerBasicChangeListener;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
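/**
 * Unit tests for {@link MPDPlayerMonitor}, covering transitions between play, pause, and stop,
 * handling of invalid status lines, listener management, and state reset.
 */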
class MPDPlayerMonitorTest {
private PlayerMonitor playerMonitor;
@BeforeEach
void setUp() {
playerMonitor = new MPDPlayerMonitor();
}
@Test
void testAddPlayerChangeListener() {
final PlayerBasicChangeEvent[] changeEvent = new PlayerBasicChangeEvent[1];
playerMonitor.addPlayerChangeListener(event -> changeEvent[0] = event);
playerMonitor.processResponseStatus("state: play");
playerMonitor.checkStatus();
assertEquals(PlayerBasicChangeEvent.Status.PLAYER_STARTED, changeEvent[0].getStatus());
}
@Test
void testRemovePlayerChangeListener() {
final PlayerBasicChangeEvent[] changeEvent = new PlayerBasicChangeEvent[1];
PlayerBasicChangeListener playerBasicChangeListener = event -> changeEvent[0] = event;
playerMonitor.addPlayerChangeListener(playerBasicChangeListener);
playerMonitor.processResponseStatus("state: play");
playerMonitor.checkStatus();
assertEquals(PlayerBasicChangeEvent.Status.PLAYER_STARTED, changeEvent[0].getStatus());
changeEvent[0] = null;
playerMonitor.removePlayerChangeListener(playerBasicChangeListener);
playerMonitor.processResponseStatus("state: stop");
playerMonitor.checkStatus();
assertNull(changeEvent[0]);
}
@Test
void testPlayerStarted() {
final PlayerBasicChangeEvent[] changeEvent = new PlayerBasicChangeEvent[1];
playerMonitor.addPlayerChangeListener(event -> changeEvent[0] = event);
playerMonitor.processResponseStatus("state: stop");
playerMonitor.checkStatus();
playerMonitor.processResponseStatus("state: play");
playerMonitor.checkStatus();
assertEquals(PlayerBasicChangeEvent.Status.PLAYER_STARTED, changeEvent[0].getStatus());
}
@Test
void testPlayerStopped() {
processStoppedTest("state: play");
}
@Test
void testPlayerInvalidStatus() {
final PlayerBasicChangeEvent[] changeEvent = new PlayerBasicChangeEvent[1];
playerMonitor.addPlayerChangeListener(event -> changeEvent[0] = event);
playerMonitor.processResponseStatus("state: bogus");
playerMonitor.checkStatus();
assertNull(changeEvent[0]);
}
@Test
void testPlayerInvalidStatusAfterValidStatus() {
final PlayerBasicChangeEvent[] changeEvent = new PlayerBasicChangeEvent[1];
playerMonitor.addPlayerChangeListener(event -> changeEvent[0] = event);
playerMonitor.processResponseStatus("state: play");
playerMonitor.checkStatus();
assertNotNull(changeEvent[0]);
changeEvent[0] = null;
playerMonitor.processResponseStatus("state: bogus");
playerMonitor.checkStatus();
assertNull(changeEvent[0]);
}
@Test
void testPlayerPaused() {
final PlayerBasicChangeEvent[] changeEvent = new PlayerBasicChangeEvent[1];
playerMonitor.addPlayerChangeListener(event -> changeEvent[0] = event);
playerMonitor.processResponseStatus("state: play");
playerMonitor.checkStatus();
playerMonitor.processResponseStatus("state: pause");
playerMonitor.checkStatus();
assertEquals(PlayerBasicChangeEvent.Status.PLAYER_PAUSED, changeEvent[0].getStatus());
}
@Test
  void testPlayerPausedToStopped() {
processStoppedTest("state: pause");
}
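  // Drives the monitor from the given state to "state: stop" and expects a PLAYER_STOPPED event.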
private void processStoppedTest(String from) {
final PlayerBasicChangeEvent[] changeEvent = new PlayerBasicChangeEvent[1];
playerMonitor.addPlayerChangeListener(event -> changeEvent[0] = event);
playerMonitor.processResponseStatus(from);
playerMonitor.checkStatus();
playerMonitor.processResponseStatus("state: stop");
playerMonitor.checkStatus();
assertEquals(PlayerBasicChangeEvent.Status.PLAYER_STOPPED, changeEvent[0].getStatus());
}
@Test
void testPlayerUnPaused() {
final PlayerBasicChangeEvent[] changeEvent = new PlayerBasicChangeEvent[1];
playerMonitor.addPlayerChangeListener(event -> changeEvent[0] = event);
playerMonitor.processResponseStatus("state: play");
playerMonitor.checkStatus();
playerMonitor.processResponseStatus("state: pause");
playerMonitor.checkStatus();
playerMonitor.processResponseStatus("state: play");
playerMonitor.checkStatus();
    playerMonitor.checkStatus();
}
@Test
void testGetStatus() {
playerMonitor.processResponseStatus("state: play");
playerMonitor.checkStatus();
assertEquals(PlayerStatus.STATUS_PLAYING, playerMonitor.getStatus());
}
@Test
void testResetState() {
String line = "state: play";
final PlayerBasicChangeEvent[] changeEvent = new PlayerBasicChangeEvent[1];
playerMonitor.addPlayerChangeListener(event -> changeEvent[0] = event);
playerMonitor.processResponseStatus(line);
playerMonitor.checkStatus();
assertEquals(PlayerBasicChangeEvent.Status.PLAYER_STARTED, changeEvent[0].getStatus());
playerMonitor.reset();
changeEvent[0] = null;
playerMonitor.processResponseStatus(line);
playerMonitor.checkStatus();
assertEquals(PlayerBasicChangeEvent.Status.PLAYER_STARTED, changeEvent[0].getStatus());
}
}
// File: src/test/java/org/bff/javampd/monitor/StandAloneMonitorThreadTest.java
package org.bff.javampd.monitor;
import static org.awaitility.Awaitility.await;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.when;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import org.bff.javampd.MPDException;
import org.bff.javampd.server.ServerStatus;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
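/**
 * Unit tests for {@link StandAloneMonitorThread}: initial status loading, monitor registration
 * and removal, interrupt handling, and propagation of connection and status errors.
 */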
@ExtendWith(MockitoExtension.class)
class StandAloneMonitorThreadTest {
@Mock private ServerStatus serverStatus;
@Mock private ConnectionMonitor connectionMonitor;
private StandAloneMonitorThread standAloneMonitorThread;
@AfterEach
void tearDown() {
standAloneMonitorThread.setStopped(true);
}
@Test
void testInitialStatus() {
final boolean[] called = new boolean[1];
List<String> returnStatus1 = new ArrayList<>();
returnStatus1.add("volume: 1");
List<String> returnStatus2 = new ArrayList<>();
returnStatus2.add("volume: 2");
when(serverStatus.getStatus()).thenReturn(returnStatus1).thenReturn(returnStatus2);
VolumeMonitor volumeMonitor = new MPDVolumeMonitor();
volumeMonitor.addVolumeChangeListener(event -> called[0] = true);
createMonitor(0, 0).addMonitor(new ThreadedMonitor(volumeMonitor, 0));
runMonitor();
await().until(() -> called[0]);
}
@Test
void testAddMonitor() {
final boolean[] called = new boolean[1];
Monitor monitor = () -> called[0] = true;
createMonitor(0, 0).addMonitor(new ThreadedMonitor(monitor, 0));
runMonitor();
await().until(() -> called[0]);
}
@Test
void testRemoveMonitor() {
final boolean[] called = new boolean[1];
Monitor monitor = () -> called[0] = true;
ThreadedMonitor threadedMonitor = new ThreadedMonitor(monitor, 0);
StandAloneMonitorThread monitorThread = createMonitor(0, 0);
monitorThread.addMonitor(threadedMonitor);
runMonitor();
await().until(() -> called[0]);
monitorThread.removeMonitor(threadedMonitor);
    called[0] = false;
    // After removal the monitor must stay silent; require the flag to hold false for a while.
    await().during(Duration.ofMillis(200)).until(() -> !called[0]);
}
@Test
  void testRunInterrupted() {
createMonitor(1, 1);
Thread thread = new Thread(standAloneMonitorThread);
thread.start();
thread.interrupt();
await().until(() -> !thread.isAlive());
}
@Test
void testRunConnectionErrorWithInterrupt() {
final int[] count = {0};
Monitor monitor =
() -> {
++count[0];
throw new MPDException("Test Exception");
};
createMonitor(0, 5000).addMonitor(new ThreadedMonitor(monitor, 0));
Thread thread = runMonitor();
thread.interrupt();
await().until(() -> !thread.isAlive());
}
@Test
void testRunError() {
final int[] count = {0};
Monitor monitor =
() -> {
++count[0];
throw new MPDException("Test Exception");
};
when(connectionMonitor.isConnected()).thenReturn(true);
createMonitor(0, 0).addMonitor(new ThreadedMonitor(monitor, 0));
runMonitor();
await().until(() -> count[0] > 1);
}
@Test
void testRunConnectionError() {
Monitor monitor =
() -> {
throw new MPDException();
};
when(connectionMonitor.isConnected()).thenThrow(new MPDException());
createMonitor(0, 0).addMonitor(new ThreadedMonitor(monitor, 0));
assertThrows(MPDException.class, () -> standAloneMonitorThread.run());
}
@Test
void testLoadInitialStatusException() {
when(serverStatus.getStatus()).thenThrow(new MPDException());
createMonitor(0, 0);
assertThrows(MPDException.class, () -> standAloneMonitorThread.run());
}
@Test
void testIsStopped() {
createMonitor(0, 0);
runMonitor();
await().until(() -> !standAloneMonitorThread.isDone());
standAloneMonitorThread.setStopped(true);
await().until(() -> standAloneMonitorThread.isDone());
}
@Test
void testIsLoaded() {
createMonitor(0, 0);
assertFalse(standAloneMonitorThread.isInitialized());
runMonitor();
await().until(() -> standAloneMonitorThread.isInitialized());
}
private StandAloneMonitorThread createMonitor(int delay, int exceptionDelay) {
standAloneMonitorThread =
new StandAloneMonitorThread(serverStatus, connectionMonitor, delay, exceptionDelay);
return standAloneMonitorThread;
}
private Thread runMonitor() {
Thread thread = new Thread(standAloneMonitorThread);
thread.start();
return thread;
}
}