Unnamed: 0 (int64, 0–6.45k) | func (string, 29–253k chars) | target (class label, 2 classes) | project (string, 36–167 chars)
---|---|---|---|
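Each row below pairs a sample id with the raw Java source of a function or class ("func"), a binary "target" label, and the path of the source file within its project. A minimal sketch of how such a dump might be inspected, assuming it has been exported to CSV (the file name "samples.csv" and the use of pandas are illustrative assumptions, not part of the dump):

import pandas as pd

df = pd.read_csv("samples.csv")           # columns: "Unnamed: 0", "func", "target", "project"
print(df.dtypes)                          # "Unnamed: 0" is int64; "func" and "project" are strings
print(df["target"].value_counts())        # the target label has 2 classes
print(df["func"].str.len().describe())    # "func" lengths range from 29 to ~253k characters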
1,169 | clientTransportService.submitRequest(node, "benchmark", message, new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
@Override
public BenchmarkMessageResponse newInstance() {
return new BenchmarkMessageResponse();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(BenchmarkMessageResponse response) {
}
@Override
public void handleException(TransportException exp) {
exp.printStackTrace();
}
}).txGet(); | 0true
| src_test_java_org_elasticsearch_benchmark_transport_TransportBenchmark.java |
1,246 | addOperation(operations, new Runnable() {
public void run() {
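// Buffers 10 entries with random keys in a local HashMap, then bulk-loads them into the distributed IMap via putAll.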
IMap map = hazelcast.getMap("myMap");
Map localMap = new HashMap();
for (int i = 0; i < 10; i++) {
localMap.put(random.nextInt(SIZE), new Customer(random.nextInt(100), String.valueOf(random.nextInt(10000))));
}
map.putAll(localMap);
}
}, 1); | 0true
| hazelcast_src_main_java_com_hazelcast_examples_AllTest.java |
377 | public class TransportPutRepositoryAction extends TransportMasterNodeOperationAction<PutRepositoryRequest, PutRepositoryResponse> {
private final RepositoriesService repositoriesService;
@Inject
public TransportPutRepositoryAction(Settings settings, TransportService transportService, ClusterService clusterService,
RepositoriesService repositoriesService, ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
this.repositoriesService = repositoriesService;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected String transportAction() {
return PutRepositoryAction.NAME;
}
@Override
protected PutRepositoryRequest newRequest() {
return new PutRepositoryRequest();
}
@Override
protected PutRepositoryResponse newResponse() {
return new PutRepositoryResponse();
}
@Override
protected ClusterBlockException checkBlock(PutRepositoryRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
}
@Override
protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener<PutRepositoryResponse> listener) throws ElasticsearchException {
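// Delegates registration to the RepositoriesService and translates its acknowledgement into a PutRepositoryResponse.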
repositoriesService.registerRepository(new RepositoriesService.RegisterRepositoryRequest("put_repository [" + request.name() + "]", request.name(), request.type())
.settings(request.settings())
.masterNodeTimeout(request.masterNodeTimeout())
.ackTimeout(request.timeout()), new ActionListener<RepositoriesService.RegisterRepositoryResponse>() {
@Override
public void onResponse(RepositoriesService.RegisterRepositoryResponse response) {
listener.onResponse(new PutRepositoryResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
}
} | 1no label
| src_main_java_org_elasticsearch_action_admin_cluster_repositories_put_TransportPutRepositoryAction.java |
1,132 | public class NativePayloadSumScoreScript extends AbstractSearchScript {
public static final String NATIVE_PAYLOAD_SUM_SCRIPT_SCORE = "native_payload_sum_script_score";
String field = null;
String[] terms = null;
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativePayloadSumScoreScript(params);
}
}
private NativePayloadSumScoreScript(Map<String, Object> params) {
params.entrySet();
terms = new String[params.size()];
field = params.keySet().iterator().next();
Object o = params.get(field);
ArrayList<String> arrayList = (ArrayList<String>) o;
terms = arrayList.toArray(new String[arrayList.size()]);
}
@Override
public Object run() {
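// Sums the payloads (read as floats, defaulting to 0) over all positions of each requested term in the field.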
float score = 0;
IndexField indexField = indexLookup().get(field);
for (int i = 0; i < terms.length; i++) {
IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS | IndexLookup.FLAG_CACHE);
for (TermPosition pos : indexFieldTerm) {
score += pos.payloadAsFloat(0);
}
}
return score;
}
} | 0true
| src_test_java_org_elasticsearch_benchmark_scripts_score_script_NativePayloadSumScoreScript.java |
212 | public class OConsoleDatabaseApp extends OrientConsole implements OCommandOutputListener, OProgressListener {
protected ODatabaseDocument currentDatabase;
protected String currentDatabaseName;
protected ORecordInternal<?> currentRecord;
protected List<OIdentifiable> currentResultSet;
protected OServerAdmin serverAdmin;
private int lastPercentStep;
private String currentDatabaseUserName;
private String currentDatabaseUserPassword;
public OConsoleDatabaseApp(final String[] args) {
super(args);
}
public static void main(final String[] args) {
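// Puts the terminal into cbreak mode so the TTY reader sees raw keystrokes; a shutdown hook restores echo on exit.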
int result = 0;
try {
boolean tty = false;
try {
if (setTerminalToCBreak())
tty = true;
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
try {
stty("echo");
} catch (Exception e) {
}
}
});
} catch (Exception e) {
}
final OConsoleDatabaseApp console = new OConsoleDatabaseApp(args);
if (tty)
console.setReader(new TTYConsoleReader());
result = console.run();
} finally {
try {
stty("echo");
} catch (Exception e) {
}
}
System.exit(result);
}
@Override
protected boolean isCollectingCommands(final String iLine) {
return iLine.startsWith("js");
}
@Override
protected void onBefore() {
super.onBefore();
currentResultSet = new ArrayList<OIdentifiable>();
OGlobalConfiguration.STORAGE_KEEP_OPEN.setValue(false);
// DISABLE THE NETWORK AND STORAGE TIMEOUTS
OGlobalConfiguration.STORAGE_LOCK_TIMEOUT.setValue(0);
OGlobalConfiguration.NETWORK_LOCK_TIMEOUT.setValue(0);
OGlobalConfiguration.CLIENT_CHANNEL_MIN_POOL.setValue(1);
OGlobalConfiguration.CLIENT_CHANNEL_MAX_POOL.setValue(2);
properties.put("limit", "20");
properties.put("width", "132");
properties.put("debug", "false");
properties.put("maxBinaryDisplay", "160");
properties.put("verbose", "2");
OCommandManager.instance().registerExecutor(OCommandScript.class, OCommandExecutorScript.class);
}
@Override
protected void onAfter() {
super.onAfter();
Orient.instance().shutdown();
}
@ConsoleCommand(aliases = { "use database" }, description = "Connect to a database or a remote Server instance")
public void connect(
@ConsoleParameter(name = "url", description = "The url of the remote server or the database to connect to in the format '<mode>:<path>'") String iURL,
@ConsoleParameter(name = "user", description = "User name") String iUserName,
@ConsoleParameter(name = "password", description = "User password", optional = true) String iUserPassword) throws IOException {
disconnect();
if (iUserPassword == null) {
message("Enter password: ");
final BufferedReader br = new BufferedReader(new InputStreamReader(this.in));
iUserPassword = br.readLine();
message("\n");
}
currentDatabaseUserName = iUserName;
currentDatabaseUserPassword = iUserPassword;
if (iURL.contains("/")) {
// OPEN DB
message("Connecting to database [" + iURL + "] with user '" + iUserName + "'...");
currentDatabase = new ODatabaseDocumentTx(iURL);
if (currentDatabase == null)
throw new OException("Database " + iURL + " not found");
currentDatabase.registerListener(new OConsoleDatabaseListener(this));
currentDatabase.open(iUserName, iUserPassword);
currentDatabaseName = currentDatabase.getName();
// if (currentDatabase.getStorage() instanceof OStorageProxy)
// serverAdmin = new OServerAdmin(currentDatabase.getStorage().getURL());
} else {
// CONNECT TO REMOTE SERVER
message("Connecting to remote Server instance [" + iURL + "] with user '" + iUserName + "'...");
serverAdmin = new OServerAdmin(iURL).connect(iUserName, iUserPassword);
currentDatabase = null;
currentDatabaseName = null;
}
message("OK");
}
@ConsoleCommand(aliases = { "close database" }, description = "Disconnect from the current database")
public void disconnect() {
if (serverAdmin != null) {
message("\nDisconnecting from remote server [" + serverAdmin.getURL() + "]...");
serverAdmin.close(true);
serverAdmin = null;
message("\nOK");
}
if (currentDatabase != null) {
message("\nDisconnecting from the database [" + currentDatabaseName + "]...");
final OStorage stg = Orient.instance().getStorage(currentDatabase.getURL());
currentDatabase.close();
// FORCE CLOSING OF STORAGE: THIS CLEAN UP REMOTE CONNECTIONS
if (stg != null)
stg.close(true);
currentDatabase = null;
currentDatabaseName = null;
currentRecord = null;
message("\nOK");
}
}
@ConsoleCommand(description = "Create a new database")
public void createDatabase(
@ConsoleParameter(name = "database-url", description = "The url of the database to create in the format '<mode>:<path>'") String iDatabaseURL,
@ConsoleParameter(name = "user", description = "Server administrator name") String iUserName,
@ConsoleParameter(name = "password", description = "Server administrator password") String iUserPassword,
@ConsoleParameter(name = "storage-type", description = "The type of the storage. 'local' and 'plocal' for disk-based databases and 'memory' for in-memory database") String iStorageType,
@ConsoleParameter(name = "db-type", optional = true, description = "The type of the database used between 'document' and 'graph'. By default is graph.") String iDatabaseType)
throws IOException {
if (iDatabaseType == null)
iDatabaseType = "graph";
message("\nCreating database [" + iDatabaseURL + "] using the storage type [" + iStorageType + "]...");
currentDatabaseUserName = iUserName;
currentDatabaseUserPassword = iUserPassword;
if (iDatabaseURL.startsWith(OEngineRemote.NAME)) {
// REMOTE CONNECTION
final String dbURL = iDatabaseURL.substring(OEngineRemote.NAME.length() + 1);
new OServerAdmin(dbURL).connect(iUserName, iUserPassword).createDatabase(iDatabaseType, iStorageType).close();
connect(iDatabaseURL, OUser.ADMIN, OUser.ADMIN);
} else {
// LOCAL CONNECTION
if (iStorageType != null) {
// CHECK STORAGE TYPE
if (!iDatabaseURL.toLowerCase().startsWith(iStorageType.toLowerCase()))
throw new IllegalArgumentException("Storage type '" + iStorageType + "' is different by storage type in URL");
}
currentDatabase = Orient.instance().getDatabaseFactory().createDatabase(iDatabaseType, iDatabaseURL);
currentDatabase.create();
currentDatabaseName = currentDatabase.getName();
}
message("\nDatabase created successfully.");
message("\n\nCurrent database is: " + iDatabaseURL);
}
@ConsoleCommand(description = "List all the databases available on the connected server")
public void listDatabases() throws IOException {
if (serverAdmin != null) {
final Map<String, String> databases = serverAdmin.listDatabases();
message("\nFound %d databases:\n", databases.size());
for (Entry<String, String> database : databases.entrySet()) {
message("\n* %s (%s)", database.getKey(), database.getValue().substring(0, database.getValue().indexOf(":")));
}
} else {
message("\nNot connected to the Server instance. You've to connect to the Server using server's credentials (look at orientdb-*server-config.xml file)");
}
out.println();
}
@ConsoleCommand(description = "Reload the database schema")
public void reloadSchema() throws IOException {
message("\nreloading database schema...");
updateDatabaseInfo();
message("\n\nDone.");
}
@ConsoleCommand(description = "Create a new data-segment in the current database.")
public void createDatasegment(
@ConsoleParameter(name = "datasegment-name", description = "The name of the data segment to create") final String iName,
@ConsoleParameter(name = "datasegment-location", description = "The directory where to place the files", optional = true) final String iLocation) {
checkForDatabase();
if (iLocation != null)
message("\nCreating data-segment [" + iName + "] in database " + currentDatabaseName + " in path: " + iLocation + "...");
else
message("\nCreating data-segment [" + iName + "] in database directory...");
currentDatabase.addDataSegment(iName, iLocation);
updateDatabaseInfo();
}
@ConsoleCommand(splitInWords = false, description = "Create a new cluster in the current database. The cluster can be physical or memory")
public void createCluster(
@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("create", iCommandText, "\nCluster created correctly with id #%d\n", true);
updateDatabaseInfo();
}
@ConsoleCommand(description = "Remove a cluster in the current database. The cluster can be physical or memory")
public void dropCluster(
@ConsoleParameter(name = "cluster-name", description = "The name or the id of the cluster to remove") String iClusterName) {
checkForDatabase();
message("\nDropping cluster [" + iClusterName + "] in database " + currentDatabaseName + "...");
boolean result = currentDatabase.dropCluster(iClusterName, true);
if (!result) {
// TRY TO GET AS CLUSTER ID
try {
int clusterId = Integer.parseInt(iClusterName);
if (clusterId > -1) {
result = currentDatabase.dropCluster(clusterId, true);
}
} catch (Exception e) {
}
}
if (result)
message("\nCluster correctly removed");
else
message("\nCannot find the cluster to remove");
updateDatabaseInfo();
}
@ConsoleCommand(splitInWords = false, description = "Alters a cluster in the current database. The cluster can be physical or memory")
public void alterCluster(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("alter", iCommandText, "\nCluster updated successfully\n", false);
updateDatabaseInfo();
}
@ConsoleCommand(description = "Shows the holes in current storage")
public void showHoles() throws IOException {
checkForDatabase();
if (!(currentDatabase.getStorage() instanceof OStorageLocal)) {
message("\nError: cannot show holes in databases different by local");
return;
}
final OStorageLocal storage = (OStorageLocal) currentDatabase.getStorage();
final List<ODataHoleInfo> result = storage.getHolesList();
message("\nFound " + result.size() + " holes in database " + currentDatabaseName + ":");
message("\n+----------------------+----------------------+");
message("\n| Position | Size (in bytes) |");
message("\n+----------------------+----------------------+");
long size = 0;
for (ODataHoleInfo ppos : result) {
message("\n| %20d | %20d |", ppos.dataOffset, ppos.size);
size += ppos.size;
}
message("\n+----------------------+----------------------+");
message("\n| %20s | %20s |", "Total hole size", OFileUtils.getSizeAsString(size));
message("\n+----------------------+----------------------+");
}
@ConsoleCommand(description = "Begins a transaction. All the changes will remain local")
public void begin() throws IOException {
checkForDatabase();
if (currentDatabase.getTransaction().isActive()) {
message("\nError: an active transaction is currently open (id=" + currentDatabase.getTransaction().getId()
+ "). Commit or rollback before starting a new one.");
return;
}
currentDatabase.begin();
message("\nTransaction " + currentDatabase.getTransaction().getId() + " is running");
}
@ConsoleCommand(description = "Commits transaction changes to the database")
public void commit() throws IOException {
checkForDatabase();
if (!currentDatabase.getTransaction().isActive()) {
message("\nError: no active transaction is currently open.");
return;
}
final long begin = System.currentTimeMillis();
final int txId = currentDatabase.getTransaction().getId();
currentDatabase.commit();
message("\nTransaction " + txId + " has been committed in " + (System.currentTimeMillis() - begin) + "ms");
}
@ConsoleCommand(description = "Rolls back transaction changes to the previous state")
public void rollback() throws IOException {
checkForDatabase();
if (!currentDatabase.getTransaction().isActive()) {
message("\nError: no active transaction is running right now.");
return;
}
final long begin = System.currentTimeMillis();
final int txId = currentDatabase.getTransaction().getId();
currentDatabase.rollback();
message("\nTransaction " + txId + " has been rollbacked in " + (System.currentTimeMillis() - begin) + "ms");
}
@ConsoleCommand(splitInWords = false, description = "Truncate the class content in the current database")
public void truncateClass(@ConsoleParameter(name = "text", description = "The name of the class to truncate") String iCommandText) {
sqlCommand("truncate", iCommandText, "\nTruncated %d record(s) in %f sec(s).\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Truncate the cluster content in the current database")
public void truncateCluster(
@ConsoleParameter(name = "text", description = "The name of the class to truncate") String iCommandText) {
sqlCommand("truncate", iCommandText, "\nTruncated %d record(s) in %f sec(s).\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Truncate a record deleting it at low level")
public void truncateRecord(@ConsoleParameter(name = "text", description = "The record(s) to truncate") String iCommandText) {
sqlCommand("truncate", iCommandText, "\nTruncated %d record(s) in %f sec(s).\n", true);
}
@ConsoleCommand(description = "Load a record in memory using passed fetch plan")
public void loadRecord(
@ConsoleParameter(name = "record-id", description = "The unique Record Id of the record to load. If you do not have the Record Id, execute a query first") String iRecordId,
@ConsoleParameter(name = "fetch-plan", description = "The fetch plan to load the record with") String iFetchPlan) {
loadRecordInternal(iRecordId, iFetchPlan);
}
@ConsoleCommand(description = "Load a record in memory and set it as the current")
public void loadRecord(
@ConsoleParameter(name = "record-id", description = "The unique Record Id of the record to load. If you do not have the Record Id, execute a query first") String iRecordId) {
loadRecordInternal(iRecordId, null);
}
@ConsoleCommand(description = "Reloads a record using passed fetch plan")
public void reloadRecord(
@ConsoleParameter(name = "record-id", description = "The unique Record Id of the record to load. If you do not have the Record Id, execute a query first") String iRecordId,
@ConsoleParameter(name = "fetch-plan", description = "The fetch plan to load the record with") String iFetchPlan) {
reloadRecordInternal(iRecordId, iFetchPlan);
}
@ConsoleCommand(description = "Reload a record and set it as the current one")
public void reloadRecord(
@ConsoleParameter(name = "record-id", description = "The unique Record Id of the record to load. If you do not have the Record Id, execute a query first") String iRecordId) {
reloadRecordInternal(iRecordId, null);
}
@ConsoleCommand(splitInWords = false, description = "Explain how a command is executed profiling it")
public void explain(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
Object result = sqlCommand("explain", iCommandText, "\nProfiled command '%s' in %f sec(s):\n", true);
if (result != null && result instanceof ODocument) {
message(((ODocument) result).toJSON());
}
}
@ConsoleCommand(splitInWords = false, description = "Executes a command inside a transaction")
public void transactional(@ConsoleParameter(name = "command-text", description = "The command to execute") String iCommandText) {
sqlCommand("transactional", iCommandText, "\nResult: '%s'. Executed in %f sec(s).\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Insert a new record into the database")
public void insert(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("insert", iCommandText, "\nInserted record '%s' in %f sec(s).\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Create a new vertex into the database")
public void createVertex(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("create", iCommandText, "\nCreated vertex '%s' in %f sec(s).\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Create a new edge into the database")
public void createEdge(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("create", iCommandText, "\nCreated edge '%s' in %f sec(s).\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Update records in the database")
public void update(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("update", iCommandText, "\nUpdated %d record(s) in %f sec(s).\n", true);
updateDatabaseInfo();
currentDatabase.getLevel1Cache().invalidate();
currentDatabase.getLevel2Cache().clear();
}
@ConsoleCommand(splitInWords = false, description = "Delete records from the database")
public void delete(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("delete", iCommandText, "\nDelete %d record(s) in %f sec(s).\n", true);
updateDatabaseInfo();
currentDatabase.getLevel1Cache().invalidate();
currentDatabase.getLevel2Cache().clear();
}
@ConsoleCommand(splitInWords = false, description = "Grant privileges to a role")
public void grant(@ConsoleParameter(name = "text", description = "Grant command") String iCommandText) {
sqlCommand("grant", iCommandText, "\nPrivilege granted to the role: %s\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Revoke privileges to a role")
public void revoke(@ConsoleParameter(name = "text", description = "Revoke command") String iCommandText) {
sqlCommand("revoke", iCommandText, "\nPrivilege revoked to the role: %s\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Create a link from a JOIN")
public void createLink(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("create", iCommandText, "\nCreated %d link(s) in %f sec(s).\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Find all references the target record id @rid")
public void findReferences(
@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("find", iCommandText, "\nFound %s in %f sec(s).\n", true);
}
@ConsoleCommand(splitInWords = false, description = "Alter a database property")
public void alterDatabase(
@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("alter", iCommandText, "\nDatabase updated successfully\n", false);
updateDatabaseInfo();
}
@ConsoleCommand(description = "Freeze database and flush on the disk")
public void freezeDatabase(
@ConsoleParameter(name = "storage-type", description = "Storage type of server database", optional = true) String storageType)
throws IOException {
checkForDatabase();
final String dbName = currentDatabase.getName();
if (currentDatabase.getURL().startsWith(OEngineRemote.NAME)) {
if (serverAdmin == null) {
message("\n\nCannot freeze a remote database without connecting to the server with a valid server's user");
return;
}
if (storageType == null)
storageType = "plocal";
new OServerAdmin(currentDatabase.getURL()).connect(currentDatabaseUserName, currentDatabaseUserPassword).freezeDatabase(
storageType);
} else {
// LOCAL CONNECTION
currentDatabase.freeze();
}
message("\n\nDatabase '" + dbName + "' was frozen successfully");
}
@ConsoleCommand(description = "Release database after freeze")
public void releaseDatabase(
@ConsoleParameter(name = "storage-type", description = "Storage type of server database", optional = true) String storageType)
throws IOException {
checkForDatabase();
final String dbName = currentDatabase.getName();
if (currentDatabase.getURL().startsWith(OEngineRemote.NAME)) {
if (serverAdmin == null) {
message("\n\nCannot release a remote database without connecting to the server with a valid server's user");
return;
}
if (storageType == null)
storageType = "plocal";
new OServerAdmin(currentDatabase.getURL()).connect(currentDatabaseUserName, currentDatabaseUserPassword).releaseDatabase(
storageType);
} else {
// LOCAL CONNECTION
currentDatabase.release();
}
message("\n\nDatabase '" + dbName + "' was released successfully");
}
@ConsoleCommand(description = "Freeze clusters and flush on the disk")
public void freezeCluster(
@ConsoleParameter(name = "cluster-name", description = "The name of the cluster to freeze") String iClusterName,
@ConsoleParameter(name = "storage-type", description = "Storage type of server database", optional = true) String storageType)
throws IOException {
checkForDatabase();
final int clusterId = currentDatabase.getClusterIdByName(iClusterName);
if (currentDatabase.getURL().startsWith(OEngineRemote.NAME)) {
if (serverAdmin == null) {
message("\n\nCannot freeze a remote database without connecting to the server with a valid server's user");
return;
}
if (storageType == null)
storageType = "plocal";
new OServerAdmin(currentDatabase.getURL()).connect(currentDatabaseUserName, currentDatabaseUserPassword).freezeCluster(
clusterId, storageType);
} else {
// LOCAL CONNECTION
currentDatabase.freezeCluster(clusterId);
}
message("\n\nCluster '" + iClusterName + "' was frozen successfully");
}
@ConsoleCommand(description = "Release cluster after freeze")
public void releaseCluster(
@ConsoleParameter(name = "cluster-name", description = "The name of the cluster to unfreeze") String iClusterName,
@ConsoleParameter(name = "storage-type", description = "Storage type of server database", optional = true) String storageType)
throws IOException {
checkForDatabase();
final int clusterId = currentDatabase.getClusterIdByName(iClusterName);
if (currentDatabase.getURL().startsWith(OEngineRemote.NAME)) {
if (serverAdmin == null) {
message("\n\nCannot freeze a remote database without connecting to the server with a valid server's user");
return;
}
if (storageType == null)
storageType = "plocal";
new OServerAdmin(currentDatabase.getURL()).connect(currentDatabaseUserName, currentDatabaseUserPassword).releaseCluster(
clusterId, storageType);
} else {
// LOCAL CONNECTION
currentDatabase.releaseCluster(clusterId);
}
message("\n\nCluster '" + iClusterName + "' was released successfully");
}
@ConsoleCommand(splitInWords = false, description = "Alter a class in the database schema")
public void alterClass(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("alter", iCommandText, "\nClass updated successfully\n", false);
updateDatabaseInfo();
}
@ConsoleCommand(splitInWords = false, description = "Create a class")
public void createClass(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("create", iCommandText, "\nClass created successfully. Total classes in database now: %d\n", true);
updateDatabaseInfo();
}
@ConsoleCommand(splitInWords = false, description = "Alter a class property in the database schema")
public void alterProperty(
@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("alter", iCommandText, "\nProperty updated successfully\n", false);
updateDatabaseInfo();
}
@ConsoleCommand(splitInWords = false, description = "Create a property")
public void createProperty(
@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("create", iCommandText, "\nProperty created successfully with id=%d\n", true);
updateDatabaseInfo();
}
/***
* @author Claudio Tesoriero
* @param iCommandText
*/
@ConsoleCommand(splitInWords = false, description = "Create a stored function")
public void createFunction(
@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText) {
sqlCommand("create", iCommandText, "\nFunction created successfully with id=%s\n", true);
updateDatabaseInfo();
}
@ConsoleCommand(splitInWords = false, description = "Traverse records and display the results")
public void traverse(@ConsoleParameter(name = "query-text", description = "The traverse to execute") String iQueryText) {
final int limit;
if (iQueryText.contains("limit")) {
// RESET CONSOLE FLAG
limit = -1;
} else {
limit = Integer.parseInt((String) properties.get("limit"));
}
long start = System.currentTimeMillis();
currentResultSet = currentDatabase.command(new OCommandSQL("traverse " + iQueryText)).execute();
float elapsedSeconds = getElapsedSecs(start);
dumpResultSet(limit);
message("\n\n" + currentResultSet.size() + " item(s) found. Traverse executed in " + elapsedSeconds + " sec(s).");
}
@ConsoleCommand(splitInWords = false, description = "Execute a query against the database and display the results")
public void select(@ConsoleParameter(name = "query-text", description = "The query to execute") String iQueryText) {
checkForDatabase();
if (iQueryText == null)
return;
iQueryText = iQueryText.trim();
if (iQueryText.length() == 0 || iQueryText.equalsIgnoreCase("select"))
return;
iQueryText = "select " + iQueryText;
final int limit;
if (iQueryText.contains("limit")) {
limit = -1;
} else {
limit = Integer.parseInt((String) properties.get("limit"));
}
final long start = System.currentTimeMillis();
currentResultSet = currentDatabase.query(new OSQLSynchQuery<ODocument>(iQueryText, limit).setFetchPlan("*:1"));
float elapsedSeconds = getElapsedSecs(start);
dumpResultSet(limit);
message("\n\n" + currentResultSet.size() + " item(s) found. Query executed in " + elapsedSeconds + " sec(s).");
}
@SuppressWarnings("unchecked")
@ConsoleCommand(splitInWords = false, description = "Execute javascript commands in the console")
public void js(
@ConsoleParameter(name = "text", description = "The javascript to execute. Use 'db' to reference to a document database, 'gdb' for a graph database") final String iText) {
if (iText == null)
return;
currentResultSet.clear();
final OCommandExecutorScript cmd = new OCommandExecutorScript();
cmd.parse(new OCommandScript("Javascript", iText));
long start = System.currentTimeMillis();
final Object result = cmd.execute(null);
float elapsedSeconds = getElapsedSecs(start);
if (OMultiValue.isMultiValue(result)) {
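// Normalizes list, collection, or array results into currentResultSet so they can be dumped like query results.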
if (result instanceof List<?>)
currentResultSet = (List<OIdentifiable>) result;
else if (result instanceof Collection<?>) {
currentResultSet = new ArrayList<OIdentifiable>();
currentResultSet.addAll((Collection<? extends OIdentifiable>) result);
} else if (result.getClass().isArray()) {
currentResultSet = new ArrayList<OIdentifiable>();
for (OIdentifiable o : (OIdentifiable[]) result)
currentResultSet.add(o);
}
dumpResultSet(-1);
message("Client side script executed in %f sec(s). Returned %d records", elapsedSeconds, currentResultSet.size());
} else
message("Client side script executed in %f sec(s). Value returned is: %s", elapsedSeconds, result);
}
@SuppressWarnings("unchecked")
@ConsoleCommand(splitInWords = false, description = "Execute javascript commands against a remote server")
public void jss(
@ConsoleParameter(name = "text", description = "The javascript to execute. Use 'db' to reference to a document database, 'gdb' for a graph database") final String iText) {
checkForRemoteServer();
if (iText == null)
return;
currentResultSet.clear();
long start = System.currentTimeMillis();
Object result = currentDatabase.command(new OCommandScript("Javascript", iText)).execute();
float elapsedSeconds = getElapsedSecs(start);
if (OMultiValue.isMultiValue(result)) {
if (result instanceof List<?>)
currentResultSet = (List<OIdentifiable>) result;
else if (result instanceof Collection<?>) {
currentResultSet = new ArrayList<OIdentifiable>();
currentResultSet.addAll((Collection<? extends OIdentifiable>) result);
} else if (result.getClass().isArray()) {
currentResultSet = new ArrayList<OIdentifiable>();
for (OIdentifiable o : (OIdentifiable[]) result)
currentResultSet.add(o);
}
dumpResultSet(-1);
message("Server side script executed in %f sec(s). Returned %d records", elapsedSeconds, currentResultSet.size());
} else
message("Server side script executed in %f sec(s). Value returned is: %s", elapsedSeconds, result);
}
@ConsoleCommand(splitInWords = false, description = "Create an index against a property")
public void createIndex(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText)
throws IOException {
message("\n\nCreating index...");
sqlCommand("create", iCommandText, "\nCreated index successfully with %d entries in %f sec(s).\n", true);
updateDatabaseInfo();
message("\n\nIndex created successfully");
}
@ConsoleCommand(description = "Delete the current database")
public void dropDatabase(
@ConsoleParameter(name = "storage-type", description = "Storage type of server database", optional = true) String storageType)
throws IOException {
checkForDatabase();
final String dbName = currentDatabase.getName();
if (currentDatabase.getURL().startsWith(OEngineRemote.NAME)) {
if (serverAdmin == null) {
message("\n\nCannot drop a remote database without connecting to the server with a valid server's user");
return;
}
if (storageType == null)
storageType = "plocal";
// REMOTE CONNECTION
final String dbURL = currentDatabase.getURL().substring(OEngineRemote.NAME.length() + 1);
new OServerAdmin(dbURL).connect(currentDatabaseUserName, currentDatabaseUserPassword).dropDatabase(storageType);
} else {
// LOCAL CONNECTION
currentDatabase.drop();
currentDatabase = null;
currentDatabaseName = null;
}
message("\n\nDatabase '" + dbName + "' deleted successfully");
}
@ConsoleCommand(description = "Delete the specified database")
public void dropDatabase(
@ConsoleParameter(name = "database-url", description = "The url of the database to drop in the format '<mode>:<path>'") String iDatabaseURL,
@ConsoleParameter(name = "user", description = "Server administrator name") String iUserName,
@ConsoleParameter(name = "password", description = "Server administrator password") String iUserPassword,
@ConsoleParameter(name = "storage-type", description = "Storage type of server database", optional = true) String storageType)
throws IOException {
if (iDatabaseURL.startsWith(OEngineRemote.NAME)) {
// REMOTE CONNECTION
final String dbURL = iDatabaseURL.substring(OEngineRemote.NAME.length() + 1);
if (serverAdmin != null)
serverAdmin.close();
serverAdmin = new OServerAdmin(dbURL).connect(iUserName, iUserPassword);
serverAdmin.dropDatabase(storageType);
disconnect();
} else {
// LOCAL CONNECTION
currentDatabase = new ODatabaseDocumentTx(iDatabaseURL);
if (currentDatabase.exists()) {
currentDatabase.open(iUserName, iUserPassword);
currentDatabase.drop();
} else
message("\n\nCannot drop database '" + iDatabaseURL + "' because was not found");
currentDatabase = null;
currentDatabaseName = null;
}
message("\n\nDatabase '" + iDatabaseURL + "' deleted successfully");
}
@ConsoleCommand(splitInWords = false, description = "Remove an index")
public void dropIndex(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText)
throws IOException {
message("\n\nRemoving index...");
sqlCommand("drop", iCommandText, "\nDropped index in %f sec(s).\n", false);
updateDatabaseInfo();
message("\n\nIndex removed successfully");
}
@ConsoleCommand(splitInWords = false, description = "Rebuild an index if it is automatic")
public void rebuildIndex(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText)
throws IOException {
message("\n\nRebuilding index(es)...");
sqlCommand("rebuild", iCommandText, "\nRebuilt index(es). Found %d link(s) in %f sec(s).\n", true);
updateDatabaseInfo();
message("\n\nIndex(es) rebuilt successfully");
}
@ConsoleCommand(splitInWords = false, description = "Remove a class from the schema")
public void dropClass(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText)
throws IOException {
sqlCommand("drop", iCommandText, "\nRemoved class in %f sec(s).\n", false);
updateDatabaseInfo();
}
@ConsoleCommand(splitInWords = false, description = "Remove a property from a class")
public void dropProperty(@ConsoleParameter(name = "command-text", description = "The command text to execute") String iCommandText)
throws IOException {
sqlCommand("drop", iCommandText, "\nRemoved class property in %f sec(s).\n", false);
updateDatabaseInfo();
}
@ConsoleCommand(description = "Browse all records of a class")
public void browseClass(@ConsoleParameter(name = "class-name", description = "The name of the class") final String iClassName) {
checkForDatabase();
currentResultSet.clear();
final int limit = Integer.parseInt((String) properties.get("limit"));
OIdentifiableIterator<?> it = currentDatabase.browseClass(iClassName);
browseRecords(limit, it);
}
@ConsoleCommand(description = "Browse all records of a cluster")
public void browseCluster(
@ConsoleParameter(name = "cluster-name", description = "The name of the cluster") final String iClusterName) {
checkForDatabase();
currentResultSet.clear();
final int limit = Integer.parseInt((String) properties.get("limit"));
final ORecordIteratorCluster<?> it = currentDatabase.browseCluster(iClusterName);
browseRecords(limit, it);
}
@ConsoleCommand(aliases = { "display" }, description = "Display current record attributes")
public void displayRecord(
@ConsoleParameter(name = "number", description = "The number of the record in the most recent result set") final String iRecordNumber) {
checkForDatabase();
if (iRecordNumber == null)
checkCurrentObject();
else {
int recNumber = Integer.parseInt(iRecordNumber);
if (currentResultSet.size() == 0)
throw new OException("No result set where to find the requested record. Execute a query first.");
if (currentResultSet.size() <= recNumber)
throw new OException("The record requested is not part of current result set (0"
+ (currentResultSet.size() > 0 ? "-" + (currentResultSet.size() - 1) : "") + ")");
currentRecord = currentResultSet.get(recNumber).getRecord();
}
dumpRecordDetails();
}
@ConsoleCommand(description = "Display a record as raw bytes")
public void displayRawRecord(@ConsoleParameter(name = "rid", description = "The record id to display") final String iRecordId) {
checkForDatabase();
ORecordId rid = new ORecordId(iRecordId);
final ORawBuffer buffer = currentDatabase.getStorage().readRecord(rid, null, false, null, false).getResult();
if (buffer == null)
throw new OException("The record has been deleted");
String content;
if (Integer.parseInt(properties.get("maxBinaryDisplay")) < buffer.buffer.length)
content = new String(Arrays.copyOf(buffer.buffer, Integer.parseInt(properties.get("maxBinaryDisplay"))));
else
content = new String(buffer.buffer);
message("\nRaw record content. The size is " + buffer.buffer.length + " bytes, while settings force to print first "
+ content.length() + " bytes:\n\n" + new String(content));
}
@ConsoleCommand(aliases = { "status" }, description = "Display information about the database")
public void info() {
if (currentDatabaseName != null) {
message("\nCurrent database: " + currentDatabaseName + " (url=" + currentDatabase.getURL() + ")");
final OStorage stg = currentDatabase.getStorage();
if (stg instanceof OStorageRemoteThread) {
final ODocument clusterConfig = ((OStorageRemoteThread) stg).getClusterConfiguration();
if (clusterConfig != null)
message("\n\nCluster configuration: " + clusterConfig.toJSON("prettyPrint"));
else
message("\n\nCluster configuration: none");
} else if (stg instanceof OStorageLocal) {
final OStorageLocal localStorage = (OStorageLocal) stg;
long holeSize = localStorage.getHoleSize();
message("\nFragmented at " + (float) (holeSize * 100f / localStorage.getSize()) + "%%");
message("\n (" + localStorage.getHoles() + " holes, total size of holes: " + OFileUtils.getSizeAsString(holeSize) + ")");
}
listProperties();
listClusters();
listClasses();
listIndexes();
}
}
@ConsoleCommand(description = "Display the database properties")
public void listProperties() {
if (currentDatabase == null)
return;
final OStorage stg = currentDatabase.getStorage();
final OStorageConfiguration dbCfg = stg.getConfiguration();
message("\n\nDATABASE PROPERTIES:");
if (dbCfg.properties != null) {
message("\n--------------------------------+----------------------------------------------------+");
message("\n NAME | VALUE |");
message("\n--------------------------------+----------------------------------------------------+");
message("\n %-30s | %-50s |", "Name", format(dbCfg.name, 50));
message("\n %-30s | %-50s |", "Version", format("" + dbCfg.version, 50));
message("\n %-30s | %-50s |", "Date format", format(dbCfg.dateFormat, 50));
message("\n %-30s | %-50s |", "Datetime format", format(dbCfg.dateTimeFormat, 50));
message("\n %-30s | %-50s |", "Schema RID", format(dbCfg.schemaRecordId, 50));
message("\n %-30s | %-50s |", "Index Manager RID", format(dbCfg.indexMgrRecordId, 50));
message("\n %-30s | %-50s |", "Dictionary RID", format(dbCfg.dictionaryRecordId, 50));
message("\n--------------------------------+----------------------------------------------------+");
if (dbCfg.properties != null && !dbCfg.properties.isEmpty()) {
message("\n\nDATABASE CUSTOM PROPERTIES:");
message("\n +-------------------------------+--------------------------------------------------+");
message("\n | NAME | VALUE |");
message("\n +-------------------------------+--------------------------------------------------+");
for (OStorageEntryConfiguration cfg : dbCfg.properties)
message("\n | %-29s | %-49s|", cfg.name, format(cfg.value, 49));
message("\n +-------------------------------+--------------------------------------------------+");
}
}
}
@ConsoleCommand(aliases = { "desc" }, description = "Display the schema of a class")
public void infoClass(@ConsoleParameter(name = "class-name", description = "The name of the class") final String iClassName) {
if (currentDatabaseName == null) {
message("\nNo database selected yet.");
return;
}
final OClass cls = currentDatabase.getMetadata().getSchema().getClass(iClassName);
if (cls == null) {
message("\n! Class '" + iClassName + "' does not exist in the database '" + currentDatabaseName + "'");
return;
}
message("\nClass................: " + cls);
if (cls.getShortName() != null)
message("\nAlias................: " + cls.getShortName());
if (cls.getSuperClass() != null)
message("\nSuper class..........: " + cls.getSuperClass());
message("\nDefault cluster......: " + currentDatabase.getClusterNameById(cls.getDefaultClusterId()) + " (id="
+ cls.getDefaultClusterId() + ")");
message("\nSupported cluster ids: " + Arrays.toString(cls.getClusterIds()));
if (cls.getBaseClasses().hasNext()) {
message("Base classes.........: ");
int i = 0;
for (Iterator<OClass> it = cls.getBaseClasses(); it.hasNext();) {
if (i > 0)
message(", ");
message(it.next().getName());
++i;
}
out.println();
}
if (cls.properties().size() > 0) {
message("\nProperties:");
message("\n-------------------------------+-------------+-------------------------------+-----------+----------+----------+-----------+-----------+----------+");
message("\n NAME | TYPE | LINKED TYPE/CLASS | MANDATORY | READONLY | NOT NULL | MIN | MAX | COLLATE |");
message("\n-------------------------------+-------------+-------------------------------+-----------+----------+----------+-----------+-----------+----------+");
for (final OProperty p : cls.properties()) {
try {
message("\n %-30s| %-12s| %-30s| %-10s| %-9s| %-9s| %-10s| %-10s| %-9s|", p.getName(), p.getType(),
p.getLinkedClass() != null ? p.getLinkedClass() : p.getLinkedType(), p.isMandatory(), p.isReadonly(), p.isNotNull(),
p.getMin() != null ? p.getMin() : "", p.getMax() != null ? p.getMax() : "", p.getCollate() != null ? p.getCollate().getName() : "");
} catch (Exception e) {
}
}
message("\n-------------------------------+-------------+-------------------------------+-----------+----------+----------+-----------+-----------+----------+");
}
final Set<OIndex<?>> indexes = cls.getClassIndexes();
if (!indexes.isEmpty()) {
message("\nIndexes (" + indexes.size() + " altogether):");
message("\n-------------------------------+----------------+");
message("\n NAME | PROPERTIES |");
message("\n-------------------------------+----------------+");
for (final OIndex<?> index : indexes) {
final OIndexDefinition indexDefinition = index.getDefinition();
if (indexDefinition != null) {
final List<String> fields = indexDefinition.getFields();
message("\n %-30s| %-15s|", index.getName(), fields.get(0) + (fields.size() > 1 ? " (+)" : ""));
for (int i = 1; i < fields.size(); i++) {
if (i < fields.size() - 1)
message("\n %-30s| %-15s|", "", fields.get(i) + " (+)");
else
message("\n %-30s| %-15s|", "", fields.get(i));
}
} else {
message("\n %-30s| %-15s|", index.getName(), "");
}
}
message("\n-------------------------------+----------------+");
}
}
@ConsoleCommand(description = "Display all indexes", aliases = { "indexes" })
public void listIndexes() {
if (currentDatabaseName != null) {
message("\n\nINDEXES:");
message("\n----------------------------------------------+------------+-----------------------+----------------+------------+");
message("\n NAME | TYPE | CLASS | FIELDS | RECORDS |");
message("\n----------------------------------------------+------------+-----------------------+----------------+------------+");
int totalIndexes = 0;
long totalRecords = 0;
final List<OIndex<?>> indexes = new ArrayList<OIndex<?>>(currentDatabase.getMetadata().getIndexManager().getIndexes());
Collections.sort(indexes, new Comparator<OIndex<?>>() {
public int compare(OIndex<?> o1, OIndex<?> o2) {
return o1.getName().compareToIgnoreCase(o2.getName());
}
});
for (final OIndex<?> index : indexes) {
try {
final OIndexDefinition indexDefinition = index.getDefinition();
if (indexDefinition == null || indexDefinition.getClassName() == null) {
message("\n %-45s| %-10s | %-22s| %-15s|%11d |", format(index.getName(), 45), format(index.getType(), 10), "", "",
index.getSize());
} else {
final List<String> fields = indexDefinition.getFields();
if (fields.size() == 1) {
message("\n %-45s| %-10s | %-22s| %-15s|%11d |", format(index.getName(), 45), format(index.getType(), 10),
format(indexDefinition.getClassName(), 22), format(fields.get(0), 10), index.getSize());
} else {
message("\n %-45s| %-10s | %-22s| %-15s|%11d |", format(index.getName(), 45), format(index.getType(), 10),
format(indexDefinition.getClassName(), 22), format(fields.get(0), 10), index.getSize());
for (int i = 1; i < fields.size(); i++) {
message("\n %-45s| %-10s | %-22s| %-15s|%11s |", "", "", "", fields.get(i), "");
}
}
}
totalIndexes++;
totalRecords += index.getSize();
} catch (Exception e) {
}
}
message("\n----------------------------------------------+------------+-----------------------+----------------+------------+");
message("\n TOTAL = %-3d %15d |",
totalIndexes, totalRecords);
message("\n-----------------------------------------------------------------------------------------------------------------+");
} else
message("\nNo database selected yet.");
}
@ConsoleCommand(description = "Display all the configured clusters", aliases = { "clusters" })
public void listClusters() {
if (currentDatabaseName != null) {
message("\n\nCLUSTERS:");
message("\n----------------------------------------------+-------+---------------------+---------+-----------------+");
message("\n NAME | ID | TYPE | DATASEG | RECORDS |");
message("\n----------------------------------------------+-------+---------------------+---------+-----------------+");
int clusterId;
String clusterType = null;
long totalElements = 0;
long count;
final List<String> clusters = new ArrayList<String>(currentDatabase.getClusterNames());
Collections.sort(clusters);
for (String clusterName : clusters) {
try {
clusterId = currentDatabase.getClusterIdByName(clusterName);
clusterType = currentDatabase.getClusterType(clusterName);
final OCluster cluster = currentDatabase.getStorage().getClusterById(clusterId);
count = currentDatabase.countClusterElements(clusterName);
totalElements += count;
message("\n %-45s| %5d | %-20s| %7d | %15d |", format(clusterName, 45), clusterId, clusterType,
cluster.getDataSegmentId(), count);
} catch (Exception e) {
}
}
message("\n----------------------------------------------+-------+---------------------+---------+-----------------+");
message("\n TOTAL = %-3d | | %15s |", clusters.size(),
totalElements);
message("\n----------------------------------------------------------------------------+---------+-----------------+");
} else
message("\nNo database selected yet.");
}
@ConsoleCommand(description = "Display all the configured classes", aliases = { "classes" })
public void listClasses() {
if (currentDatabaseName != null) {
message("\n\nCLASSES:");
message("\n----------------------------------------------+------------------------------------+------------+----------------+");
message("\n NAME | SUPERCLASS | CLUSTERS | RECORDS |");
message("\n----------------------------------------------+------------------------------------+------------+----------------+");
long totalElements = 0;
long count;
final List<OClass> classes = new ArrayList<OClass>(currentDatabase.getMetadata().getSchema().getClasses());
Collections.sort(classes, new Comparator<OClass>() {
public int compare(OClass o1, OClass o2) {
return o1.getName().compareToIgnoreCase(o2.getName());
}
});
for (OClass cls : classes) {
try {
final StringBuilder clusters = new StringBuilder();
if (cls.isAbstract())
clusters.append("-");
else
for (int i = 0; i < cls.getClusterIds().length; ++i) {
if (i > 0)
clusters.append(", ");
clusters.append(cls.getClusterIds()[i]);
}
count = currentDatabase.countClass(cls.getName());
totalElements += count;
final String superClass = cls.getSuperClass() != null ? cls.getSuperClass().getName() : "";
message("\n %-45s| %-35s| %-11s|%15d |", format(cls.getName(), 45), format(superClass, 35), clusters.toString(), count);
} catch (Exception e) {
}
}
message("\n----------------------------------------------+------------------------------------+------------+----------------+");
message("\n TOTAL = %-3d %15d |",
classes.size(), totalElements);
message("\n----------------------------------------------+------------------------------------+------------+----------------+");
} else
message("\nNo database selected yet.");
}
@ConsoleCommand(description = "Display all keys in the database dictionary")
public void dictionaryKeys() {
checkForDatabase();
Iterable<Object> keys = currentDatabase.getDictionary().keys();
int i = 0;
for (Object k : keys) {
message(String.format("\n#%d: %s", i++, k));
}
message("\nFound " + i + " keys:");
}
@ConsoleCommand(description = "Loook up a record using the dictionary. If found, set it as the current record")
public void dictionaryGet(@ConsoleParameter(name = "key", description = "The key to search") final String iKey) {
checkForDatabase();
currentRecord = currentDatabase.getDictionary().get(iKey);
if (currentRecord == null)
message("\nEntry not found in dictionary.");
else {
currentRecord = (ORecordInternal<?>) currentRecord.load();
displayRecord(null);
}
}
@ConsoleCommand(description = "Insert or modify an entry in the database dictionary. The entry is comprised of key=String, value=record-id")
public void dictionaryPut(@ConsoleParameter(name = "key", description = "The key to bind") final String iKey,
@ConsoleParameter(name = "record-id", description = "The record-id of the record to bind to the key") final String iRecordId) {
checkForDatabase();
currentRecord = currentDatabase.load(new ORecordId(iRecordId));
if (currentRecord == null)
message("\nError: record with id '" + iRecordId + "' was not found in database");
else {
currentDatabase.getDictionary().put(iKey, (ODocument) currentRecord);
displayRecord(null);
message("\nThe entry " + iKey + "=" + iRecordId + " has been inserted in the database dictionary");
}
}
@ConsoleCommand(description = "Remove the association in the dictionary")
public void dictionaryRemove(@ConsoleParameter(name = "key", description = "The key to remove") final String iKey) {
checkForDatabase();
boolean result = currentDatabase.getDictionary().remove(iKey);
if (!result)
message("\nEntry not found in dictionary.");
else
message("\nEntry removed from the dictionary.");
}
@ConsoleCommand(description = "Copy a database to a remote server")
public void copyDatabase(
@ConsoleParameter(name = "db-name", description = "Name of the database to share") final String iDatabaseName,
@ConsoleParameter(name = "db-user", description = "Database user") final String iDatabaseUserName,
@ConsoleParameter(name = "db-password", description = "Database password") String iDatabaseUserPassword,
@ConsoleParameter(name = "server-name", description = "Remote server's name as <address>:<port>") final String iRemoteName,
@ConsoleParameter(name = "engine-name", description = "Remote server's engine to use between 'local' or 'memory'") final String iRemoteEngine)
throws IOException {
try {
if (serverAdmin == null)
throw new IllegalStateException("You must be connected to a remote server to share a database");
message("\nCopying database '" + iDatabaseName + "' to the server '" + iRemoteName + "' via network streaming...");
serverAdmin.copyDatabase(iDatabaseName, iDatabaseUserName, iDatabaseUserPassword, iRemoteName, iRemoteEngine);
message("\nDatabase '" + iDatabaseName + "' has been copied to the server '" + iRemoteName + "'");
} catch (Exception e) {
printError(e);
}
}
@ConsoleCommand(description = "Displays the status of the cluster nodes")
public void clusterStatus() throws IOException {
if (serverAdmin == null)
throw new IllegalStateException("You must be connected to a remote server to get the cluster status");
checkForRemoteServer();
try {
message("\nCluster status:");
out.println(serverAdmin.clusterStatus().toJSON("attribSameRow,alwaysFetchEmbedded,fetchPlan:*:0"));
} catch (Exception e) {
printError(e);
}
}
@ConsoleCommand(description = "Check database integrity")
public void checkDatabase(@ConsoleParameter(name = "options", description = "Options: -v", optional = true) final String iOptions)
throws IOException {
checkForDatabase();
if (!(currentDatabase.getStorage() instanceof OStorageLocalAbstract)) {
message("\nCannot check integrity of non-local database. Connect to it using local mode.");
return;
}
boolean verbose = iOptions != null && iOptions.indexOf("-v") > -1;
try {
((OStorageLocalAbstract) currentDatabase.getStorage()).check(verbose, this);
} catch (ODatabaseImportException e) {
printError(e);
}
}
@ConsoleCommand(description = "Compare two databases")
public void compareDatabases(
@ConsoleParameter(name = "db1-url", description = "URL of the first database") final String iDb1URL,
@ConsoleParameter(name = "db2-url", description = "URL of the second database") final String iDb2URL,
@ConsoleParameter(name = "user-name", description = "User name", optional = true) final String iUserName,
@ConsoleParameter(name = "user-password", description = "User password", optional = true) final String iUserPassword,
@ConsoleParameter(name = "detect-mapping-data", description = "Whether RID mapping data after DB import should be tried to found on the disk.", optional = true) Boolean autoDiscoveringMappingData)
throws IOException {
try {
final ODatabaseCompare compare;
if (iUserName == null)
compare = new ODatabaseCompare(iDb1URL, iDb2URL, this);
else
compare = new ODatabaseCompare(iDb1URL, iDb2URL, iUserName, iUserPassword, this);
compare.setAutoDetectExportImportMap(autoDiscoveringMappingData != null ? autoDiscoveringMappingData : true);
compare.compare();
} catch (ODatabaseExportException e) {
printError(e);
}
}
@ConsoleCommand(description = "Import a database into the current one", splitInWords = false)
public void importDatabase(@ConsoleParameter(name = "options", description = "Import options") final String text)
throws IOException {
checkForDatabase();
message("\nImporting " + text + "...");
final List<String> items = OStringSerializerHelper.smartSplit(text, ' ');
final String fileName = items.size() <= 0 || (items.get(1)).charAt(0) == '-' ? null : items.get(1);
final String options = fileName != null ? text.substring((items.get(0)).length() + (items.get(1)).length() + 1).trim() : text;
try {
ODatabaseImport databaseImport = new ODatabaseImport(currentDatabase, fileName, this);
databaseImport.setOptions(options).importDatabase();
databaseImport.close();
} catch (ODatabaseImportException e) {
printError(e);
}
}
@ConsoleCommand(description = "Backup a database", splitInWords = false)
public void backupDatabase(@ConsoleParameter(name = "options", description = "Backup options") final String iText)
throws IOException {
checkForDatabase();
out.println(new StringBuilder("Backuping current database to: ").append(iText).append("..."));
final List<String> items = OStringSerializerHelper.smartSplit(iText, ' ');
final String fileName = items.size() <= 0 || ((String) items.get(1)).charAt(0) == '-' ? null : (String) items.get(1);
// final String options = fileName != null ? iText.substring(
// ((String) items.get(0)).length() + ((String) items.get(1)).length() + 1).trim() : iText;
final long startTime = System.currentTimeMillis();
try {
currentDatabase.backup(new FileOutputStream(fileName), null, null);
message("\nBackup executed in %.2f seconds", ((float) (System.currentTimeMillis() - startTime) / 1000));
} catch (ODatabaseExportException e) {
printError(e);
}
}
@ConsoleCommand(description = "Restore a database into the current one", splitInWords = false)
public void restoreDatabase(@ConsoleParameter(name = "options", description = "Restore options") final String text)
throws IOException {
checkForDatabase();
message("\nRestoring database %s...", text);
final List<String> items = OStringSerializerHelper.smartSplit(text, ' ');
final String fileName = items.size() <= 1 || (items.get(1)).charAt(0) == '-' ? null : items.get(1);
// final String options = fileName != null ? text.substring((items.get(0)).length() + (items.get(1)).length() + 1).trim() :
// text;
final long startTime = System.currentTimeMillis();
try {
currentDatabase.restore(new FileInputStream(fileName), null, null);
message("\nDatabase restored in %.2f seconds", ((float) (System.currentTimeMillis() - startTime) / 1000));
} catch (ODatabaseImportException e) {
printError(e);
}
}
@ConsoleCommand(description = "Export a database", splitInWords = false)
public void exportDatabase(@ConsoleParameter(name = "options", description = "Export options") final String iText)
throws IOException {
checkForDatabase();
out.println(new StringBuilder("Exporting current database to: ").append(iText).append(" in GZipped JSON format ..."));
final List<String> items = OStringSerializerHelper.smartSplit(iText, ' ');
final String fileName = items.size() <= 1 || items.get(1).charAt(0) == '-' ? null : items.get(1);
final String options = fileName != null ? iText.substring(items.get(0).length() + items.get(1).length() + 1).trim() : iText;
try {
new ODatabaseExport(currentDatabase, fileName, this).setOptions(options).exportDatabase().close();
} catch (ODatabaseExportException e) {
printError(e);
}
}
@ConsoleCommand(description = "Export a database schema")
public void exportSchema(@ConsoleParameter(name = "output-file", description = "Output file path") final String iOutputFilePath)
throws IOException {
checkForDatabase();
message("\nExporting current database to: " + iOutputFilePath + "...");
try {
ODatabaseExport exporter = new ODatabaseExport(currentDatabase, iOutputFilePath, this);
exporter.setIncludeRecords(false);
exporter.exportDatabase().close();
} catch (ODatabaseExportException e) {
printError(e);
}
}
@ConsoleCommand(description = "Export the current record in the requested format")
public void exportRecord(@ConsoleParameter(name = "format", description = "Format, such as 'json'") final String iFormat,
@ConsoleParameter(name = "options", description = "Options", optional = true) String iOptions) throws IOException {
checkForDatabase();
checkCurrentObject();
final ORecordSerializer serializer = ORecordSerializerFactory.instance().getFormat(iFormat.toLowerCase());
if (serializer == null) {
message("\nERROR: Format '" + iFormat + "' was not found.");
printSupportedSerializerFormat();
return;
} else if (!(serializer instanceof ORecordSerializerStringAbstract)) {
message("\nERROR: Format '" + iFormat + "' does not export as text.");
printSupportedSerializerFormat();
return;
}
if (iOptions == null || iOptions.length() <= 0) {
iOptions = "rid,version,class,type,attribSameRow,keepTypes,alwaysFetchEmbedded,fetchPlan:*:0";
}
try {
out.println(((ORecordSerializerStringAbstract) serializer).toString(currentRecord, iOptions));
} catch (ODatabaseExportException e) {
printError(e);
}
}
@ConsoleCommand(description = "Return all configured properties")
public void properties() {
message("\nPROPERTIES:");
message("\n+---------------------+----------------------+");
message("\n| %-30s| %-30s |", "NAME", "VALUE");
message("\n+---------------------+----------------------+");
for (Entry<String, String> p : properties.entrySet()) {
message("\n| %-30s= %-30s |", p.getKey(), p.getValue());
}
message("\n+---------------------+----------------------+");
}
@ConsoleCommand(description = "Return the value of a property")
public void get(@ConsoleParameter(name = "property-name", description = "Name of the property") final String iPropertyName) {
Object value = properties.get(iPropertyName);
out.println();
if (value == null)
message("\nProperty '" + iPropertyName + "' is not setted");
else
out.println(iPropertyName + " = " + value);
}
@ConsoleCommand(description = "Change the value of a property")
public void set(@ConsoleParameter(name = "property-name", description = "Name of the property") final String iPropertyName,
@ConsoleParameter(name = "property-value", description = "Value to set") final String iPropertyValue) {
Object prevValue = properties.get(iPropertyName);
out.println();
if (iPropertyName.equalsIgnoreCase("limit") && (Integer.parseInt(iPropertyValue) == 0 || Integer.parseInt(iPropertyValue) < -1)) {
message("\nERROR: Limit must be > 0 or = -1 (no limit)");
} else {
if (prevValue != null)
message("\nPrevious value was: " + prevValue);
properties.put(iPropertyName, iPropertyValue);
out.println();
out.println(iPropertyName + " = " + iPropertyValue);
}
}
@ConsoleCommand(description = "Declare an intent")
public void declareIntent(
@ConsoleParameter(name = "Intent name", description = "name of the intent to execute") final String iIntentName) {
checkForDatabase();
message("\nDeclaring intent '" + iIntentName + "'...");
if (iIntentName.equalsIgnoreCase("massiveinsert"))
currentDatabase.declareIntent(new OIntentMassiveInsert());
else if (iIntentName.equalsIgnoreCase("massiveread"))
currentDatabase.declareIntent(new OIntentMassiveRead());
else
throw new IllegalArgumentException("Intent '" + iIntentName
+ "' not supported. Available ones are: massiveinsert, massiveread");
message("\nIntent '" + iIntentName + "' setted successfully");
}
@ConsoleCommand(description = "Execute a command against the profiler")
public void profiler(
@ConsoleParameter(name = "profiler command", description = "command to execute against the profiler") final String iCommandName) {
if (iCommandName.equalsIgnoreCase("on")) {
Orient.instance().getProfiler().startRecording();
message("\nProfiler is ON now, use 'profiler off' to turn off.");
} else if (iCommandName.equalsIgnoreCase("off")) {
Orient.instance().getProfiler().stopRecording();
message("\nProfiler is OFF now, use 'profiler on' to turn on.");
} else if (iCommandName.equalsIgnoreCase("dump")) {
out.println(Orient.instance().getProfiler().dump());
}
}
@ConsoleCommand(description = "Return the value of a configuration value")
public void configGet(@ConsoleParameter(name = "config-name", description = "Name of the configuration") final String iConfigName)
throws IOException {
final OGlobalConfiguration config = OGlobalConfiguration.findByKey(iConfigName);
if (config == null)
throw new IllegalArgumentException("Configuration variable '" + iConfigName + "' wasn't found");
final String value;
if (serverAdmin != null) {
value = serverAdmin.getGlobalConfiguration(config);
message("\nRemote configuration: ");
} else {
value = config.getValueAsString();
message("\nLocal configuration: ");
}
out.println(iConfigName + " = " + value);
}
@ConsoleCommand(description = "Sleep X milliseconds")
public void sleep(final String iTime) {
try {
Thread.sleep(Long.parseLong(iTime));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
@ConsoleCommand(description = "Change the value of a configuration value")
public void configSet(
@ConsoleParameter(name = "config-name", description = "Name of the configuration") final String iConfigName,
@ConsoleParameter(name = "config-value", description = "Value to set") final String iConfigValue) throws IOException {
final OGlobalConfiguration config = OGlobalConfiguration.findByKey(iConfigName);
if (config == null)
throw new IllegalArgumentException("Configuration variable '" + iConfigName + "' not found");
if (serverAdmin != null) {
serverAdmin.setGlobalConfiguration(config, iConfigValue);
message("\n\nRemote configuration value changed correctly");
} else {
config.setValue(iConfigValue);
message("\n\nLocal configuration value changed correctly");
}
out.println();
}
@ConsoleCommand(description = "Return all the configuration values")
public void config() throws IOException {
if (serverAdmin != null) {
// REMOTE STORAGE
final Map<String, String> values = serverAdmin.getGlobalConfigurations();
message("\nREMOTE SERVER CONFIGURATION:");
message("\n+------------------------------------+--------------------------------+");
message("\n| %-35s| %-30s |", "NAME", "VALUE");
message("\n+------------------------------------+--------------------------------+");
for (Entry<String, String> p : values.entrySet()) {
message("\n| %-35s= %-30s |", p.getKey(), p.getValue());
}
} else {
// LOCAL STORAGE
message("\nLOCAL SERVER CONFIGURATION:");
message("\n+------------------------------------+--------------------------------+");
message("\n| %-35s| %-30s |", "NAME", "VALUE");
message("\n+------------------------------------+--------------------------------+");
for (OGlobalConfiguration cfg : OGlobalConfiguration.values()) {
message("\n| %-35s= %-30s |", cfg.getKey(), cfg.getValue());
}
}
message("\n+------------------------------------+--------------------------------+");
}
/** Should be used only by console commands */
public ODatabaseDocument getCurrentDatabase() {
return currentDatabase;
}
/** Should be used only by console commands */
public String getCurrentDatabaseName() {
return currentDatabaseName;
}
/** Should be used only by console commands */
public String getCurrentDatabaseUserName() {
return currentDatabaseUserName;
}
/** Should be used only by console commands */
public String getCurrentDatabaseUserPassword() {
return currentDatabaseUserPassword;
}
/** Should be used only by console commands */
public ORecordInternal<?> getCurrentRecord() {
return currentRecord;
}
/** Should be used only by console commands */
public List<OIdentifiable> getCurrentResultSet() {
return currentResultSet;
}
/** Should be used only by console commands */
public void loadRecordInternal(String iRecordId, String iFetchPlan) {
checkForDatabase();
currentRecord = currentDatabase.load(new ORecordId(iRecordId), iFetchPlan);
displayRecord(null);
message("\nOK");
}
/** Should be used only by console commands */
public void reloadRecordInternal(String iRecordId, String iFetchPlan) {
checkForDatabase();
currentRecord = ((ODatabaseRecordAbstract) currentDatabase.getUnderlying()).executeReadRecord(new ORecordId(iRecordId), null,
iFetchPlan, true, false);
displayRecord(null);
message("\nOK");
}
/** Should be used only by console commands */
public void checkForRemoteServer() {
if (serverAdmin == null
&& (currentDatabase == null || !(currentDatabase.getStorage() instanceof OStorageRemoteThread) || currentDatabase
.isClosed()))
throw new OException("Remote server is not connected. Use 'connect remote:<host>[:<port>][/<database-name>]' to connect");
}
/** Should be used only by console commands */
public void checkForDatabase() {
if (currentDatabase == null)
throw new OException("Database not selected. Use 'connect <database-name>' to connect to a database.");
if (currentDatabase.isClosed())
throw new ODatabaseException("Database '" + currentDatabaseName + "' is closed");
}
/** Should be used only by console commands */
public void checkCurrentObject() {
if (currentRecord == null)
throw new OException("The is no current object selected: create a new one or load it");
}
private void dumpRecordDetails() {
if (currentRecord instanceof ODocument) {
ODocument rec = (ODocument) currentRecord;
message("\n--------------------------------------------------");
message("\nODocument - Class: %s id: %s v.%s", rec.getClassName(), rec.getIdentity().toString(), rec.getRecordVersion()
.toString());
message("\n--------------------------------------------------");
Object value;
for (String fieldName : rec.fieldNames()) {
value = rec.field(fieldName);
if (value instanceof byte[])
value = "byte[" + ((byte[]) value).length + "]";
else if (value instanceof Iterator<?>) {
final List<Object> coll = new ArrayList<Object>();
while (((Iterator<?>) value).hasNext())
coll.add(((Iterator<?>) value).next());
value = coll;
}
message("\n%20s : %-20s", fieldName, value);
}
} else if (currentRecord instanceof ORecordFlat) {
ORecordFlat rec = (ORecordFlat) currentRecord;
message("\n--------------------------------------------------");
message("\nFlat - record id: %s v.%s", rec.getIdentity().toString(), rec.getRecordVersion().toString());
message("\n--------------------------------------------------");
message(rec.value());
} else if (currentRecord instanceof ORecordBytes) {
ORecordBytes rec = (ORecordBytes) currentRecord;
message("\n--------------------------------------------------");
message("\nBytes - record id: %s v.%s", rec.getIdentity().toString(), rec.getRecordVersion().toString());
message("\n--------------------------------------------------");
final byte[] value = rec.toStream();
final int max = Math.min(Integer.parseInt(properties.get("maxBinaryDisplay")), value.length);
for (int i = 0; i < max; ++i) {
message("%03d", value[i]);
}
} else {
message("\n--------------------------------------------------");
message("\n%s - record id: %s v.%s", currentRecord.getClass().getSimpleName(), currentRecord.getIdentity().toString(),
currentRecord.getRecordVersion().toString());
}
out.println();
}
public String ask(final String iText) {
out.print(iText);
// Do not close the Scanner: closing it would also close the underlying console input stream
final Scanner scanner = new Scanner(in);
return scanner.nextLine();
}
public void onMessage(final String iText) {
message(iText);
}
private void printSupportedSerializerFormat() {
message("\nSupported formats are:");
for (ORecordSerializer s : ORecordSerializerFactory.instance().getFormats()) {
if (s instanceof ORecordSerializerStringAbstract)
message("\n- " + s.toString());
}
}
private void browseRecords(final int limit, final OIdentifiableIterator<?> it) {
final OTableFormatter tableFormatter = new OTableFormatter(this).setMaxWidthSize(Integer.parseInt(properties.get("width")));
currentResultSet.clear();
while (it.hasNext() && currentResultSet.size() <= limit)
currentResultSet.add(it.next());
tableFormatter.writeRecords(currentResultSet, limit);
}
private Object sqlCommand(final String iExpectedCommand, String iReceivedCommand, final String iMessage,
final boolean iIncludeResult) {
checkForDatabase();
if (iReceivedCommand == null)
return null;
iReceivedCommand = iExpectedCommand + " " + iReceivedCommand.trim();
currentResultSet.clear();
final long start = System.currentTimeMillis();
final Object result = new OCommandSQL(iReceivedCommand).setProgressListener(this).execute();
float elapsedSeconds = getElapsedSecs(start);
if (iIncludeResult)
message(iMessage, result, elapsedSeconds);
else
message(iMessage, elapsedSeconds);
return result;
}
public void onBegin(final Object iTask, final long iTotal) {
lastPercentStep = 0;
message("[");
if (interactiveMode) {
for (int i = 0; i < 10; ++i)
message(" ");
message("] 0%");
}
}
public boolean onProgress(final Object iTask, final long iCounter, final float iPercent) {
final int completionBar = (int) iPercent / 10;
if (((int) (iPercent * 10)) == lastPercentStep)
return true;
final StringBuilder buffer = new StringBuilder();
if (interactiveMode) {
buffer.append("\r[");
for (int i = 0; i < completionBar; ++i)
buffer.append('=');
for (int i = completionBar; i < 10; ++i)
buffer.append(' ');
message(buffer.toString());
message("] %3.1f%% ", iPercent);
} else {
for (int i = lastPercentStep / 100; i < completionBar; ++i)
buffer.append('=');
message(buffer.toString());
}
lastPercentStep = (int) (iPercent * 10);
return true;
}
@ConsoleCommand(description = "Display the current path")
public void pwd() {
message("\nCurrent path: " + new File("").getAbsolutePath());
}
public void onCompletition(final Object iTask, final boolean iSucceed) {
if (interactiveMode)
if (iSucceed)
message("\r[==========] 100% Done.");
else
message(" Error!");
else
message(iSucceed ? "] Done." : " Error!");
}
protected void printApplicationInfo() {
message("\nOrientDB console v." + OConstants.getVersion() + " " + OConstants.ORIENT_URL);
message("\nType 'help' to display all the commands supported.");
}
protected static boolean setTerminalToCBreak() throws IOException, InterruptedException {
// set the console to be character-buffered instead of line-buffered
int result = stty("-icanon min 1");
if (result != 0) {
return false;
}
// disable character echoing
stty("-echo");
return true;
}
protected void dumpResultSet(final int limit) {
new OTableFormatter(this).setMaxWidthSize(Integer.parseInt(properties.get("width"))).writeRecords(currentResultSet, limit);
}
/**
* Execute the stty command with the specified arguments against the current active terminal.
*/
protected static int stty(final String args) throws IOException, InterruptedException {
String cmd = "stty " + args + " < /dev/tty";
return exec(new String[] { "sh", "-c", cmd });
}
protected float getElapsedSecs(final long start) {
return (float) (System.currentTimeMillis() - start) / 1000;
}
/**
* Execute the specified command, draining its stdout and stderr so the child process cannot
* block on a full pipe buffer, and return its exit value.
*/
protected static int exec(final String[] cmd) throws IOException, InterruptedException {
final Process p = Runtime.getRuntime().exec(cmd);
// Drain and discard both output streams before waiting for termination
InputStream in = p.getInputStream();
while (in.read() != -1)
;
in = p.getErrorStream();
while (in.read() != -1)
;
return p.waitFor();
}
protected void printError(final Exception e) {
if (properties.get("debug") != null && Boolean.parseBoolean(properties.get("debug").toString())) {
message("\n\n!ERROR:");
e.printStackTrace();
} else {
// SHORT FORM
message("\n\n!ERROR: " + e.getMessage());
if (e.getCause() != null) {
Throwable t = e.getCause();
while (t != null) {
message("\n-> " + t.getMessage());
t = t.getCause();
}
}
}
}
protected void updateDatabaseInfo() {
currentDatabase.getStorage().reload();
currentDatabase.getMetadata().getSchema().reload();
currentDatabase.getMetadata().getIndexManager().reload();
}
} | 1no label
| tools_src_main_java_com_orientechnologies_orient_console_OConsoleDatabaseApp.java |
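The setTerminalToCBreak/stty/exec helpers in the console above shell out to stty against /dev/tty to switch the terminal into character-buffered, non-echoing mode. A minimal standalone sketch of the same trick, assuming a POSIX shell and terminal are available; the class name and the use of ProcessBuilder (rather than Runtime.exec as above) are illustrative assumptions:

import java.io.IOException;

public class SttySketch {

  // Switch the controlling terminal to character-buffered, non-echoing input.
  static boolean enterCBreak() throws IOException, InterruptedException {
    return stty("-icanon min 1") == 0 && stty("-echo") == 0;
  }

  // Restore canonical, echoing input; callers should invoke this on exit.
  static boolean leaveCBreak() throws IOException, InterruptedException {
    return stty("icanon") == 0 && stty("echo") == 0;
  }

  // stty reads its terminal from stdin, hence the /dev/tty redirection through a shell.
  private static int stty(final String args) throws IOException, InterruptedException {
    final Process p = new ProcessBuilder("sh", "-c", "stty " + args + " < /dev/tty").start();
    return p.waitFor();
  }
}

Restoring the terminal is deliberately left to the caller, mirroring how the console pairs the mode switch with explicit cleanup.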
1,518 | @Component("blPaginationPageLinkProcessor")
public class PaginationPageLinkProcessor extends AbstractAttributeModifierAttrProcessor {
/**
* Sets the name of this processor, which is used in Thymeleaf templates
*/
public PaginationPageLinkProcessor() {
super("paginationpagelink");
}
@Override
public int getPrecedence() {
return 10000;
}
@Override
@SuppressWarnings("unchecked")
protected Map<String, String> getModifiedAttributeValues(Arguments arguments, Element element, String attributeName) {
Map<String, String> attrs = new HashMap<String, String>();
BroadleafRequestContext blcContext = BroadleafRequestContext.getBroadleafRequestContext();
HttpServletRequest request = blcContext.getRequest();
String baseUrl = request.getRequestURL().toString();
Map<String, String[]> params = new HashMap<String, String[]>(request.getParameterMap());
Integer page = (Integer) StandardExpressionProcessor.processExpression(arguments, element.getAttributeValue(attributeName));
if (page != null && page > 1) {
params.put(ProductSearchCriteria.PAGE_NUMBER, new String[] { page.toString() });
} else {
params.remove(ProductSearchCriteria.PAGE_NUMBER);
}
String url = ProcessorUtils.getUrl(baseUrl, params);
attrs.put("href", url);
return attrs;
}
@Override
protected ModificationType getModificationType(Arguments arguments, Element element, String attributeName, String newAttributeName) {
return ModificationType.SUBSTITUTION;
}
@Override
protected boolean removeAttributeIfEmpty(Arguments arguments, Element element, String attributeName, String newAttributeName) {
return true;
}
@Override
protected boolean recomputeProcessorsAfterExecution(Arguments arguments, Element element, String attributeName) {
return false;
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_processor_PaginationPageLinkProcessor.java |
1,432 | public class OChannelTextServer extends OChannelText {
public OChannelTextServer(final Socket iSocket, final OContextConfiguration iConfiguration) throws IOException {
super(iSocket, iConfiguration);
socket.setKeepAlive(true);
socket.setPerformancePreferences(1, 2, 0);
socket.setSendBufferSize(socketBufferSize);
socket.setReceiveBufferSize(socketBufferSize);
inStream = new BufferedInputStream(socket.getInputStream(), socketBufferSize);
outStream = new BufferedOutputStream(socket.getOutputStream(), socketBufferSize);
}
} | 0true
| enterprise_src_main_java_com_orientechnologies_orient_enterprise_channel_text_OChannelTextServer.java |
2,534 | new HashMap<String, Object>() {{
put("field", "value");
put("field2", "value2");
}}); | 0true
| src_test_java_org_elasticsearch_common_xcontent_support_XContentMapValuesTests.java |
1,163 | new Thread(new Runnable() {
@Override
public void run() {
for (int i = 0; i < NUMBER_OF_ITERATIONS; i++) {
BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
transportServiceClient.submitRequest(bigNode, "benchmark", message, options().withType(TransportRequestOptions.Type.BULK), new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
@Override
public BenchmarkMessageResponse newInstance() {
return new BenchmarkMessageResponse();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(BenchmarkMessageResponse response) {
}
@Override
public void handleException(TransportException exp) {
exp.printStackTrace();
}
}).txGet();
}
latch.countDown();
}
}).start(); | 0true
| src_test_java_org_elasticsearch_benchmark_transport_BenchmarkNettyLargeMessages.java |
1,246 | public class SimpleFulfillmentLocationResolver implements FulfillmentLocationResolver {
protected Address address;
@Override
public Address resolveLocationForFulfillmentGroup(FulfillmentGroup group) {
return address;
}
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_pricing_service_fulfillment_SimpleFulfillmentLocationResolver.java |
2,661 | zenPingB.setNodesProvider(new DiscoveryNodesProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().put(nodeB).localNodeId("UZP_B").build();
}
@Override
public NodeService nodeService() {
return null;
}
}); | 0true
| src_test_java_org_elasticsearch_discovery_zen_ping_unicast_UnicastZenPingTests.java |
813 | public static class Presentation {
public static class Tab {
public static class Name {
public static final String Codes = "OfferImpl_Codes_Tab";
public static final String Advanced = "OfferImpl_Advanced_Tab";
}
public static class Order {
public static final int Codes = 1000;
public static final int Advanced = 2000;
}
}
public static class Group {
public static class Name {
public static final String Description = "OfferImpl_Description";
public static final String Amount = "OfferImpl_Amount";
public static final String ActivityRange = "OfferImpl_Activity_Range";
public static final String Qualifiers = "OfferImpl_Qualifiers";
public static final String ItemTarget = "OfferImpl_Item_Target";
public static final String Advanced = "OfferImpl_Advanced";
}
public static class Order {
public static final int Description = 1000;
public static final int Amount = 2000;
public static final int ActivityRange = 3000;
public static final int Qualifiers = 4000;
public static final int ItemTarget = 5000;
public static final int Advanced = 1000;
}
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_domain_OfferImpl.java |
343 | public class NodesShutdownRequest extends MasterNodeOperationRequest<NodesShutdownRequest> {
String[] nodesIds = Strings.EMPTY_ARRAY;
TimeValue delay = TimeValue.timeValueSeconds(1);
boolean exit = true;
NodesShutdownRequest() {
}
public NodesShutdownRequest(String... nodesIds) {
this.nodesIds = nodesIds;
}
public NodesShutdownRequest nodesIds(String... nodesIds) {
this.nodesIds = nodesIds;
return this;
}
/**
* The delay for the shutdown to occur. Defaults to <tt>1s</tt>.
*/
public NodesShutdownRequest delay(TimeValue delay) {
this.delay = delay;
return this;
}
public TimeValue delay() {
return this.delay;
}
/**
* The delay for the shutdown to occur. Defaults to <tt>1s</tt>.
*/
public NodesShutdownRequest delay(String delay) {
return delay(TimeValue.parseTimeValue(delay, null));
}
/**
* Should the JVM be exited as well or not. Defaults to <tt>true</tt>.
*/
public NodesShutdownRequest exit(boolean exit) {
this.exit = exit;
return this;
}
/**
* Should the JVM be exited as well or not. Defaults to <tt>true</tt>.
*/
public boolean exit() {
return exit;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
delay = readTimeValue(in);
nodesIds = in.readStringArray();
exit = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
delay.writeTo(out);
out.writeStringArrayNullable(nodesIds);
out.writeBoolean(exit);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_node_shutdown_NodesShutdownRequest.java |
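The readFrom/writeTo pair above must stay field-for-field symmetric: the delay, node ids, and exit flag are read back in exactly the order they were written. A reduced round-trip sketch of that contract using plain DataInput/DataOutput streams; the field set is trimmed and the class name is illustrative:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class ShutdownRequestSketch {
  long delayMillis = 1000;
  boolean exit = true;

  void writeTo(final DataOutput out) throws IOException {
    out.writeLong(delayMillis); // 1. delay
    out.writeBoolean(exit);     // 2. exit flag -- same order readFrom expects
  }

  void readFrom(final DataInput in) throws IOException {
    delayMillis = in.readLong();
    exit = in.readBoolean();
  }

  public static void main(final String[] args) throws IOException {
    final ShutdownRequestSketch original = new ShutdownRequestSketch();
    final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.writeTo(new DataOutputStream(bytes));
    final ShutdownRequestSketch copy = new ShutdownRequestSketch();
    copy.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(copy.delayMillis + " " + copy.exit); // 1000 true
  }
}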
231 | public class NodeCommandTest
{
private NodeStore nodeStore;
@Rule
public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
@Test
public void shouldSerializeAndDeserializeUnusedRecords() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
NodeRecord after = new NodeRecord( 12, 2, 1 );
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeCreatedRecord() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setCreated();
after.setInUse( true );
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeUpdatedRecord() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
before.setInUse( true );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setInUse( true );
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeInlineLabels() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
before.setInUse( true );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setInUse( true );
NodeLabels nodeLabels = parseLabelsField( after );
nodeLabels.add( 1337, nodeStore );
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeDynamicRecordLabels() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
before.setInUse( true );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setInUse( true );
NodeLabels nodeLabels = parseLabelsField( after );
for ( int i = 10; i < 100; i++ )
{
nodeLabels.add( i, nodeStore );
}
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeDynamicRecordsRemoved() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
before.setInUse( true );
List<DynamicRecord> beforeDyn = asList( dynamicRecord( 0, true, true, -1l, LONG.intValue(), new byte[]{1,2,3,4,5,6,7,8}));
before.setLabelField( dynamicPointer( beforeDyn ), beforeDyn );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setInUse( true );
List<DynamicRecord> dynamicRecords = asList( dynamicRecord( 0, false, true, -1l, LONG.intValue(), new byte[]{1,2,3,4,5,6,7,8}));
after.setLabelField( dynamicPointer( dynamicRecords ), dynamicRecords );
// When
Command.NodeCommand cmd = new Command.NodeCommand( null, before, after );
InMemoryLogBuffer buffer = new InMemoryLogBuffer();
cmd.writeToFile( buffer );
Command.NodeCommand result = (Command.NodeCommand) readCommand( null, null, buffer, allocate( 64 ) );
// Then
assertThat( result, equalTo( cmd ) );
assertThat( result.getMode(), equalTo( cmd.getMode() ) );
assertThat( result.getBefore(), equalTo( cmd.getBefore() ) );
assertThat( result.getAfter(), equalTo( cmd.getAfter() ) );
// And dynamic records should be the same
assertThat( result.getBefore().getDynamicLabelRecords(), equalTo( cmd.getBefore().getDynamicLabelRecords()));
assertThat( result.getAfter().getDynamicLabelRecords(), equalTo( cmd.getAfter().getDynamicLabelRecords() ) );
}
private void assertSerializationWorksFor( Command.NodeCommand cmd ) throws IOException
{
InMemoryLogBuffer buffer = new InMemoryLogBuffer();
cmd.writeToFile( buffer );
Command.NodeCommand result = (Command.NodeCommand) readCommand( null, null, buffer, allocate( 64 ) );
// Then
assertThat( result, equalTo( cmd ) );
assertThat( result.getMode(), equalTo( cmd.getMode() ) );
assertThat( result.getBefore(), equalTo( cmd.getBefore() ) );
assertThat( result.getAfter(), equalTo( cmd.getAfter() ) );
// And labels should be the same
assertThat( labels( result.getBefore() ), equalTo( labels( cmd.getBefore() ) ) );
assertThat( labels( result.getAfter() ), equalTo( labels( cmd.getAfter() ) ) );
// And dynamic records should be the same
assertThat( result.getBefore().getDynamicLabelRecords(), equalTo( cmd.getBefore().getDynamicLabelRecords()));
assertThat( result.getAfter().getDynamicLabelRecords(), equalTo( cmd.getAfter().getDynamicLabelRecords() ) );
}
private Set<Integer> labels( NodeRecord record )
{
long[] rawLabels = parseLabelsField( record ).get( nodeStore );
Set<Integer> labels = new HashSet<>( rawLabels.length );
for ( long label : rawLabels )
{
labels.add( safeCastLongToInt( label ) );
}
return labels;
}
@Before
public void before() throws Exception
{
@SuppressWarnings("deprecation")
StoreFactory storeFactory = new StoreFactory( new Config(), new DefaultIdGeneratorFactory(),
new DefaultWindowPoolFactory(), fs.get(), StringLogger.DEV_NULL, new DefaultTxHook() );
File storeFile = new File( "nodestore" );
storeFactory.createNodeStore( storeFile );
nodeStore = storeFactory.newNodeStore( storeFile );
}
@After
public void after() throws Exception
{
nodeStore.close();
}
} | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_NodeCommandTest.java |
269 | private static class KeyIterationPredicate implements Predicate<KeySlice> {
@Override
public boolean apply(@Nullable KeySlice row) {
return (row != null) && row.getColumns().size() > 0;
}
} | 0true
| titan-cassandra_src_main_java_com_thinkaurelius_titan_diskstorage_cassandra_thrift_CassandraThriftKeyColumnValueStore.java |
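KeyIterationPredicate above is a plain Guava Predicate, so it composes directly with Guava's filtering utilities. An illustrative sketch of applying the same column-count test while paging over rows; the wrapper class and method are assumptions, not part of the store:

import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import org.apache.cassandra.thrift.KeySlice;

public class KeySliceFilterSketch {

  // Lazily drop slices that carry no columns, the same test KeyIterationPredicate applies.
  static Iterable<KeySlice> nonEmpty(final Iterable<KeySlice> rows) {
    return Iterables.filter(rows, new Predicate<KeySlice>() {
      @Override
      public boolean apply(final KeySlice row) {
        return row != null && row.getColumns().size() > 0;
      }
    });
  }
}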
1,520 | public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private Closure closure;
private boolean isVertex;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
try {
this.closure = (Closure) engine.eval(context.getConfiguration().get(CLOSURE));
} catch (final ScriptException e) {
throw new IOException(e.getMessage(), e);
}
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
if (this.isVertex) {
if (value.hasPaths()) {
//for (int i = 0; i < value.pathCount(); i++) {
this.closure.call(value);
//}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
}
} else {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.IN)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
edgesProcessed++;
//for (int i = 0; i < edge.pathCount(); i++) {
this.closure.call(edge);
//}
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_PROCESSED, edgesProcessed);
edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
edgesProcessed++;
//for (int i = 0; i < edge.pathCount(); i++) {
this.closure.call(edge);
//}
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
}
context.write(NullWritable.get(), value);
}
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_sideeffect_SideEffectMap.java |
1,516 | public final class NodeInitializerFactory {
private static final ILogger LOGGER = Logger.getLogger(NodeInitializerFactory.class);
private static final String FACTORY_ID = "com.hazelcast.NodeInitializer";
private NodeInitializerFactory() {
}
public static NodeInitializer create(ClassLoader classLoader) {
try {
Iterator<NodeInitializer> iter = ServiceLoader.iterator(NodeInitializer.class, FACTORY_ID, classLoader);
while (iter.hasNext()) {
NodeInitializer initializer = iter.next();
if (!(initializer.getClass().equals(DefaultNodeInitializer.class))) {
return initializer;
}
}
} catch (Exception e) {
LOGGER.warning("NodeInitializer could not be instantiated! => "
+ e.getClass().getName() + ": " + e.getMessage());
}
return new DefaultNodeInitializer();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_instance_NodeInitializerFactory.java |
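NodeInitializerFactory above walks a service registry and prefers the first implementation that is not the default. The same first-non-default lookup expressed with the plain JDK ServiceLoader — Hazelcast uses its own loader keyed by a factory id, so the interface and class names here are illustrative only:

import java.util.ServiceLoader;

public class InitializerLookupSketch {

  interface Initializer {
    void init();
  }

  static class DefaultInitializer implements Initializer {
    public void init() {
    }
  }

  // Return the first registered provider that is not the default; fall back otherwise.
  static Initializer load(final ClassLoader classLoader) {
    for (final Initializer candidate : ServiceLoader.load(Initializer.class, classLoader)) {
      if (!(candidate instanceof DefaultInitializer)) {
        return candidate;
      }
    }
    return new DefaultInitializer();
  }
}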
452 | @ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
public class ClusterStatsTests extends ElasticsearchIntegrationTest {
private void assertCounts(ClusterStatsNodes.Counts counts, int total, int masterOnly, int dataOnly, int masterData, int client) {
assertThat(counts.getTotal(), Matchers.equalTo(total));
assertThat(counts.getMasterOnly(), Matchers.equalTo(masterOnly));
assertThat(counts.getDataOnly(), Matchers.equalTo(dataOnly));
assertThat(counts.getMasterData(), Matchers.equalTo(masterData));
assertThat(counts.getClient(), Matchers.equalTo(client));
}
@Test
public void testNodeCounts() {
cluster().startNode();
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 1, 0, 0, 1, 0);
cluster().startNode(ImmutableSettings.builder().put("node.data", false));
response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 2, 1, 0, 1, 0);
cluster().startNode(ImmutableSettings.builder().put("node.master", false));
response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 3, 1, 1, 1, 0);
cluster().startNode(ImmutableSettings.builder().put("node.client", true));
response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 4, 1, 1, 1, 1);
}
private void assertShardStats(ClusterStatsIndices.ShardStats stats, int indices, int total, int primaries, double replicationFactor) {
assertThat(stats.getIndices(), Matchers.equalTo(indices));
assertThat(stats.getTotal(), Matchers.equalTo(total));
assertThat(stats.getPrimaries(), Matchers.equalTo(primaries));
assertThat(stats.getReplication(), Matchers.equalTo(replicationFactor));
}
@Test
public void testIndicesShardStats() {
cluster().startNode();
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
prepareCreate("test1").setSettings("number_of_shards", 2, "number_of_replicas", 1).get();
ensureYellow();
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW));
assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0l));
assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1));
assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0);
// add another node, replicas should get assigned
cluster().startNode();
ensureGreen();
index("test1", "type", "1", "f", "f");
refresh(); // make the doc visible
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1l));
assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0);
prepareCreate("test2").setSettings("number_of_shards", 3, "number_of_replicas", 0).get();
ensureGreen();
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(2));
assertShardStats(response.getIndicesStats().getShards(), 2, 7, 5, 2.0 / 5);
assertThat(response.getIndicesStats().getShards().getAvgIndexPrimaryShards(), Matchers.equalTo(2.5));
assertThat(response.getIndicesStats().getShards().getMinIndexPrimaryShards(), Matchers.equalTo(2));
assertThat(response.getIndicesStats().getShards().getMaxIndexPrimaryShards(), Matchers.equalTo(3));
assertThat(response.getIndicesStats().getShards().getAvgIndexShards(), Matchers.equalTo(3.5));
assertThat(response.getIndicesStats().getShards().getMinIndexShards(), Matchers.equalTo(3));
assertThat(response.getIndicesStats().getShards().getMaxIndexShards(), Matchers.equalTo(4));
assertThat(response.getIndicesStats().getShards().getAvgIndexReplication(), Matchers.equalTo(0.5));
assertThat(response.getIndicesStats().getShards().getMinIndexReplication(), Matchers.equalTo(0.0));
assertThat(response.getIndicesStats().getShards().getMaxIndexReplication(), Matchers.equalTo(1.0));
}
@Test
public void testValuesSmokeScreen() {
cluster().ensureAtMostNumNodes(5);
cluster().ensureAtLeastNumNodes(1);
SigarService sigarService = cluster().getInstance(SigarService.class);
index("test1", "type", "1", "f", "f");
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getTimestamp(), Matchers.greaterThan(946681200000l)); // 1 Jan 2000
assertThat(response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0l));
assertThat(response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0l));
assertThat(response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0));
if (sigarService.sigarAvailable()) {
// We only get those if we have sigar
assertThat(response.nodesStats.getOs().getAvailableProcessors(), Matchers.greaterThan(0));
assertThat(response.nodesStats.getOs().getAvailableMemory().bytes(), Matchers.greaterThan(0l));
assertThat(response.nodesStats.getOs().getCpus().size(), Matchers.greaterThan(0));
}
assertThat(response.nodesStats.getVersions().size(), Matchers.greaterThan(0));
assertThat(response.nodesStats.getVersions().contains(Version.CURRENT), Matchers.equalTo(true));
assertThat(response.nodesStats.getPlugins().size(), Matchers.greaterThanOrEqualTo(0));
assertThat(response.nodesStats.getProcess().count, Matchers.greaterThan(0));
// 0 happens when not supported on platform
assertThat(response.nodesStats.getProcess().getAvgOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(0L));
// these can be -1 if not supported on platform
assertThat(response.nodesStats.getProcess().getMinOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
assertThat(response.nodesStats.getProcess().getMaxOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
}
} | 0true
| src_test_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsTests.java |
2,756 | public interface HttpResponse extends RestResponse {
} | 0true
| src_main_java_org_elasticsearch_http_HttpResponse.java |
3,299 | return new DoubleValues(true) {
final BytesRef bytes = new BytesRef();
int i = Integer.MAX_VALUE;
int valueCount = 0;
@Override
public int setDocument(int docId) {
values.get(docId, bytes);
assert bytes.length % 8 == 0;
i = 0;
return valueCount = bytes.length / 8;
}
@Override
public double nextValue() {
assert i < valueCount;
return ByteUtils.readDoubleLE(bytes.bytes, bytes.offset + i++ * 8);
}
}; | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_BinaryDVNumericAtomicFieldData.java |
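The snippet above unpacks doubles stored little-endian at 8-byte strides via ByteUtils.readDoubleLE. An equivalent decode using only the JDK, assuming the same packed layout as the snippet:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class DoubleDecodeSketch {

  // Read the i-th little-endian double from bytes, starting at offset.
  static double readDoubleLE(final byte[] bytes, final int offset, final int i) {
    return ByteBuffer.wrap(bytes, offset + i * 8, 8)
        .order(ByteOrder.LITTLE_ENDIAN)
        .getDouble();
  }

  public static void main(final String[] args) {
    final byte[] packed = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN)
        .putDouble(1.5).putDouble(-2.25).array();
    System.out.println(readDoubleLE(packed, 0, 0)); // 1.5
    System.out.println(readDoubleLE(packed, 0, 1)); // -2.25
  }
}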
75 | @SuppressWarnings("serial")
static final class MapReduceKeysToIntTask<K,V>
extends BulkTask<K,V,Integer> {
final ObjectToInt<? super K> transformer;
final IntByIntToInt reducer;
final int basis;
int result;
MapReduceKeysToIntTask<K,V> rights, nextRight;
MapReduceKeysToIntTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
MapReduceKeysToIntTask<K,V> nextRight,
ObjectToInt<? super K> transformer,
int basis,
IntByIntToInt reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.transformer = transformer;
this.basis = basis; this.reducer = reducer;
}
public final Integer getRawResult() { return result; }
public final void compute() {
final ObjectToInt<? super K> transformer;
final IntByIntToInt reducer;
if ((transformer = this.transformer) != null &&
(reducer = this.reducer) != null) {
int r = this.basis;
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new MapReduceKeysToIntTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, transformer, r, reducer)).fork();
}
for (Node<K,V> p; (p = advance()) != null; )
r = reducer.apply(r, transformer.apply(p.key));
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V>
t = (MapReduceKeysToIntTask<K,V>)c,
s = t.rights;
while (s != null) {
t.result = reducer.apply(t.result, s.result);
s = t.rights = s.nextRight;
}
}
}
}
} | 0true
| src_main_java_jsr166e_ConcurrentHashMapV8.java |
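The CountedCompleter above repeatedly halves its range, forks the right half, reduces the left half in place, and merges sibling results as completions propagate. The same divide-reduce-merge shape, stripped down to a plain RecursiveTask summing an int array; the threshold and array size are illustrative:

import java.util.Arrays;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveTask;

public class SumTaskSketch extends RecursiveTask<Integer> {
  private final int[] a;
  private final int lo, hi;

  SumTaskSketch(final int[] a, final int lo, final int hi) {
    this.a = a;
    this.lo = lo;
    this.hi = hi;
  }

  @Override
  protected Integer compute() {
    if (hi - lo <= 1024) { // small enough: reduce sequentially
      int r = 0;
      for (int i = lo; i < hi; i++)
        r += a[i];
      return r;
    }
    final int mid = (lo + hi) >>> 1;
    final SumTaskSketch right = new SumTaskSketch(a, mid, hi);
    right.fork();                                             // push the right half to other workers
    final int left = new SumTaskSketch(a, lo, mid).compute(); // reduce the left half in place
    return left + right.join();                               // merge sibling results
  }

  public static void main(final String[] args) {
    final int[] data = new int[100000];
    Arrays.fill(data, 1);
    System.out.println(new ForkJoinPool().invoke(new SumTaskSketch(data, 0, data.length))); // 100000
  }
}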
498 | public final class ClientInvocationServiceImpl implements ClientInvocationService {
private final HazelcastClient client;
private final ClientConnectionManager connectionManager;
private final ConcurrentMap<String, Integer> registrationMap = new ConcurrentHashMap<String, Integer>();
private final ConcurrentMap<String, String> registrationAliasMap = new ConcurrentHashMap<String, String>();
private final Set<ClientCallFuture> failedListeners =
Collections.newSetFromMap(new ConcurrentHashMap<ClientCallFuture, Boolean>());
public ClientInvocationServiceImpl(HazelcastClient client) {
this.client = client;
this.connectionManager = client.getConnectionManager();
}
public <T> ICompletableFuture<T> invokeOnRandomTarget(ClientRequest request) throws Exception {
return send(request);
}
public <T> ICompletableFuture<T> invokeOnTarget(ClientRequest request, Address target) throws Exception {
return send(request, target);
}
public <T> ICompletableFuture<T> invokeOnKeyOwner(ClientRequest request, Object key) throws Exception {
ClientPartitionServiceImpl partitionService = (ClientPartitionServiceImpl) client.getClientPartitionService();
final Address owner = partitionService.getPartitionOwner(partitionService.getPartitionId(key));
if (owner != null) {
return invokeOnTarget(request, owner);
}
return invokeOnRandomTarget(request);
}
public <T> ICompletableFuture<T> invokeOnRandomTarget(ClientRequest request, EventHandler handler) throws Exception {
return sendAndHandle(request, handler);
}
public <T> ICompletableFuture<T> invokeOnTarget(ClientRequest request, Address target, EventHandler handler)
throws Exception {
return sendAndHandle(request, target, handler);
}
public <T> ICompletableFuture<T> invokeOnKeyOwner(ClientRequest request, Object key, EventHandler handler)
throws Exception {
ClientPartitionServiceImpl partitionService = (ClientPartitionServiceImpl) client.getClientPartitionService();
final Address owner = partitionService.getPartitionOwner(partitionService.getPartitionId(key));
if (owner != null) {
return invokeOnTarget(request, owner, handler);
}
return invokeOnRandomTarget(request, handler);
}
// NIO public
public ICompletableFuture send(ClientRequest request, ClientConnection connection) {
request.setSingleConnection();
return doSend(request, connection, null);
}
public Future reSend(ClientCallFuture future) throws Exception {
final ClientConnection connection = connectionManager.tryToConnect(null);
sendInternal(future, connection);
return future;
}
public void registerFailedListener(ClientCallFuture future) {
failedListeners.add(future);
}
public void triggerFailedListeners() {
final Iterator<ClientCallFuture> iterator = failedListeners.iterator();
while (iterator.hasNext()) {
final ClientCallFuture failedListener = iterator.next();
iterator.remove();
failedListener.resend();
}
}
public void registerListener(String uuid, Integer callId) {
registrationAliasMap.put(uuid, uuid);
registrationMap.put(uuid, callId);
}
public void reRegisterListener(String uuid, String alias, Integer callId) {
final String oldAlias = registrationAliasMap.put(uuid, alias);
if (oldAlias != null) {
registrationMap.remove(oldAlias);
registrationMap.put(alias, callId);
}
}
public boolean isRedoOperation() {
return client.getClientConfig().isRedoOperation();
}
public String deRegisterListener(String alias) {
final String uuid = registrationAliasMap.remove(alias);
if (uuid != null) {
final Integer callId = registrationMap.remove(alias);
connectionManager.removeEventHandler(callId);
}
return uuid;
}
//NIO private
private ICompletableFuture send(ClientRequest request) throws Exception {
final ClientConnection connection = connectionManager.tryToConnect(null);
return doSend(request, connection, null);
}
private ICompletableFuture send(ClientRequest request, Address target) throws Exception {
final ClientConnection connection = connectionManager.tryToConnect(target);
return doSend(request, connection, null);
}
private ICompletableFuture sendAndHandle(ClientRequest request, EventHandler handler) throws Exception {
final ClientConnection connection = connectionManager.tryToConnect(null);
return doSend(request, connection, handler);
}
private ICompletableFuture sendAndHandle(ClientRequest request, Address target, EventHandler handler) throws Exception {
final ClientConnection connection = connectionManager.tryToConnect(target);
return doSend(request, connection, handler);
}
private ICompletableFuture doSend(ClientRequest request, ClientConnection connection, EventHandler handler) {
final ClientCallFuture future = new ClientCallFuture(client, request, handler);
sendInternal(future, connection);
return future;
}
private void sendInternal(ClientCallFuture future, ClientConnection connection) {
connection.registerCallId(future);
future.setConnection(connection);
final SerializationService ss = client.getSerializationService();
final Data data = ss.toData(future.getRequest());
if (!connection.write(new DataAdapter(data))) {
final int callId = future.getRequest().getCallId();
connection.deRegisterCallId(callId);
connection.deRegisterEventHandler(callId);
future.notify(new TargetNotMemberException("Address : " + connection.getRemoteEndpoint()));
}
}
} | 1no label
| hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientInvocationServiceImpl.java |
2,993 | public static class Wrapper extends Filter implements CacheKeyFilter {
private final Filter filter;
private final Key key;
public Wrapper(Filter filter, Key key) {
this.filter = filter;
this.key = key;
}
@Override
public Key cacheKey() {
return key;
}
public Filter wrappedFilter() {
return filter;
}
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
return filter.getDocIdSet(context, acceptDocs);
}
@Override
public int hashCode() {
return filter.hashCode();
}
@Override
public boolean equals(Object obj) {
return filter.equals(obj);
}
@Override
public String toString() {
return filter.toString();
}
} | 0true
| src_main_java_org_elasticsearch_index_cache_filter_support_CacheKeyFilter.java |
1,292 | public class DataPropagationTask implements Callable<Void> {
@Override
public Void call() throws Exception {
Random random = new Random();
final ODatabaseDocumentTx db = new ODatabaseDocumentTx(baseDocumentTx.getURL());
db.open("admin", "admin");
try {
List<ORID> testTwoList = new ArrayList<ORID>();
List<ORID> firstDocs = new ArrayList<ORID>();
OClass classOne = db.getMetadata().getSchema().getClass("TestOne");
OClass classTwo = db.getMetadata().getSchema().getClass("TestTwo");
for (int i = 0; i < 5000; i++) {
ODocument docOne = new ODocument(classOne);
docOne.field("intProp", random.nextInt());
byte[] stringData = new byte[256];
random.nextBytes(stringData);
String stringProp = new String(stringData);
docOne.field("stringProp", stringProp);
Set<String> stringSet = new HashSet<String>();
for (int n = 0; n < 5; n++) {
stringSet.add("str" + random.nextInt());
}
docOne.field("stringSet", stringSet);
docOne.save();
firstDocs.add(docOne.getIdentity());
if (random.nextBoolean()) {
ODocument docTwo = new ODocument(classTwo);
List<String> stringList = new ArrayList<String>();
for (int n = 0; n < 5; n++) {
stringList.add("strnd" + random.nextInt());
}
docTwo.field("stringList", stringList);
docTwo.save();
testTwoList.add(docTwo.getIdentity());
}
if (!testTwoList.isEmpty()) {
int startIndex = random.nextInt(testTwoList.size());
int endIndex = random.nextInt(testTwoList.size() - startIndex) + startIndex;
Map<String, ORID> linkMap = new HashMap<String, ORID>();
for (int n = startIndex; n < endIndex; n++) {
ORID docTwoRid = testTwoList.get(n);
linkMap.put(docTwoRid.toString(), docTwoRid);
}
docOne.field("linkMap", linkMap);
docOne.save();
}
boolean deleteDoc = random.nextDouble() <= 0.2;
if (deleteDoc) {
ORID rid = firstDocs.remove(random.nextInt(firstDocs.size()));
db.delete(rid);
}
}
} finally {
db.close();
}
return null;
}
} | 0true
| core_src_test_java_com_orientechnologies_orient_core_storage_impl_local_paginated_LocalPaginatedStorageRestoreFromWAL.java |
1,422 | public class MetaDataMappingService extends AbstractComponent {
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final IndicesService indicesService;
// the mutex protects all the refreshOrUpdate variables!
private final Object refreshOrUpdateMutex = new Object();
private final List<MappingTask> refreshOrUpdateQueue = new ArrayList<MappingTask>();
private long refreshOrUpdateInsertOrder;
private long refreshOrUpdateProcessedInsertOrder;
@Inject
public MetaDataMappingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService) {
super(settings);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.indicesService = indicesService;
}
static class MappingTask {
final String index;
final String indexUUID;
MappingTask(String index, final String indexUUID) {
this.index = index;
this.indexUUID = indexUUID;
}
}
static class RefreshTask extends MappingTask {
final String[] types;
RefreshTask(String index, final String indexUUID, String[] types) {
super(index, indexUUID);
this.types = types;
}
}
static class UpdateTask extends MappingTask {
final String type;
final CompressedString mappingSource;
final long order; // -1 for unknown
final String nodeId; // null for unknown
final ClusterStateUpdateListener listener;
UpdateTask(String index, String indexUUID, String type, CompressedString mappingSource, long order, String nodeId, ClusterStateUpdateListener listener) {
super(index, indexUUID);
this.type = type;
this.mappingSource = mappingSource;
this.order = order;
this.nodeId = nodeId;
this.listener = listener;
}
}
/**
* Batch method to apply all the queued refresh or update operations. The idea is to batch
* as much as possible so that, for example, multiple updates on the same mapping don't
* recreate the same index over and over, and a single cluster change event is generated
* for all of them.
*/
ClusterState executeRefreshOrUpdate(final ClusterState currentState, final long insertionOrder) throws Exception {
final List<MappingTask> allTasks = new ArrayList<MappingTask>();
synchronized (refreshOrUpdateMutex) {
if (refreshOrUpdateQueue.isEmpty()) {
return currentState;
}
// we already processed this task in a bulk manner in a previous cluster event, so simply
// ignore it; other tasks can still get in, and the ones queued now will be handled in a
// subsequent cluster state event
if (insertionOrder < refreshOrUpdateProcessedInsertOrder) {
return currentState;
}
allTasks.addAll(refreshOrUpdateQueue);
refreshOrUpdateQueue.clear();
refreshOrUpdateProcessedInsertOrder = refreshOrUpdateInsertOrder;
}
if (allTasks.isEmpty()) {
return currentState;
}
// break down to tasks per index, so we can optimize the on demand index service creation
// to only happen for the duration of a single index processing of its respective events
Map<String, List<MappingTask>> tasksPerIndex = Maps.newHashMap();
for (MappingTask task : allTasks) {
if (task.index == null) {
logger.debug("ignoring a mapping task of type [{}] with a null index.", task);
continue;
}
List<MappingTask> indexTasks = tasksPerIndex.get(task.index);
if (indexTasks == null) {
indexTasks = new ArrayList<MappingTask>();
tasksPerIndex.put(task.index, indexTasks);
}
indexTasks.add(task);
}
boolean dirty = false;
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
for (Map.Entry<String, List<MappingTask>> entry : tasksPerIndex.entrySet()) {
String index = entry.getKey();
IndexMetaData indexMetaData = mdBuilder.get(index);
if (indexMetaData == null) {
// index got deleted on us, ignore...
logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index);
continue;
}
// the task list to iterate over, filled with the mapping tasks, keeping only the
// latest (based on order) update-mapping task per node
List<MappingTask> allIndexTasks = entry.getValue();
List<MappingTask> tasks = new ArrayList<MappingTask>();
for (MappingTask task : allIndexTasks) {
if (!indexMetaData.isSameUUID(task.indexUUID)) {
logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
continue;
}
boolean add = true;
// if its an update task, make sure we only process the latest ordered one per node
if (task instanceof UpdateTask) {
UpdateTask uTask = (UpdateTask) task;
// we can only do something to compare if we have the order && node
if (uTask.order != -1 && uTask.nodeId != null) {
for (int i = 0; i < tasks.size(); i++) {
MappingTask existing = tasks.get(i);
if (existing instanceof UpdateTask) {
UpdateTask eTask = (UpdateTask) existing;
if (eTask.type.equals(uTask.type)) {
// if we have the order, and the node id, then we can compare, and replace if applicable
if (eTask.order != -1 && eTask.nodeId != null) {
if (eTask.nodeId.equals(uTask.nodeId) && uTask.order > eTask.order) {
// a newer update task, we can replace so we execute it one!
tasks.set(i, uTask);
add = false;
break;
}
}
}
}
}
}
}
if (add) {
tasks.add(task);
}
}
// construct the actual index if needed, and make sure the relevant mappings are there
boolean removeIndex = false;
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
// we need to create the index here, and add the current mapping to it, so we can merge
indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), currentState.nodes().localNode().id());
removeIndex = true;
Set<String> typesToIntroduce = Sets.newHashSet();
for (MappingTask task : tasks) {
if (task instanceof UpdateTask) {
typesToIntroduce.add(((UpdateTask) task).type);
} else if (task instanceof RefreshTask) {
Collections.addAll(typesToIntroduce, ((RefreshTask) task).types);
}
}
for (String type : typesToIntroduce) {
// only add the current relevant mapping (if exists)
if (indexMetaData.mappings().containsKey(type)) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(type, indexMetaData.mappings().get(type).source(), false);
}
}
}
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
try {
boolean indexDirty = processIndexMappingTasks(tasks, indexService, builder);
if (indexDirty) {
mdBuilder.put(builder);
dirty = true;
}
} finally {
if (removeIndex) {
indicesService.removeIndex(index, "created for mapping processing");
}
}
}
// fork sending back updates, so we won't wait to send them back on the cluster state, there
// might be a few of those...
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
for (Object task : allTasks) {
if (task instanceof UpdateTask) {
UpdateTask uTask = (UpdateTask) task;
ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true);
uTask.listener.onResponse(response);
}
}
}
});
if (!dirty) {
return currentState;
}
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
private boolean processIndexMappingTasks(List<MappingTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
boolean dirty = false;
String index = indexService.index().name();
// keep track of what we already refreshed, no need to refresh it again...
Set<String> processedRefreshes = Sets.newHashSet();
for (MappingTask task : tasks) {
if (task instanceof RefreshTask) {
RefreshTask refreshTask = (RefreshTask) task;
try {
List<String> updatedTypes = Lists.newArrayList();
for (String type : refreshTask.types) {
if (processedRefreshes.contains(type)) {
continue;
}
DocumentMapper mapper = indexService.mapperService().documentMapper(type);
if (mapper == null) {
continue;
}
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
updatedTypes.add(type);
builder.putMapping(new MappingMetaData(mapper));
}
processedRefreshes.add(type);
}
if (updatedTypes.isEmpty()) {
continue;
}
logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
dirty = true;
} catch (Throwable t) {
logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
}
} else if (task instanceof UpdateTask) {
UpdateTask updateTask = (UpdateTask) task;
try {
String type = updateTask.type;
CompressedString mappingSource = updateTask.mappingSource;
MappingMetaData mappingMetaData = builder.mapping(type);
if (mappingMetaData != null && mappingMetaData.source().equals(mappingSource)) {
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as its source is equal to ours", index, updateTask.type);
continue;
}
DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false);
processedRefreshes.add(type);
// if we end up with the same mapping as the original one, ignore
if (mappingMetaData != null && mappingMetaData.source().equals(updatedMapper.mappingSource())) {
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as it results in the same source as what we have", index, updateTask.type);
continue;
}
// build the updated mapping source
if (logger.isDebugEnabled()) {
logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource());
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}] (dynamic)", index, type);
}
builder.putMapping(new MappingMetaData(updatedMapper));
dirty = true;
} catch (Throwable t) {
logger.warn("[{}] failed to update-mapping in cluster state, type [{}]", index, updateTask.type);
}
} else {
logger.warn("illegal state, got wrong mapping task type [{}]", task);
}
}
return dirty;
}
/**
* Refreshes mappings if they are not the same between original and parsed version
*/
public void refreshMapping(final String index, final String indexUUID, final String... types) {
final long insertOrder;
synchronized (refreshOrUpdateMutex) {
insertOrder = ++refreshOrUpdateInsertOrder;
refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types));
}
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ClusterStateUpdateTask() {
@Override
public void onFailure(String source, Throwable t) {
logger.warn("failure during [{}]", t, source);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return executeRefreshOrUpdate(currentState, insertOrder);
}
});
}
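// A minimal sketch of a caller (hypothetical invocation; names are illustrative):
// a node that detects its local mapping differs from the cluster state asks the
// master to re-sync it:
//
//   metaDataMappingService.refreshMapping("my_index", indexMetaData.uuid(), "my_type");
//
// The task is queued under refreshOrUpdateMutex and folded into the next
// "refresh-mapping" cluster state update, so concurrent requests batch together.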
public void updateMapping(final String index, final String indexUUID, final String type, final CompressedString mappingSource, final long order, final String nodeId, final ClusterStateUpdateListener listener) {
final long insertOrder;
synchronized (refreshOrUpdateMutex) {
insertOrder = ++refreshOrUpdateInsertOrder;
refreshOrUpdateQueue.add(new UpdateTask(index, indexUUID, type, mappingSource, order, nodeId, listener));
}
clusterService.submitStateUpdateTask("update-mapping [" + index + "][" + type + "] / node [" + nodeId + "], order [" + order + "]", Priority.HIGH, new ClusterStateUpdateTask() {
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public ClusterState execute(final ClusterState currentState) throws Exception {
return executeRefreshOrUpdate(currentState, insertOrder);
}
});
}
public void removeMapping(final DeleteMappingClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
clusterService.submitStateUpdateTask("remove-mapping [" + Arrays.toString(request.types()) + "]", Priority.HIGH, new AckedClusterStateUpdateTask() {
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
listener.onResponse(new ClusterStateUpdateResponse(true));
}
@Override
public void onAckTimeout() {
listener.onResponse(new ClusterStateUpdateResponse(false));
}
@Override
public TimeValue ackTimeout() {
return request.ackTimeout();
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public ClusterState execute(ClusterState currentState) {
if (request.indices().length == 0) {
throw new IndexMissingException(new Index("_all"));
}
MetaData.Builder builder = MetaData.builder(currentState.metaData());
boolean changed = false;
String latestIndexWithout = null;
for (String indexName : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
if (indexMetaData != null) {
// create the builder only for existing indices; IndexMetaData.builder(null) would fail before the null check
IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData);
boolean isLatestIndexWithout = true;
for (String type : request.types()) {
if (indexMetaData.mappings().containsKey(type)) {
indexBuilder.removeMapping(type);
changed = true;
isLatestIndexWithout = false;
}
}
if (isLatestIndexWithout) {
latestIndexWithout = indexMetaData.index();
}
builder.put(indexBuilder);
}
}
if (!changed) {
throw new TypeMissingException(new Index(latestIndexWithout), request.types());
}
logger.info("[{}] remove_mapping [{}]", request.indices(), request.types());
return ClusterState.builder(currentState).metaData(builder).build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
}
public void putMapping(final PutMappingClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask() {
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
listener.onResponse(new ClusterStateUpdateResponse(true));
}
@Override
public void onAckTimeout() {
listener.onResponse(new ClusterStateUpdateResponse(false));
}
@Override
public TimeValue ackTimeout() {
return request.ackTimeout();
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public ClusterState execute(final ClusterState currentState) throws Exception {
List<String> indicesToClose = Lists.newArrayList();
try {
for (String index : request.indices()) {
if (!currentState.metaData().hasIndex(index)) {
throw new IndexMissingException(new Index(index));
}
}
// pre create indices here and add mappings to them so we can merge the mappings here if needed
for (String index : request.indices()) {
if (indicesService.hasIndex(index)) {
continue;
}
final IndexMetaData indexMetaData = currentState.metaData().index(index);
IndexService indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), clusterService.localNode().id());
indicesToClose.add(indexMetaData.index());
// make sure to add custom default mapping if exists
if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false);
}
// only add the current relevant mapping (if exists)
if (indexMetaData.mappings().containsKey(request.type())) {
indexService.mapperService().merge(request.type(), indexMetaData.mappings().get(request.type()).source(), false);
}
}
Map<String, DocumentMapper> newMappers = newHashMap();
Map<String, DocumentMapper> existingMappers = newHashMap();
for (String index : request.indices()) {
IndexService indexService = indicesService.indexService(index);
if (indexService != null) {
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
DocumentMapper newMapper;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()), false);
} else {
newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()));
if (existingMapper != null) {
// first, simulate
DocumentMapper.MergeResult mergeResult = existingMapper.merge(newMapper, mergeFlags().simulate(true));
// if we have conflicts, and we are not supposed to ignore them, throw an exception
if (!request.ignoreConflicts() && mergeResult.hasConflicts()) {
throw new MergeMappingException(mergeResult.conflicts());
}
}
}
newMappers.put(index, newMapper);
if (existingMapper != null) {
existingMappers.put(index, existingMapper);
}
} else {
throw new IndexMissingException(new Index(index));
}
}
String mappingType = request.type();
if (mappingType == null) {
mappingType = newMappers.values().iterator().next().type();
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
}
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
}
final Map<String, MappingMetaData> mappings = newHashMap();
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
String index = entry.getKey();
// do the actual merge here on the master, and update the mapping source
DocumentMapper newMapper = entry.getValue();
IndexService indexService = indicesService.indexService(index);
CompressedString existingSource = null;
if (existingMappers.containsKey(entry.getKey())) {
existingSource = existingMappers.get(entry.getKey()).mappingSource();
}
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false);
CompressedString updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {
if (existingSource.equals(updatedSource)) {
// same source, no changes, ignore it
} else {
// use the merged mapping source
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
}
}
} else {
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
}
}
}
if (mappings.isEmpty()) {
// no changes, return
return currentState;
}
MetaData.Builder builder = MetaData.builder(currentState.metaData());
for (String indexName : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
if (indexMetaData == null) {
throw new IndexMissingException(new Index(indexName));
}
MappingMetaData mappingMd = mappings.get(indexName);
if (mappingMd != null) {
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
}
}
return ClusterState.builder(currentState).metaData(builder).build();
} finally {
for (String index : indicesToClose) {
indicesService.removeIndex(index, "created for mapping processing");
}
}
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
}
} | 0true
| src_main_java_org_elasticsearch_cluster_metadata_MetaDataMappingService.java |
1,259 | public class OClusterLocal extends OSharedResourceAdaptive implements OCluster {
public static final int RECORD_SIZE = 11 + OVersionFactory.instance().getVersionSize();
public static final String TYPE = "PHYSICAL";
private static final int RECORD_TYPE_OFFSET = 10;
private static final String DEF_EXTENSION = ".ocl";
private static final int DEF_SIZE = 1000000;
private static final byte RECORD_WAS_DELETED = (byte) -1;
private OMultiFileSegment fileSegment;
private int id;
private long beginOffsetData = -1;
private long endOffsetData = -1; // end of data offset. -1 = latest
protected OClusterLocalHole holeSegment;
private OStoragePhysicalClusterConfigurationLocal config;
private OStorageLocal storage;
private String name;
private long version;
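// On-disk record layout implied by the constants above and by
// readPhysicalPosition()/addPhysicalPosition():
//   bytes 0-1   short  dataSegmentId
//   bytes 2-9   long   dataSegmentPos
//   byte  10    byte   recordType (RECORD_TYPE_OFFSET; RECORD_WAS_DELETED marks holes)
//   bytes 11+   record version (RECORD_SIZE = 11 + version size)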
public OClusterLocal() {
super(OGlobalConfiguration.ENVIRONMENT_CONCURRENT.getValueAsBoolean());
}
public void configure(final OStorage iStorage, OStorageClusterConfiguration iConfig) throws IOException {
config = (OStoragePhysicalClusterConfigurationLocal) iConfig;
init(iStorage, config.getId(), config.getName(), config.getLocation(), config.getDataSegmentId());
}
public void configure(final OStorage iStorage, final int iId, final String iClusterName, final String iLocation,
final int iDataSegmentId, final Object... iParameters) throws IOException {
config = new OStoragePhysicalClusterConfigurationLocal(iStorage.getConfiguration(), iId, iDataSegmentId);
config.name = iClusterName;
init(iStorage, iId, iClusterName, iLocation, iDataSegmentId);
}
public void create(int iStartSize) throws IOException {
acquireExclusiveLock();
try {
if (iStartSize == -1)
iStartSize = DEF_SIZE;
if (config.root.clusters.size() <= config.id)
config.root.clusters.add(config);
else
config.root.clusters.set(config.id, config);
fileSegment.create(iStartSize);
holeSegment.create();
fileSegment.files[0].writeHeaderLong(0, beginOffsetData);
fileSegment.files[0].writeHeaderLong(OBinaryProtocol.SIZE_LONG, endOffsetData); // end-offset slot (both offsets are -1 at creation)
fileSegment.files[0].writeHeaderLong(2 * OBinaryProtocol.SIZE_LONG, 2);
} finally {
releaseExclusiveLock();
}
}
public void open() throws IOException {
acquireExclusiveLock();
try {
fileSegment.open();
holeSegment.open();
beginOffsetData = fileSegment.files[0].readHeaderLong(0);
endOffsetData = fileSegment.files[0].readHeaderLong(OBinaryProtocol.SIZE_LONG);
version = fileSegment.files[0].readHeaderLong(2 * OBinaryProtocol.SIZE_LONG);
if (version < 1) {
convertDeletedRecords();
}
if (version < 2) {
if (endOffsetData < 0) {
endOffsetData = fileSegment.getFilledUpTo() / RECORD_SIZE - 1;
if (endOffsetData >= 0) {
long[] fetchPos;
for (long currentPos = endOffsetData * RECORD_SIZE; currentPos >= beginOffsetData; currentPos -= RECORD_SIZE) {
fetchPos = fileSegment.getRelativePosition(currentPos);
if (fileSegment.files[(int) fetchPos[0]].readByte(fetchPos[1] + RECORD_TYPE_OFFSET) != RECORD_WAS_DELETED)
// GOOD RECORD: SET IT AS END
break;
endOffsetData--;
}
}
fileSegment.files[0].writeHeaderLong(OBinaryProtocol.SIZE_LONG, endOffsetData);
}
fileSegment.files[0].writeHeaderLong(2 * OBinaryProtocol.SIZE_LONG, 2);
}
} finally {
releaseExclusiveLock();
}
}
private void convertDeletedRecords() throws IOException {
int holesCount = holeSegment.getHoles();
OLogManager.instance().info(this, "Please wait till %d holes will be converted to new format in cluster %s.", holesCount, name);
for (int i = 0; i < holesCount; i++) {
long relativeHolePosition = holeSegment.getEntryPosition(i);
if (relativeHolePosition > -1) {
final long[] pos = fileSegment.getRelativePosition(relativeHolePosition);
final OFile f = fileSegment.files[(int) pos[0]];
final long p = pos[1] + RECORD_TYPE_OFFSET;
f.writeByte(p, RECORD_WAS_DELETED);
}
if (i % 1000 == 0)
OLogManager.instance().info(this, "%d holes were converted in cluster %s ...", i, name);
}
OLogManager.instance().info(this, "Conversion of holes to new format was finished for cluster %s.", holesCount, name);
}
public void close() throws IOException {
acquireExclusiveLock();
try {
fileSegment.close();
holeSegment.close();
} finally {
releaseExclusiveLock();
}
}
@Override
public void close(boolean flush) throws IOException {
close();
}
@Override
public OModificationLock getExternalModificationLock() {
throw new UnsupportedOperationException("getExternalModificationLock");
}
@Override
public OPhysicalPosition createRecord(byte[] content, ORecordVersion recordVersion, byte recordType) throws IOException {
throw new UnsupportedOperationException("createRecord");
}
@Override
public boolean deleteRecord(OClusterPosition clusterPosition) throws IOException {
throw new UnsupportedOperationException("deleteRecord");
}
@Override
public void updateRecord(OClusterPosition clusterPosition, byte[] content, ORecordVersion recordVersion, byte recordType)
throws IOException {
throw new UnsupportedOperationException("updateRecord");
}
@Override
public ORawBuffer readRecord(OClusterPosition clusterPosition) throws IOException {
throw new UnsupportedOperationException("readRecord");
}
@Override
public boolean exists() {
throw new UnsupportedOperationException("exists");
}
public void delete() throws IOException {
acquireExclusiveLock();
try {
for (OFile f : fileSegment.files)
f.delete();
fileSegment.files = null;
holeSegment.delete();
} finally {
releaseExclusiveLock();
}
}
public void truncate() throws IOException {
storage.checkForClusterPermissions(getName());
acquireExclusiveLock();
try {
// REMOVE ALL DATA BLOCKS
final OClusterPosition begin = getFirstPosition();
if (begin.isPersistent()) {
final OClusterPosition end = getLastPosition();
final OPhysicalPosition ppos = new OPhysicalPosition();
for (ppos.clusterPosition = begin; ppos.clusterPosition.compareTo(end) <= 0; ppos.clusterPosition = ppos.clusterPosition
.inc()) {
final OPhysicalPosition pposToDelete = getPhysicalPosition(ppos);
if (pposToDelete != null && storage.checkForRecordValidity(pposToDelete)) {
final ODataLocal data = storage.getDataSegmentById(pposToDelete.dataSegmentId);
if (data != null)
data.deleteRecord(pposToDelete.dataSegmentPos);
}
}
}
fileSegment.truncate();
holeSegment.truncate();
beginOffsetData = -1;
endOffsetData = -1;
} finally {
releaseExclusiveLock();
}
}
public void set(ATTRIBUTES iAttribute, Object iValue) throws IOException {
if (iAttribute == null)
throw new IllegalArgumentException("attribute is null");
final String stringValue = iValue != null ? iValue.toString() : null;
acquireExclusiveLock();
try {
switch (iAttribute) {
case NAME:
setNameInternal(stringValue);
break;
case DATASEGMENT:
setDataSegmentInternal(stringValue);
break;
}
} finally {
releaseExclusiveLock();
}
}
/**
* Fills and returns the PhysicalPosition object received as a parameter with the physical position of the logical record iPosition.
*
* @throws IOException
*/
public OPhysicalPosition getPhysicalPosition(final OPhysicalPosition iPPosition) throws IOException {
final OClusterPosition position = iPPosition.clusterPosition;
final long filePosition = position.longValue() * RECORD_SIZE;
acquireSharedLock();
try {
if (position.isNew() || position.compareTo(getLastPosition()) > 0)
return null;
final long[] pos = fileSegment.getRelativePosition(filePosition);
final OFile f = fileSegment.files[(int) pos[0]];
long p = pos[1];
iPPosition.dataSegmentId = f.readShort(p);
iPPosition.dataSegmentPos = f.readLong(p += OBinaryProtocol.SIZE_SHORT);
iPPosition.recordType = f.readByte(p += OBinaryProtocol.SIZE_LONG);
if (iPPosition.recordType == RECORD_WAS_DELETED)
return null;
p += OBinaryProtocol.SIZE_BYTE;
iPPosition.recordVersion.getSerializer().readFrom(f, p, iPPosition.recordVersion);
return iPPosition;
} finally {
releaseSharedLock();
}
}
@Override
public boolean useWal() {
return false;
}
@Override
public float recordGrowFactor() {
return 1;
}
@Override
public float recordOverflowGrowFactor() {
return 1;
}
@Override
public String compression() {
return ONothingCompression.NAME;
}
/**
* Updates the position in the data segment (usually on defrag).
*
* @throws IOException
*/
public void updateDataSegmentPosition(OClusterPosition iPosition, final int iDataSegmentId, final long iDataSegmentPosition)
throws IOException {
long position = iPosition.longValue();
position = position * RECORD_SIZE;
acquireExclusiveLock();
try {
final long[] pos = fileSegment.getRelativePosition(position);
final OFile f = fileSegment.files[(int) pos[0]];
long p = pos[1];
f.writeShort(p, (short) iDataSegmentId);
f.writeLong(p += OBinaryProtocol.SIZE_SHORT, iDataSegmentPosition);
} finally {
releaseExclusiveLock();
}
}
public void updateVersion(OClusterPosition iPosition, final ORecordVersion iVersion) throws IOException {
long position = iPosition.longValue();
position = position * RECORD_SIZE;
acquireExclusiveLock();
try {
final long[] pos = fileSegment.getRelativePosition(position);
iVersion.getSerializer().writeTo(fileSegment.files[(int) pos[0]],
pos[1] + OBinaryProtocol.SIZE_SHORT + OBinaryProtocol.SIZE_LONG + OBinaryProtocol.SIZE_BYTE, iVersion);
} finally {
releaseExclusiveLock();
}
}
public void updateRecordType(OClusterPosition iPosition, final byte iRecordType) throws IOException {
long position = iPosition.longValue();
position = position * RECORD_SIZE;
acquireExclusiveLock();
try {
final long[] pos = fileSegment.getRelativePosition(position);
fileSegment.files[(int) pos[0]].writeByte(pos[1] + OBinaryProtocol.SIZE_SHORT + OBinaryProtocol.SIZE_LONG, iRecordType);
} finally {
releaseExclusiveLock();
}
}
/**
* Removes the logical position entry: pushes the position onto the hole segment and marks the record as deleted.
*
* @throws IOException
*/
public void removePhysicalPosition(final OClusterPosition iPosition) throws IOException {
final long position = iPosition.longValue() * RECORD_SIZE;
acquireExclusiveLock();
try {
final long[] pos = fileSegment.getRelativePosition(position);
final OFile file = fileSegment.files[(int) pos[0]];
final long p = pos[1] + RECORD_TYPE_OFFSET;
holeSegment.pushPosition(position);
file.writeByte(p, RECORD_WAS_DELETED);
updateBoundsAfterDeletion(iPosition.longValue());
} finally {
releaseExclusiveLock();
}
}
public boolean removeHole(final long iPosition) throws IOException {
acquireExclusiveLock();
try {
return holeSegment.removeEntryWithPosition(iPosition * RECORD_SIZE);
} finally {
releaseExclusiveLock();
}
}
public int getDataSegmentId() {
acquireSharedLock();
try {
return config.getDataSegmentId();
} finally {
releaseSharedLock();
}
}
/**
* Adds a new entry.
*
* @throws IOException
*/
public boolean addPhysicalPosition(final OPhysicalPosition iPPosition) throws IOException {
final long[] pos;
long offset;
acquireExclusiveLock();
try {
offset = holeSegment.popLastEntryPosition();
boolean recycled;
if (offset > -1) {
// REUSE THE HOLE
pos = fileSegment.getRelativePosition(offset);
recycled = true;
} else {
// NO HOLES FOUND: ALLOCATE MORE SPACE
pos = allocateRecord();
offset = fileSegment.getAbsolutePosition(pos);
recycled = false;
}
final OFile file = fileSegment.files[(int) pos[0]];
long p = pos[1];
file.writeShort(p, (short) iPPosition.dataSegmentId);
file.writeLong(p += OBinaryProtocol.SIZE_SHORT, iPPosition.dataSegmentPos);
file.writeByte(p += OBinaryProtocol.SIZE_LONG, iPPosition.recordType);
if (recycled) {
// GET LAST VERSION
iPPosition.recordVersion.getSerializer().readFrom(file, p + OBinaryProtocol.SIZE_BYTE, iPPosition.recordVersion);
if (iPPosition.recordVersion.isTombstone())
iPPosition.recordVersion.revive();
iPPosition.recordVersion.increment();
} else
iPPosition.recordVersion.reset();
iPPosition.recordVersion.getSerializer().writeTo(file, p + OBinaryProtocol.SIZE_BYTE, iPPosition.recordVersion);
iPPosition.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(offset / RECORD_SIZE);
updateBoundsAfterInsertion(iPPosition.clusterPosition.longValue());
} finally {
releaseExclusiveLock();
}
return true;
}
/**
* Allocates space to store a new record.
*/
protected long[] allocateRecord() throws IOException {
return fileSegment.allocateSpace(RECORD_SIZE);
}
@Override
public OClusterPosition getFirstPosition() {
acquireSharedLock();
try {
return OClusterPositionFactory.INSTANCE.valueOf(beginOffsetData);
} finally {
releaseSharedLock();
}
}
/**
* Returns the endOffsetData value, i.e. the position of the last record (-1 when the cluster is empty).
*/
@Override
public OClusterPosition getLastPosition() {
acquireSharedLock();
try {
return OClusterPositionFactory.INSTANCE.valueOf(endOffsetData);
} finally {
releaseSharedLock();
}
}
public long getEntries() {
acquireSharedLock();
try {
return fileSegment.getFilledUpTo() / RECORD_SIZE - holeSegment.getHoles();
} finally {
releaseSharedLock();
}
}
@Override
public long getTombstonesCount() {
return 0;
}
@Override
public void convertToTombstone(OClusterPosition iPosition) throws IOException {
throw new UnsupportedOperationException("convertToTombstone");
}
@Override
public boolean hasTombstonesSupport() {
return false;
}
public int getId() {
return id;
}
public OClusterEntryIterator absoluteIterator() {
return new OClusterEntryIterator(this);
}
public long getSize() {
acquireSharedLock();
try {
return fileSegment.getSize();
} finally {
releaseSharedLock();
}
}
public long getFilledUpTo() {
acquireSharedLock();
try {
return fileSegment.getFilledUpTo();
} finally {
releaseSharedLock();
}
}
@Override
public String toString() {
return name + " (id=" + id + ")";
}
public void lock() {
acquireSharedLock();
}
public void unlock() {
releaseSharedLock();
}
public String getType() {
return TYPE;
}
public boolean isHashBased() {
return false;
}
public long getRecordsSize() {
acquireSharedLock();
try {
long size = fileSegment.getFilledUpTo();
final OClusterEntryIterator it = absoluteIterator();
while (it.hasNext()) {
final OPhysicalPosition pos = it.next();
if (pos.dataSegmentPos > -1 && !pos.recordVersion.isTombstone())
size += storage.getDataSegmentById(pos.dataSegmentId).getRecordSize(pos.dataSegmentPos);
}
return size;
} catch (IOException e) {
throw new OIOException("Error on calculating cluster size for: " + getName(), e);
} finally {
releaseSharedLock();
}
}
public void synch() throws IOException {
acquireSharedLock();
try {
fileSegment.synch();
holeSegment.synch();
} finally {
releaseSharedLock();
}
}
public void setSoftlyClosed(boolean softlyClosed) throws IOException {
acquireExclusiveLock();
try {
fileSegment.setSoftlyClosed(softlyClosed);
holeSegment.setSoftlyClosed(softlyClosed);
} finally {
releaseExclusiveLock();
}
}
@Override
public boolean wasSoftlyClosed() throws IOException {
acquireSharedLock();
try {
boolean wasSoftlyClosed = fileSegment.wasSoftlyClosedAtPreviousTime();
wasSoftlyClosed = wasSoftlyClosed && holeSegment.wasSoftlyClosedAtPreviousTime();
return wasSoftlyClosed;
} finally {
releaseSharedLock();
}
}
public String getName() {
return name;
}
public OStoragePhysicalClusterConfiguration getConfig() {
return config;
}
private void setNameInternal(final String iNewName) {
if (storage.getClusterIdByName(iNewName) > -1)
throw new IllegalArgumentException("Cluster with name '" + iNewName + "' already exists");
for (int i = 0; i < fileSegment.files.length; i++) {
final String osFileName = fileSegment.files[i].getName();
if (osFileName.startsWith(name)) {
final File newFile = new File(storage.getStoragePath() + "/" + iNewName
+ osFileName.substring(osFileName.lastIndexOf(name) + name.length()));
for (OStorageFileConfiguration conf : config.infoFiles) {
if (conf.parent.name.equals(name))
conf.parent.name = iNewName;
if (conf.path.endsWith(osFileName))
conf.path = conf.path.replace(osFileName, newFile.getName());
}
boolean renamed = fileSegment.files[i].renameTo(newFile);
while (!renamed) {
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
renamed = fileSegment.files[i].renameTo(newFile);
}
}
}
config.name = iNewName;
holeSegment.rename(name, iNewName);
storage.renameCluster(name, iNewName);
name = iNewName;
storage.getConfiguration().update();
}
/**
* Assigns a different data-segment id.
*
* @param iName
* Data-segment's name
*/
private void setDataSegmentInternal(final String iName) {
final int dataId = storage.getDataSegmentIdByName(iName);
config.setDataSegmentId(dataId);
storage.getConfiguration().update();
}
protected void updateBoundsAfterInsertion(final long iPosition) throws IOException {
if (iPosition < beginOffsetData || beginOffsetData == -1) {
// UPDATE BEGIN OF DATA
beginOffsetData = iPosition;
fileSegment.files[0].writeHeaderLong(0, beginOffsetData);
}
if (iPosition > endOffsetData) {
// UPDATE END OF DATA
endOffsetData = iPosition;
fileSegment.files[0].writeHeaderLong(OBinaryProtocol.SIZE_LONG, endOffsetData);
}
}
protected void updateBoundsAfterDeletion(final long iPosition) throws IOException {
final long position = iPosition * RECORD_SIZE;
if (iPosition == beginOffsetData) {
if (getEntries() == 0)
beginOffsetData = -1;
else {
// DISCOVER THE BEGIN OF DATA
beginOffsetData++;
long[] fetchPos;
for (long currentPos = position + RECORD_SIZE; currentPos < fileSegment.getFilledUpTo(); currentPos += RECORD_SIZE) {
fetchPos = fileSegment.getRelativePosition(currentPos);
if (fileSegment.files[(int) fetchPos[0]].readByte(fetchPos[1] + RECORD_TYPE_OFFSET) != RECORD_WAS_DELETED)
// GOOD RECORD: SET IT AS BEGIN
break;
beginOffsetData++;
}
}
fileSegment.files[0].writeHeaderLong(0, beginOffsetData);
}
if (iPosition == endOffsetData) {
if (getEntries() == 0)
endOffsetData = -1;
else {
// DISCOVER THE END OF DATA
endOffsetData--;
long[] fetchPos;
for (long currentPos = position - RECORD_SIZE; currentPos >= beginOffsetData; currentPos -= RECORD_SIZE) {
fetchPos = fileSegment.getRelativePosition(currentPos);
if (fileSegment.files[(int) fetchPos[0]].readByte(fetchPos[1] + RECORD_TYPE_OFFSET) != RECORD_WAS_DELETED)
// GOOD RECORD: SET IT AS END
break;
endOffsetData--;
}
}
fileSegment.files[0].writeHeaderLong(OBinaryProtocol.SIZE_LONG, endOffsetData);
}
}
protected void init(final OStorage iStorage, final int iId, final String iClusterName, final String iLocation,
final int iDataSegmentId, final Object... iParameters) throws IOException {
OFileUtils.checkValidName(iClusterName);
storage = (OStorageLocal) iStorage;
config.setDataSegmentId(iDataSegmentId);
config.id = iId;
config.name = iClusterName;
name = iClusterName;
id = iId;
if (fileSegment == null) {
fileSegment = new OMultiFileSegment(storage, config, DEF_EXTENSION, RECORD_SIZE);
config.setHoleFile(new OStorageClusterHoleConfiguration(config, OStorageVariableParser.DB_PATH_VARIABLE + "/" + config.name,
config.fileType, config.fileMaxSize));
holeSegment = new OClusterLocalHole(this, storage, config.getHoleFile());
}
}
public boolean isSoftlyClosed() {
// Look over files of the cluster
if (!fileSegment.wasSoftlyClosedAtPreviousTime())
return false;
// Look over the hole segment
if (!holeSegment.wasSoftlyClosedAtPreviousTime())
return false;
// Look over files of the corresponding data segment
final ODataLocal dataSegment = storage.getDataSegmentById(config.getDataSegmentId());
if (!dataSegment.wasSoftlyClosedAtPreviousTime())
return false;
// Look over the data segment's hole segment
if (!dataSegment.holeSegment.wasSoftlyClosedAtPreviousTime())
return false;
return true;
}
@Override
public OPhysicalPosition[] higherPositions(OPhysicalPosition position) throws IOException {
long filePosition = position.clusterPosition.longValue() * RECORD_SIZE;
if (filePosition < 0)
filePosition = 0;
acquireSharedLock();
try {
if (getFirstPosition().longValue() < 0)
return new OPhysicalPosition[0];
final long lastFilePosition = getLastPosition().longValue() * RECORD_SIZE;
if (filePosition >= lastFilePosition)
return new OPhysicalPosition[0];
byte recordType;
OFile f;
long[] pos;
do {
filePosition += RECORD_SIZE;
pos = fileSegment.getRelativePosition(filePosition);
f = fileSegment.files[(int) pos[0]];
long p = pos[1] + RECORD_TYPE_OFFSET;
recordType = f.readByte(p);
} while (recordType == RECORD_WAS_DELETED && filePosition < lastFilePosition);
if (recordType == RECORD_WAS_DELETED) {
return new OPhysicalPosition[0];
} else {
long p = pos[1];
final OPhysicalPosition physicalPosition = readPhysicalPosition(f, p);
return new OPhysicalPosition[] { physicalPosition };
}
} finally {
releaseSharedLock();
}
}
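// Illustration of the navigation semantics (positions are cluster positions):
// with records at 0, 1 (deleted) and 2, higherPositions(pos 0) skips the deleted
// slot and returns the record at 2; ceilingPositions(pos 1) also returns 2, while
// ceilingPositions(pos 2) returns 2 itself (higher is strict, ceiling is inclusive).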
private OPhysicalPosition readPhysicalPosition(OFile f, long p) throws IOException {
final OPhysicalPosition physicalPosition = new OPhysicalPosition();
physicalPosition.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(p / RECORD_SIZE);
physicalPosition.dataSegmentId = f.readShort(p);
physicalPosition.dataSegmentPos = f.readLong(p += OBinaryProtocol.SIZE_SHORT);
physicalPosition.recordType = f.readByte(p += OBinaryProtocol.SIZE_LONG);
physicalPosition.recordVersion.getSerializer().readFrom(f, p += OBinaryProtocol.SIZE_BYTE, physicalPosition.recordVersion);
return physicalPosition;
}
@Override
public OPhysicalPosition[] ceilingPositions(OPhysicalPosition position) throws IOException {
long filePosition = position.clusterPosition.longValue() * RECORD_SIZE;
if (filePosition < 0)
filePosition = 0;
acquireSharedLock();
try {
if (getFirstPosition().longValue() < 0)
return new OPhysicalPosition[0];
final long lastFilePosition = getLastPosition().longValue() * RECORD_SIZE;
if (filePosition > lastFilePosition)
return new OPhysicalPosition[0];
byte recordType;
OFile f;
long[] pos;
do {
pos = fileSegment.getRelativePosition(filePosition);
f = fileSegment.files[(int) pos[0]];
long p = pos[1] + RECORD_TYPE_OFFSET;
recordType = f.readByte(p);
filePosition += RECORD_SIZE;
} while (recordType == RECORD_WAS_DELETED && filePosition <= lastFilePosition);
if (recordType == RECORD_WAS_DELETED) {
return new OPhysicalPosition[0];
} else {
long p = pos[1];
final OPhysicalPosition physicalPosition = readPhysicalPosition(f, p);
return new OPhysicalPosition[] { physicalPosition };
}
} finally {
releaseSharedLock();
}
}
@Override
public OPhysicalPosition[] lowerPositions(OPhysicalPosition position) throws IOException {
long filePosition = position.clusterPosition.longValue() * RECORD_SIZE;
acquireSharedLock();
try {
long firstFilePosition = getFirstPosition().longValue() * RECORD_SIZE;
if (filePosition <= firstFilePosition)
return new OPhysicalPosition[0];
byte recordType;
long[] pos;
OFile f;
do {
filePosition -= RECORD_SIZE;
pos = fileSegment.getRelativePosition(filePosition);
f = fileSegment.files[(int) pos[0]];
long p = pos[1] + RECORD_TYPE_OFFSET;
recordType = f.readByte(p);
} while (recordType == RECORD_WAS_DELETED && filePosition > firstFilePosition);
if (recordType == RECORD_WAS_DELETED) {
return new OPhysicalPosition[0];
} else {
long p = pos[1];
final OPhysicalPosition physicalPosition = readPhysicalPosition(f, p);
return new OPhysicalPosition[] { physicalPosition };
}
} finally {
releaseSharedLock();
}
}
@Override
public OPhysicalPosition[] floorPositions(OPhysicalPosition position) throws IOException {
long filePosition = position.clusterPosition.longValue() * RECORD_SIZE;
acquireSharedLock();
try {
long firstFilePosition = getFirstPosition().longValue() * RECORD_SIZE;
if (filePosition <= firstFilePosition)
return new OPhysicalPosition[0];
byte recordType;
long[] pos;
OFile f;
do {
pos = fileSegment.getRelativePosition(filePosition);
f = fileSegment.files[(int) pos[0]];
long p = pos[1] + RECORD_TYPE_OFFSET;
recordType = f.readByte(p);
filePosition -= RECORD_SIZE;
} while (recordType == RECORD_WAS_DELETED && filePosition >= firstFilePosition);
if (recordType == RECORD_WAS_DELETED) {
return new OPhysicalPosition[0];
} else {
long p = pos[1];
final OPhysicalPosition physicalPosition = readPhysicalPosition(f, p);
return new OPhysicalPosition[] { physicalPosition };
}
} finally {
releaseSharedLock();
}
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OClusterLocal.java |
7 | @Component("blCustomerPasswordCustomPersistenceHandler")
public class CustomerPasswordCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
@Resource(name="blCustomerService")
protected CustomerService customerService;
@Override
public Boolean canHandleUpdate(PersistencePackage persistencePackage) {
String[] customCriteria = persistencePackage.getCustomCriteria();
return customCriteria != null && customCriteria.length > 0 && customCriteria[0].equals("passwordUpdate");
}
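// This handler only engages when the admin module submits the custom criteria
// ["passwordUpdate"]; a sketch of the expected set-up (assuming PersistencePackage
// exposes a matching setter):
//
//   persistencePackage.setCustomCriteria(new String[] { "passwordUpdate" });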
@Override
public Entity update(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
Entity entity = persistencePackage.getEntity();
Customer customer = customerService.readCustomerByUsername(entity.findProperty("username").getValue());
if (StringUtils.isEmpty(customer.getEmailAddress())) {
throw new ServiceException("Unable to update password because an email address is not available for this customer. An email address is required to send the customer the new system generated password.");
}
PasswordReset passwordReset = new PasswordReset();
passwordReset.setUsername(entity.findProperty("username").getValue());
passwordReset.setPasswordChangeRequired(false);
passwordReset.setEmail(customer.getEmailAddress());
passwordReset.setPasswordLength(22);
passwordReset.setSendResetEmailReliableAsync(false);
customer = customerService.resetPassword(passwordReset);
return entity;
}
} | 0true
| admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_handler_CustomerPasswordCustomPersistenceHandler.java |
477 | public class GetAliasesRequestBuilder extends BaseAliasesRequestBuilder<GetAliasesResponse, GetAliasesRequestBuilder> {
public GetAliasesRequestBuilder(IndicesAdminClient client, String... aliases) {
super(client, aliases);
}
@Override
protected void doExecute(ActionListener<GetAliasesResponse> listener) {
((IndicesAdminClient) client).getAliases(request, listener);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_alias_get_GetAliasesRequestBuilder.java |
2,882 | public class LetterTokenizerFactory extends AbstractTokenizerFactory {
@Inject
public LetterTokenizerFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
}
@Override
public Tokenizer create(Reader reader) {
return new LetterTokenizer(version, reader);
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_LetterTokenizerFactory.java |
5 | public class AbbreviationsManager {
/** A regular expression used to separate alternative abbreviations. (\s == any whitespace) */
private static final Pattern ABBREVIATION_SEPARATOR = Pattern.compile("\\s*\\|\\s*");
/** A regular expression used to separate words. */
private static final Pattern WORD_SEPARATOR = Pattern.compile("\\s+");
private Map<String, List<String>> abbreviations = new HashMap<String, List<String>>();
/**
* Creates a new abbreviations manager configured with a set of abbreviation
* properties. Abbreviation properties are of the form:
* <pre>
* phrase = alt1 | alt2 | ...
* </pre>
* Whitespace around the "=" and "|" separators is removed. The phrase is
* converted to lower case, but the alternatives are used verbatim.
*
* @param abbreviationProperties the abbreviation properties
*/
public AbbreviationsManager(Properties abbreviationProperties) {
@SuppressWarnings("unchecked")
Enumeration<String> e = (Enumeration<String>) abbreviationProperties.propertyNames();
while (e.hasMoreElements()) {
String phrase = e.nextElement();
String lcPhrase = phrase.toLowerCase();
String[] alternatives = ABBREVIATION_SEPARATOR.split(abbreviationProperties.getProperty(phrase).trim());
List<String> abbreviationsForPhrase = new ArrayList<String>(Arrays.asList(alternatives));
Collections.sort(abbreviationsForPhrase, new Comparator<String>() {
@Override
public int compare(String o1, String o2) {
return o1.length() - o2.length();
}
});
abbreviations.put(lcPhrase, abbreviationsForPhrase);
}
}
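// A minimal usage sketch (property values are illustrative):
//
//   Properties props = new Properties();
//   props.setProperty("Amplifier Module", "Amp Module | Amp Mod | AM");
//   AbbreviationsManager manager = new AbbreviationsManager(props);
//   manager.getAlternatives("Amplifier Module");
//   // -> ["Amplifier Module", "AM", "Amp Mod", "Amp Module"]
//   //    (original phrase first, then alternatives sorted by increasing length)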
/**
* Gets the alternative abbreviations for a phrase. The original phrase is always
* the first alternative returned. If no abbreviations are found for the phrase, returns
* a list with one element, the original phrase. The phrase is converted to lower case
* before looking up its alternatives.
*
* @param phrase the phrase to abbreviate
* @return a list of alternative abbreviations, with the original phrase as the first element
*/
public List<String> getAlternatives(String phrase) {
List<String> result = new ArrayList<String>();
result.add(phrase);
List<String> alternatives = abbreviations.get(phrase.toLowerCase());
if (alternatives != null) {
result.addAll(alternatives);
}
return result;
}
/**
* Finds the phrases within a string that can be abbreviated, and returns
* a structure with those phrases and the alternatives for each phrase.
* A phrase is a sequence of one or more words in the original string, where
* words are delimited by whitespace. At each point in the original string,
* the longest phrase for which there are abbreviations is found.
*
* @param s the string to find abbreviations for
* @return a structure describing the available abbreviations
*/
public Abbreviations getAbbreviations(String s) {
AbbreviationsImpl abbrev = new AbbreviationsImpl(s);
List<String> phrases = getPhrasesWithAbbreviations(s);
for (String phrase : phrases) {
abbrev.addPhrase(phrase, getAlternatives(phrase));
}
return abbrev;
}
/**
* Constructs a partition of a string into phrases, along word boundaries,
* where each phrase has one or more alternative abbreviations, and each
* phrase is the longest match against the abbreviations at that position
* in the original string.
*
* @param s the original string to partition into phrases
* @return a list of phrases
*/
private List<String> getPhrasesWithAbbreviations(String s) {
int phraseStart = 0;
List<String> phrasesWithAbbreviations = new ArrayList<String>();
Matcher wordBoundary = WORD_SEPARATOR.matcher(s);
while (phraseStart < s.length()) {
int phraseLength = getLongestPhraseLength(s.substring(phraseStart));
phrasesWithAbbreviations.add(s.substring(phraseStart, phraseStart + phraseLength));
if (wordBoundary.find(phraseStart + phraseLength)) {
phraseStart = wordBoundary.end();
} else {
phraseStart = s.length();
}
}
return phrasesWithAbbreviations;
}
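// Worked example (hypothetical abbreviation entries): with keys "signal strength"
// and "indicator" configured, s = "Signal Strength Indicator" is partitioned into
// ["Signal Strength", "Indicator"]: at each position the longest phrase with an
// entry wins, and the first word always forms a phrase even without an entry.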
/**
* Finds the longest phrase within a string that has abbreviations. The first word
* is always a possibility, even if no alternatives exist for that word.
*
* @param s the string for which to find the longest phrase with alternatives
* @return the length of the longest phrase with alternative abbreviations
*/
private int getLongestPhraseLength(String s) {
// If the entire string matches, then it is obviously the longest matching phrase.
if (abbreviations.containsKey(s.toLowerCase())) {
return s.length();
}
Matcher wordBoundary = WORD_SEPARATOR.matcher(s);
if (!wordBoundary.find()) {
// No word boundaries found. The entire string is the only possible phrase.
return s.length();
}
// First word is always an abbreviation candidate, perhaps with no
// alternatives but itself.
int longestMatchLength = wordBoundary.start();
while (wordBoundary.find()) {
if (abbreviations.containsKey(s.substring(0, wordBoundary.start()).toLowerCase())) {
longestMatchLength = wordBoundary.start();
}
}
return longestMatchLength;
}
} | 0true
| tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationsManager.java |
1,459 | public class OCommandExecutorSQLDeleteEdgeTest {
private ODatabaseDocumentTx db;
private ORID folderId1;
private ORID folderId2;
private ORID userId1;
private ORID userId2;
@BeforeClass
public void init() throws Exception {
db = Orient.instance().getDatabaseFactory().createDatabase("graph", "local:target/OCommandExecutorSQLDeleteEdgeTest");
if (db.exists()) {
db.open("admin", "admin");
db.drop();
}
db.create();
final OSchema schema = db.getMetadata().getSchema();
schema.createClass("User", schema.getClass("V"));
schema.createClass("Folder", schema.getClass("V"));
schema.createClass("CanAccess", schema.getClass("E"));
}
@AfterClass
public void tearDown() throws Exception {
db.drop();
}
@BeforeMethod
public void setUp() throws Exception {
db.getMetadata().getSchema().getClass("User").truncate();
db.getMetadata().getSchema().getClass("Folder").truncate();
db.getMetadata().getSchema().getClass("CanAccess").truncate();
userId1 = new ODocument("User").field("username", "gongolo").save().getIdentity();
userId2 = new ODocument("User").field("username", "user2").save().getIdentity();
folderId1 = new ODocument("Folder").field("keyId", "01234567893").save().getIdentity();
folderId2 = new ODocument("Folder").field("keyId", "01234567894").save().getIdentity();
db.command(new OCommandSQL("create edge CanAccess from " + userId1 + " to " + folderId1)).execute();
}
@Test
public void testFromSelect() throws Exception {
final int res = (Integer) db.command(
new OCommandSQL("delete edge CanAccess from (select from User where username = 'gongolo') to " + folderId1)).execute();
Assert.assertEquals(res, 1);
Assert.assertTrue(db.query(new OSQLSynchQuery<Object>("select flatten(out(CanAccess)) from " + userId1)).isEmpty());
}
@Test
public void testFromSelectToSelect() throws Exception {
final int res = (Integer) db
.command(
new OCommandSQL(
"delete edge CanAccess from ( select from User where username = 'gongolo' ) to ( select from Folder where keyId = '01234567893' )"))
.execute();
Assert.assertEquals(res, 1);
Assert.assertTrue(db.query(new OSQLSynchQuery<Object>("select flatten(out(CanAccess)) from " + userId1)).isEmpty());
}
} | 0true
| graphdb_src_test_java_com_orientechnologies_orient_graph_sql_OCommandExecutorSQLDeleteEdgeTest.java |
2,857 | public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet articles;
@Inject
public ElisionTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
this.articles = Analysis.parseArticles(env, settings, version);
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new ElisionFilter(tokenStream, articles);
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_ElisionTokenFilterFactory.java |
479 | register(serviceName, new ClientProxyFactory() {
@Override
public ClientProxy create(String id) {
String instanceName = client.getName();
return instantiateClientProxy(proxyType, instanceName, serviceName, id);
}
}); | 1no label
| hazelcast-client_src_main_java_com_hazelcast_client_spi_ProxyManager.java |
1,279 | public class ClusterHealthResponsesTests extends ElasticsearchTestCase {
private void assertIndexHealth(ClusterIndexHealth indexHealth, ShardCounter counter, IndexMetaData indexMetaData) {
assertThat(indexHealth.getStatus(), equalTo(counter.status()));
assertThat(indexHealth.getNumberOfShards(), equalTo(indexMetaData.getNumberOfShards()));
assertThat(indexHealth.getNumberOfReplicas(), equalTo(indexMetaData.getNumberOfReplicas()));
assertThat(indexHealth.getActiveShards(), equalTo(counter.active));
assertThat(indexHealth.getRelocatingShards(), equalTo(counter.relocating));
assertThat(indexHealth.getInitializingShards(), equalTo(counter.initializing));
assertThat(indexHealth.getUnassignedShards(), equalTo(counter.unassigned));
assertThat(indexHealth.getShards().size(), equalTo(indexMetaData.getNumberOfShards()));
assertThat(indexHealth.getValidationFailures(), empty());
int totalShards = 0;
for (ClusterShardHealth shardHealth : indexHealth.getShards().values()) {
totalShards += shardHealth.getActiveShards() + shardHealth.getInitializingShards() + shardHealth.getUnassignedShards();
}
assertThat(totalShards, equalTo(indexMetaData.getNumberOfShards() * (1 + indexMetaData.getNumberOfReplicas())));
}
protected class ShardCounter {
public int active;
public int relocating;
public int initializing;
public int unassigned;
public int primaryActive;
public int primaryInactive;
public ClusterHealthStatus status() {
if (primaryInactive > 0) {
return ClusterHealthStatus.RED;
}
if (unassigned > 0 || initializing > 0) {
return ClusterHealthStatus.YELLOW;
}
return ClusterHealthStatus.GREEN;
}
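// Examples of the mapping (hypothetical counts): one inactive primary yields RED
// regardless of replica state; all primaries active but one replica initializing
// or unassigned yields YELLOW; everything active yields GREEN.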
public void update(ShardRouting shardRouting) {
if (shardRouting.active()) {
active++;
if (shardRouting.primary()) {
primaryActive++;
}
if (shardRouting.relocating()) {
relocating++;
}
return;
}
if (shardRouting.primary()) {
primaryInactive++;
}
if (shardRouting.initializing()) {
initializing++;
} else {
unassigned++;
}
}
}
static int node_id = 1;
private ImmutableShardRouting genShardRouting(String index, int shardId, boolean primary) {
ShardRoutingState state;
int i = randomInt(40);
if (i > 5) {
state = ShardRoutingState.STARTED;
} else if (i > 3) {
state = ShardRoutingState.RELOCATING;
} else if (i > 1) {
state = ShardRoutingState.INITIALIZING;
} else {
state = ShardRoutingState.UNASSIGNED;
}
switch (state) {
case UNASSIGNED:
return new MutableShardRouting(index, shardId, null, primary, ShardRoutingState.UNASSIGNED, 1);
case STARTED:
return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), primary, ShardRoutingState.STARTED, 1);
case INITIALIZING:
return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), primary, ShardRoutingState.INITIALIZING, 1);
case RELOCATING:
return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), "node_" + Integer.toString(node_id++), primary, ShardRoutingState.RELOCATING, 1);
default:
throw new ElasticsearchException("Unknown state: " + state.name());
}
}
private IndexShardRoutingTable genShardRoutingTable(String index, int shardId, int replicas, ShardCounter counter) {
IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId), true);
ImmutableShardRouting shardRouting = genShardRouting(index, shardId, true);
counter.update(shardRouting);
builder.addShard(shardRouting);
for (; replicas > 0; replicas--) {
shardRouting = genShardRouting(index, shardId, false);
counter.update(shardRouting);
builder.addShard(shardRouting);
}
return builder.build();
}
IndexRoutingTable genIndexRoutingTable(IndexMetaData indexMetaData, ShardCounter counter) {
IndexRoutingTable.Builder builder = IndexRoutingTable.builder(indexMetaData.index());
for (int shard = 0; shard < indexMetaData.numberOfShards(); shard++) {
builder.addIndexShard(genShardRoutingTable(indexMetaData.index(), shard, indexMetaData.getNumberOfReplicas(), counter));
}
return builder.build();
}
@Test
public void testClusterIndexHealth() {
int numberOfShards = randomInt(3) + 1;
int numberOfReplicas = randomInt(4);
IndexMetaData indexMetaData = IndexMetaData.builder("test1").numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
ShardCounter counter = new ShardCounter();
IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
logger.info("index status: {}, expected {}", indexHealth.getStatus(), counter.status());
assertIndexHealth(indexHealth, counter, indexMetaData);
}
private void assertClusterHealth(ClusterHealthResponse clusterHealth, ShardCounter counter) {
assertThat(clusterHealth.getStatus(), equalTo(counter.status()));
assertThat(clusterHealth.getActiveShards(), equalTo(counter.active));
assertThat(clusterHealth.getActivePrimaryShards(), equalTo(counter.primaryActive));
assertThat(clusterHealth.getInitializingShards(), equalTo(counter.initializing));
assertThat(clusterHealth.getRelocatingShards(), equalTo(counter.relocating));
assertThat(clusterHealth.getUnassignedShards(), equalTo(counter.unassigned));
assertThat(clusterHealth.getValidationFailures(), empty());
}
@Test
public void testClusterHealth() {
ShardCounter counter = new ShardCounter();
RoutingTable.Builder routingTable = RoutingTable.builder();
MetaData.Builder metaData = MetaData.builder();
for (int i = randomInt(4); i >= 0; i--) {
int numberOfShards = randomInt(3) + 1;
int numberOfReplicas = randomInt(4);
IndexMetaData indexMetaData = IndexMetaData.builder("test_" + Integer.toString(i)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
metaData.put(indexMetaData, true);
routingTable.add(indexRoutingTable);
}
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(null), clusterState);
logger.info("cluster status: {}, expected {}", clusterHealth.getStatus(), counter.status());
assertClusterHealth(clusterHealth, counter);
}
@Test
public void testValidations() {
IndexMetaData indexMetaData = IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(2).build();
ShardCounter counter = new ShardCounter();
IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
indexMetaData = IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(3).build();
ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
assertThat(indexHealth.getValidationFailures(), Matchers.hasSize(2));
RoutingTable.Builder routingTable = RoutingTable.builder();
MetaData.Builder metaData = MetaData.builder();
metaData.put(indexMetaData, true);
routingTable.add(indexRoutingTable);
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(null), clusterState);
// currently we have no cluster level validation failures as index validation issues are reported per index.
assertThat(clusterHealth.getValidationFailures(), Matchers.hasSize(0));
}
} | 0true
| src_test_java_org_elasticsearch_cluster_ClusterHealthResponsesTests.java |
332 | private static class DummyCommand extends XaCommand
{
private int type = -1;
DummyCommand( int type )
{
this.type = type;
}
@Override
public void execute()
{
}
// public void writeToFile( FileChannel fileChannel, ByteBuffer buffer )
// throws IOException
@Override
public void writeToFile( LogBuffer buffer ) throws IOException
{
// buffer.clear();
buffer.putInt( type );
// buffer.flip();
// fileChannel.write( buffer );
}
} | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DummyXaDataSource.java |
1,890 | class ScopeBindingProcessor extends AbstractProcessor {
ScopeBindingProcessor(Errors errors) {
super(errors);
}
@Override
public Boolean visit(ScopeBinding command) {
Scope scope = command.getScope();
Class<? extends Annotation> annotationType = command.getAnnotationType();
if (!Annotations.isScopeAnnotation(annotationType)) {
errors.withSource(annotationType).missingScopeAnnotation();
// Go ahead and bind anyway so we don't get collateral errors.
}
if (!Annotations.isRetainedAtRuntime(annotationType)) {
errors.withSource(annotationType)
.missingRuntimeRetention(command.getSource());
// Go ahead and bind anyway so we don't get collateral errors.
}
Scope existing = injector.state.getScope(checkNotNull(annotationType, "annotation type"));
if (existing != null) {
errors.duplicateScopes(existing, annotationType, scope);
} else {
injector.state.putAnnotation(annotationType, checkNotNull(scope, "scope"));
}
return true;
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_ScopeBindingProcessor.java |
498 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name="BLC_CATALOG")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
public class CatalogImpl implements Catalog {
private static final Log LOG = LogFactory.getLog(CatalogImpl.class);
@Id
@GeneratedValue(generator= "CatalogId")
@GenericGenerator(
name="CatalogId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="CatalogImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.common.site.domain.CatalogImpl")
}
)
@Column(name = "CATALOG_ID")
protected Long id;
@Column(name = "NAME")
@AdminPresentation(friendlyName = "Catalog_Name", order=1, prominent = true)
protected String name;
@ManyToMany(targetEntity = SiteImpl.class)
@JoinTable(name = "BLC_SITE_CATALOG", joinColumns = @JoinColumn(name = "CATALOG_ID"), inverseJoinColumns = @JoinColumn(name = "SITE_ID"))
@BatchSize(size = 50)
protected List<Site> sites = new ArrayList<Site>();
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public List<Site> getSites() {
return sites;
}
@Override
public void setSites(List<Site> sites) {
this.sites = sites;
}
public void checkCloneable(Catalog catalog) throws CloneNotSupportedException, SecurityException, NoSuchMethodException {
Method cloneMethod = catalog.getClass().getMethod("clone", new Class[]{});
if (cloneMethod.getDeclaringClass().getName().startsWith("org.broadleafcommerce") && !catalog.getClass().getName().startsWith("org.broadleafcommerce")) {
//subclass is not implementing the clone method
throw new CloneNotSupportedException("Custom extensions and implementations should implement clone.");
}
}
@Override
public Catalog clone() {
Catalog clone;
try {
clone = (Catalog) Class.forName(this.getClass().getName()).newInstance();
try {
checkCloneable(clone);
} catch (CloneNotSupportedException e) {
LOG.warn("Clone implementation missing in inheritance hierarchy outside of Broadleaf: " + clone.getClass().getName(), e);
}
clone.setId(id);
clone.setName(name);
} catch (Exception e) {
throw new RuntimeException(e);
}
return clone;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_site_domain_CatalogImpl.java |
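The checkCloneable guard above exists so that any extension outside the org.broadleafcommerce packages must provide its own clone override; otherwise subclass state would be silently dropped. A minimal sketch of a conforming subclass (the class name and its regionCode field are hypothetical):

public class MyCompanyCatalogImpl extends CatalogImpl {
    // hypothetical extra state a custom extension might add
    protected String regionCode;
    @Override
    public Catalog clone() {
        // super.clone() instantiates this concrete class reflectively and
        // copies id and name; only the subclass state remains to be copied
        MyCompanyCatalogImpl clone = (MyCompanyCatalogImpl) super.clone();
        clone.regionCode = this.regionCode;
        return clone;
    }
}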
2,429 | public abstract class AbstractRunnable implements Runnable {
/**
* Should the runnable force its execution in case it gets rejected?
*/
public boolean isForceExecution() {
return false;
}
} | 0true
| src_main_java_org_elasticsearch_common_util_concurrent_AbstractRunnable.java |
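A minimal usage sketch: a task opts into forced execution by overriding isForceExecution, signalling to a rejection-aware executor that it should be run even when the queue is full rather than dropped:

AbstractRunnable task = new AbstractRunnable() {
    @Override
    public boolean isForceExecution() {
        return true; // ask the rejection handler not to drop this task
    }
    @Override
    public void run() {
        // critical work that must not be rejected under load
    }
};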
3,405 | public class FlushStats implements Streamable, ToXContent {
private long total;
private long totalTimeInMillis;
public FlushStats() {
}
public FlushStats(long total, long totalTimeInMillis) {
this.total = total;
this.totalTimeInMillis = totalTimeInMillis;
}
public void add(long total, long totalTimeInMillis) {
this.total += total;
this.totalTimeInMillis += totalTimeInMillis;
}
public void add(FlushStats flushStats) {
if (flushStats == null) {
return;
}
this.total += flushStats.total;
this.totalTimeInMillis += flushStats.totalTimeInMillis;
}
/**
* The total number of flushes executed.
*/
public long getTotal() {
return this.total;
}
/**
* The total time flushes have taken (in milliseconds).
*/
public long getTotalTimeInMillis() {
return this.totalTimeInMillis;
}
/**
* The total time flushes have taken.
*/
public TimeValue getTotalTime() {
return new TimeValue(totalTimeInMillis);
}
public static FlushStats readFlushStats(StreamInput in) throws IOException {
FlushStats flushStats = new FlushStats();
flushStats.readFrom(in);
return flushStats;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.FLUSH);
builder.field(Fields.TOTAL, total);
builder.timeValueField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, totalTimeInMillis);
builder.endObject();
return builder;
}
static final class Fields {
static final XContentBuilderString FLUSH = new XContentBuilderString("flush");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString TOTAL_TIME = new XContentBuilderString("total_time");
static final XContentBuilderString TOTAL_TIME_IN_MILLIS = new XContentBuilderString("total_time_in_millis");
}
@Override
public void readFrom(StreamInput in) throws IOException {
total = in.readVLong();
totalTimeInMillis = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(total);
out.writeVLong(totalTimeInMillis);
}
} | 0true
| src_main_java_org_elasticsearch_index_flush_FlushStats.java |
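A short illustration of how the add overloads accumulate per-shard flush statistics into a total (the numbers are made up):

FlushStats shard0 = new FlushStats(3, 120); // 3 flushes, 120 ms total
FlushStats shard1 = new FlushStats(2, 80);  // 2 flushes, 80 ms total
FlushStats total = new FlushStats();
total.add(shard0);
total.add(shard1);
assert total.getTotal() == 5;
assert total.getTotalTimeInMillis() == 200;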
501 | public interface Theme extends Serializable {
public String getName();
public void setName(String name);
/**
* The path for the theme. Returns blank if no path is available. Should return
* a path that does not start with "/" and that ends with "/". For example, "store/".
* @return
*/
public String getPath();
/**
* Sets the path of the theme.
* @param path
*/
public void setPath(String path);
} | 0true
| common_src_main_java_org_broadleafcommerce_common_site_domain_Theme.java |
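The getPath contract (no leading "/", trailing "/") implies implementations should normalize stored values; a hypothetical helper, not part of the interface, might look like:

// Hypothetical normalizer for the documented path contract.
static String normalizeThemePath(String path) {
    if (path == null || path.isEmpty()) {
        return ""; // blank when no path is available
    }
    while (path.startsWith("/")) {
        path = path.substring(1); // strip leading slashes
    }
    return path.endsWith("/") ? path : path + "/";
}
// normalizeThemePath("/store") yields "store/"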
772 | public class TransportIndexAction extends TransportShardReplicationOperationAction<IndexRequest, IndexRequest, IndexResponse> {
private final AutoCreateIndex autoCreateIndex;
private final boolean allowIdGeneration;
private final TransportCreateIndexAction createIndexAction;
private final MappingUpdatedAction mappingUpdatedAction;
private final boolean waitForMappingChange;
@Inject
public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
TransportCreateIndexAction createIndexAction, MappingUpdatedAction mappingUpdatedAction) {
super(settings, transportService, clusterService, indicesService, threadPool, shardStateAction);
this.createIndexAction = createIndexAction;
this.mappingUpdatedAction = mappingUpdatedAction;
this.autoCreateIndex = new AutoCreateIndex(settings);
this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
this.waitForMappingChange = settings.getAsBoolean("action.wait_on_mapping_change", false);
}
@Override
protected void doExecute(final IndexRequest request, final ActionListener<IndexResponse> listener) {
// if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API
if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) {
request.beforeLocalFork(); // we fork on another thread...
createIndexAction.execute(new CreateIndexRequest(request.index()).cause("auto(index api)").masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
@Override
public void onResponse(CreateIndexResponse result) {
innerExecute(request, listener);
}
@Override
public void onFailure(Throwable e) {
if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
// we have the index, do it
try {
innerExecute(request, listener);
} catch (Throwable e1) {
listener.onFailure(e1);
}
} else {
listener.onFailure(e);
}
}
});
} else {
innerExecute(request, listener);
}
}
@Override
protected boolean resolveRequest(ClusterState state, IndexRequest request, ActionListener<IndexResponse> indexResponseActionListener) {
MetaData metaData = clusterService.state().metaData();
String aliasOrIndex = request.index();
request.index(metaData.concreteIndex(request.index()));
MappingMetaData mappingMd = null;
if (metaData.hasIndex(request.index())) {
mappingMd = metaData.index(request.index()).mappingOrDefault(request.type());
}
request.process(metaData, aliasOrIndex, mappingMd, allowIdGeneration);
return true;
}
private void innerExecute(final IndexRequest request, final ActionListener<IndexResponse> listener) {
super.doExecute(request, listener);
}
@Override
protected boolean checkWriteConsistency() {
return true;
}
@Override
protected IndexRequest newRequestInstance() {
return new IndexRequest();
}
@Override
protected IndexRequest newReplicaRequestInstance() {
return new IndexRequest();
}
@Override
protected IndexResponse newResponseInstance() {
return new IndexResponse();
}
@Override
protected String transportAction() {
return IndexAction.NAME;
}
@Override
protected String executor() {
return ThreadPool.Names.INDEX;
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, IndexRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, IndexRequest request) {
return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index());
}
@Override
protected ShardIterator shards(ClusterState clusterState, IndexRequest request) {
return clusterService.operationRouting()
.indexShards(clusterService.state(), request.index(), request.type(), request.id(), request.routing());
}
@Override
protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
final IndexRequest request = shardRequest.request;
// validate, if routing is required, that we got routing
IndexMetaData indexMetaData = clusterState.metaData().index(request.index());
MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
if (mappingMd != null && mappingMd.routing().required()) {
if (request.routing() == null) {
throw new RoutingMissingException(request.index(), request.type(), request.id());
}
}
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).type(request.type()).id(request.id())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
long version;
boolean created;
Engine.IndexingOperation op;
if (request.opType() == IndexRequest.OpType.INDEX) {
Engine.Index index = indexShard.prepareIndex(sourceToParse)
.version(request.version())
.versionType(request.versionType())
.origin(Engine.Operation.Origin.PRIMARY);
if (index.parsedDoc().mappingsModified()) {
updateMappingOnMaster(request, indexMetaData);
}
indexShard.index(index);
version = index.version();
op = index;
created = index.created();
} else {
Engine.Create create = indexShard.prepareCreate(sourceToParse)
.version(request.version())
.versionType(request.versionType())
.origin(Engine.Operation.Origin.PRIMARY);
if (create.parsedDoc().mappingsModified()) {
updateMappingOnMaster(request, indexMetaData);
}
indexShard.create(create);
version = create.version();
op = create;
created = true;
}
if (request.refresh()) {
try {
indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
} catch (Throwable e) {
// ignore
}
}
// update the version on the request, so it will be used for the replicas
request.version(version);
IndexResponse response = new IndexResponse(request.index(), request.type(), request.id(), version, created);
return new PrimaryResponse<IndexResponse, IndexRequest>(shardRequest.request, response, op);
}
@Override
protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.request.index()).shardSafe(shardRequest.shardId);
IndexRequest request = shardRequest.request;
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).type(request.type()).id(request.id())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
if (request.opType() == IndexRequest.OpType.INDEX) {
Engine.Index index = indexShard.prepareIndex(sourceToParse)
.version(request.version())
.origin(Engine.Operation.Origin.REPLICA);
indexShard.index(index);
} else {
Engine.Create create = indexShard.prepareCreate(sourceToParse)
.version(request.version())
.origin(Engine.Operation.Origin.REPLICA);
indexShard.create(create);
}
if (request.refresh()) {
try {
indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
} catch (Exception e) {
// ignore
}
}
}
private void updateMappingOnMaster(final IndexRequest request, IndexMetaData indexMetaData) {
final CountDownLatch latch = new CountDownLatch(1);
try {
final MapperService mapperService = indicesService.indexServiceSafe(request.index()).mapperService();
final DocumentMapper documentMapper = mapperService.documentMapper(request.type());
if (documentMapper == null) { // should not happen
return;
}
// we generate the order id before we get the mapping to send and refresh the source, so
// if 2 happen concurrently, we know that the later order will include the previous one
long orderId = mappingUpdatedAction.generateNextMappingUpdateOrder();
documentMapper.refreshSource();
DiscoveryNode node = clusterService.localNode();
final MappingUpdatedAction.MappingUpdatedRequest mappingRequest =
new MappingUpdatedAction.MappingUpdatedRequest(request.index(), indexMetaData.uuid(), request.type(), documentMapper.mappingSource(), orderId, node != null ? node.id() : null);
logger.trace("Sending mapping updated to master: {}", mappingRequest);
mappingUpdatedAction.execute(mappingRequest, new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {
@Override
public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) {
// all is well
latch.countDown();
}
@Override
public void onFailure(Throwable e) {
latch.countDown();
logger.warn("Failed to update master on updated mapping for {}", e, mappingRequest);
}
});
} catch (Exception e) {
latch.countDown();
logger.warn("Failed to update master on updated mapping for index [" + request.index() + "], type [" + request.type() + "]", e);
}
if (waitForMappingChange) {
try {
latch.await(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// ignore
}
}
}
} | 0true
| src_main_java_org_elasticsearch_action_index_TransportIndexAction.java |
283 | static class PropertyKeyTokenCommand extends Command
{
private final PropertyKeyTokenRecord record;
private final PropertyKeyTokenStore store;
PropertyKeyTokenCommand( PropertyKeyTokenStore store,
PropertyKeyTokenRecord record )
{
super( record.getId(), Mode.fromRecordState( record ) );
this.record = record;
this.store = store;
}
@Override
public void accept( CommandRecordVisitor visitor )
{
visitor.visitPropertyKeyToken( record );
}
@Override
public String toString()
{
return record.toString();
}
@Override
void removeFromCache( CacheAccessBackDoor cacheAccess )
{
// no-op
}
@Override
public void execute()
{
store.updateRecord( record );
}
@Override
public void writeToFile( LogBuffer buffer ) throws IOException
{
// id+in_use(byte)+count(int)+key_blockId(int)+nr_key_records(int)
byte inUse = record.inUse() ? Record.IN_USE.byteValue()
: Record.NOT_IN_USE.byteValue();
buffer.put( PROP_INDEX_COMMAND );
buffer.putInt( record.getId() );
buffer.put( inUse );
buffer.putInt( record.getPropertyCount() ).putInt( record.getNameId() );
if ( record.isLight() )
{
buffer.putInt( 0 );
}
else
{
writeDynamicRecords( buffer, record.getNameRecords() );
}
}
public static Command readFromFile( NeoStore neoStore, ReadableByteChannel byteChannel,
ByteBuffer buffer ) throws IOException
{
// id+in_use(byte)+count(int)+key_blockId(int)
if ( !readAndFlip( byteChannel, buffer, 13 ) )
{
return null;
}
int id = buffer.getInt();
byte inUseFlag = buffer.get();
boolean inUse = false;
if ( (inUseFlag & Record.IN_USE.byteValue()) == Record.IN_USE
.byteValue() )
{
inUse = true;
}
else if ( inUseFlag != Record.NOT_IN_USE.byteValue() )
{
throw new IOException( "Illegal in use flag: " + inUseFlag );
}
PropertyKeyTokenRecord record = new PropertyKeyTokenRecord( id );
record.setInUse( inUse );
record.setPropertyCount( buffer.getInt() );
record.setNameId( buffer.getInt() );
if ( !readDynamicRecords( byteChannel, buffer, record, PROPERTY_INDEX_DYNAMIC_RECORD_ADDER ) )
{
return null;
}
return new PropertyKeyTokenCommand( neoStore == null ? null : neoStore.getPropertyStore()
.getPropertyKeyTokenStore(), record );
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java |
2,001 | public static class FailAwareMapStore implements MapStore {
final Map db = new ConcurrentHashMap();
final AtomicLong deletes = new AtomicLong();
final AtomicLong deleteAlls = new AtomicLong();
final AtomicLong stores = new AtomicLong();
final AtomicLong storeAlls = new AtomicLong();
final AtomicLong loads = new AtomicLong();
final AtomicLong loadAlls = new AtomicLong();
final AtomicLong loadAllKeys = new AtomicLong();
final AtomicBoolean storeFail = new AtomicBoolean(false);
final AtomicBoolean loadFail = new AtomicBoolean(false);
final List<BlockingQueue> listeners = new CopyOnWriteArrayList<BlockingQueue>();
public void addListener(BlockingQueue obj) {
listeners.add(obj);
}
public void notifyListeners() {
for (BlockingQueue listener : listeners) {
listener.offer(new Object());
}
}
public void delete(Object key) {
try {
if (storeFail.get()) {
throw new RuntimeException();
} else {
db.remove(key);
}
} finally {
deletes.incrementAndGet();
notifyListeners();
}
}
public void setFail(boolean shouldFail, boolean loadFail) {
this.storeFail.set(shouldFail);
this.loadFail.set(loadFail);
}
public int dbSize() {
return db.size();
}
public boolean dbContainsKey(Object key) {
return db.containsKey(key);
}
public Object dbGet(Object key) {
return db.get(key);
}
public void store(Object key, Object value) {
try {
if (storeFail.get()) {
throw new RuntimeException();
} else {
db.put(key, value);
}
} finally {
stores.incrementAndGet();
notifyListeners();
}
}
public Set loadAllKeys() {
try {
return db.keySet();
} finally {
loadAllKeys.incrementAndGet();
}
}
public Object load(Object key) {
try {
if (loadFail.get()) {
throw new RuntimeException();
} else {
return db.get(key);
}
} finally {
loads.incrementAndGet();
}
}
public void storeAll(Map map) {
try {
if (storeFail.get()) {
throw new RuntimeException();
} else {
db.putAll(map);
}
} finally {
storeAlls.incrementAndGet();
notifyListeners();
}
}
public Map loadAll(Collection keys) {
try {
if (loadFail.get()) {
throw new RuntimeException();
} else {
Map results = new HashMap();
for (Object key : keys) {
Object value = db.get(key);
if (value != null) {
results.put(key, value);
}
}
return results;
}
} finally {
loadAlls.incrementAndGet();
notifyListeners();
}
}
public void deleteAll(Collection keys) {
try {
if (storeFail.get()) {
throw new RuntimeException();
} else {
for (Object key : keys) {
db.remove(key);
}
}
} finally {
deleteAlls.incrementAndGet();
notifyListeners();
}
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_map_mapstore_MapStoreTest.java |
461 | executor.execute(new Runnable() {
@Override
public void run() {
int half = testValues.length / 2;
for (int i = 0; i < testValues.length; i++) {
final ReplicatedMap map = i < half ? map1 : map2;
final AbstractMap.SimpleEntry<Integer, Integer> entry = testValues[i];
map.put(entry.getKey(), entry.getValue());
}
}
}, 2, EntryEventType.ADDED, testValues.length, 0.75, map1, map2); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_replicatedmap_ClientReplicatedMapTest.java |
767 | public class SetProxyImpl<E> extends AbstractCollectionProxyImpl<SetService, E> implements ISet<E> {
public SetProxyImpl(String name, NodeEngine nodeEngine, SetService service) {
super(name, nodeEngine, service);
}
@Override
protected CollectionConfig getConfig(NodeEngine nodeEngine) {
return nodeEngine.getConfig().findSetConfig(name);
}
@Override
public String getServiceName() {
return SetService.SERVICE_NAME;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_collection_set_SetProxyImpl.java |
326 | public static final ReadConfiguration EMPTY = new ReadConfiguration() {
@Override
public<O> O get(String key, Class<O> datatype) {
return null;
}
@Override
public Iterable<String> getKeys(String prefix) {
return ImmutableList.of();
}
@Override
public void close() {
//Nothing
}
}; | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_configuration_ReadConfiguration.java |
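EMPTY is a null object: every lookup returns null and getKeys returns an empty list, so callers can substitute it for a missing configuration instead of null-checking every access. A sketch (loadConfigOrNull is a hypothetical loader):

ReadConfiguration config = loadConfigOrNull();
if (config == null) {
    config = ReadConfiguration.EMPTY; // behaves like an empty configuration
}
// safe regardless of whether a real configuration was found
Integer port = config.get("port", Integer.class); // null when absent
for (String key : config.getKeys("storage.")) {
    // never entered for EMPTY
}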
3,418 | public static class Snapshot {
private final SnapshotIndexCommit indexCommit;
private final Translog.Snapshot translogSnapshot;
private final long lastIndexVersion;
private final long lastTranslogId;
private final long lastTranslogLength;
private final int lastTotalTranslogOperations;
public Snapshot(SnapshotIndexCommit indexCommit, Translog.Snapshot translogSnapshot, long lastIndexVersion, long lastTranslogId, long lastTranslogLength, int lastTotalTranslogOperations) {
this.indexCommit = indexCommit;
this.translogSnapshot = translogSnapshot;
this.lastIndexVersion = lastIndexVersion;
this.lastTranslogId = lastTranslogId;
this.lastTranslogLength = lastTranslogLength;
this.lastTotalTranslogOperations = lastTotalTranslogOperations;
}
/**
* Indicates that the index has changed from the latest snapshot.
*/
public boolean indexChanged() {
return lastIndexVersion != indexCommit.getGeneration();
}
/**
* Indicates that a new transaction log has been created. Note: check this <b>before</b> you
* check {@link #sameTranslogNewOperations()}.
*/
public boolean newTranslogCreated() {
return translogSnapshot.translogId() != lastTranslogId;
}
/**
* Indicates that the same translog exists, but new operations have been appended to it. Throws
* {@link org.elasticsearch.ElasticsearchIllegalStateException} if {@link #newTranslogCreated()} is <tt>true</tt>, so
* always check that first.
*/
public boolean sameTranslogNewOperations() {
if (newTranslogCreated()) {
throw new ElasticsearchIllegalStateException("Should not be called when there is a new translog");
}
return translogSnapshot.length() > lastTranslogLength;
}
public SnapshotIndexCommit indexCommit() {
return indexCommit;
}
public Translog.Snapshot translogSnapshot() {
return translogSnapshot;
}
public long lastIndexVersion() {
return lastIndexVersion;
}
public long lastTranslogId() {
return lastTranslogId;
}
public long lastTranslogLength() {
return lastTranslogLength;
}
public int lastTotalTranslogOperations() {
return this.lastTotalTranslogOperations;
}
} | 0true
| src_main_java_org_elasticsearch_index_gateway_IndexShardGateway.java |
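A sketch of the decision flow the javadoc prescribes: newTranslogCreated() must be consulted first, since sameTranslogNewOperations() throws when a new translog exists (planSnapshot is an illustrative name):

void planSnapshot(IndexShardGateway.Snapshot snapshot) {
    if (snapshot.indexChanged()) {
        // copy the changed index files from snapshot.indexCommit()
    }
    if (snapshot.newTranslogCreated()) {
        // a fresh translog: ship it from the beginning
    } else if (snapshot.sameTranslogNewOperations()) {
        // same translog: ship only what was appended past lastTranslogLength()
    }
}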
5,425 | public static class WithScript extends Bytes {
private final BytesValues bytesValues;
public WithScript(FieldDataSource delegate, SearchScript script) {
this.bytesValues = new BytesValues(delegate, script);
}
@Override
public MetaData metaData() {
return MetaData.UNKNOWN;
}
@Override
public BytesValues bytesValues() {
return bytesValues;
}
static class BytesValues extends org.elasticsearch.index.fielddata.BytesValues {
private final FieldDataSource source;
private final SearchScript script;
private final BytesRef scratch;
public BytesValues(FieldDataSource source, SearchScript script) {
super(true);
this.source = source;
this.script = script;
scratch = new BytesRef();
}
@Override
public int setDocument(int docId) {
return source.bytesValues().setDocument(docId);
}
@Override
public BytesRef nextValue() {
BytesRef value = source.bytesValues().nextValue();
script.setNextVar("_value", value.utf8ToString());
scratch.copyChars(script.run().toString());
return scratch;
}
}
} | 1no label
| src_main_java_org_elasticsearch_search_aggregations_support_FieldDataSource.java |
1,615 | private class StateSendThread extends Thread {
private final TimedMemberStateFactory timedMemberStateFactory;
private final int updateIntervalMs;
private StateSendThread() {
super(instance.getThreadGroup(), instance.node.getThreadNamePrefix("MC.State.Sender"));
timedMemberStateFactory = new TimedMemberStateFactory(instance);
updateIntervalMs = calcUpdateInterval();
}
private int calcUpdateInterval() {
int updateInterval = managementCenterConfig.getUpdateInterval();
return updateInterval > 0 ? updateInterval * 1000 : 5000;
}
@Override
public void run() {
try {
while (isRunning()) {
sleepOnVersionMismatch();
sendState();
sleep();
}
} catch (Throwable throwable) {
inspectOutputMemoryError(throwable);
logger.warning("Hazelcast Management Center Service will be shutdown due to exception.", throwable);
shutdown();
}
}
private void sleep() throws InterruptedException {
Thread.sleep(updateIntervalMs);
}
private void sendState() throws InterruptedException, MalformedURLException {
URL url = newCollectorUrl();
try {
//todo: does the connection not need to be closed?
HttpURLConnection connection = openConnection(url);
OutputStream outputStream = connection.getOutputStream();
try {
identifier.write(outputStream);
ObjectDataOutputStream out = serializationService.createObjectDataOutputStream(outputStream);
TimedMemberState timedMemberState = timedMemberStateFactory.createTimedMemberState();
timedMemberState.writeData(out);
outputStream.flush();
post(connection);
} finally {
closeResource(outputStream);
}
} catch (ConnectException e) {
if (logger.isFinestEnabled()) {
logger.finest(e);
} else {
logger.info("Failed to connect to:" + url);
}
} catch (Exception e) {
logger.warning(e);
}
}
private HttpURLConnection openConnection(URL url) throws IOException {
if (logger.isFinestEnabled()) {
logger.finest("Opening collector connection:" + url);
}
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setDoOutput(true);
connection.setRequestMethod("POST");
connection.setConnectTimeout(5000);
connection.setReadTimeout(5000);
return connection;
}
private URL newCollectorUrl() throws MalformedURLException {
String url = cleanupUrl(managementCenterUrl) + "collector.do";
if (clusterId != null) {
url += "?clusterid=" + clusterId;
}
if (securityToken != null) {
if (clusterId == null) {
url += "?securitytoken=" + securityToken;
} else {
url += "&securitytoken=" + securityToken;
}
}
return new URL(url);
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_management_ManagementCenterService.java |
100 | private static class FindPositionalArgumentsVisitor
extends Visitor
implements NaturalVisitor {
Tree.PositionalArgumentList argumentList;
int offset;
private Tree.PositionalArgumentList getArgumentList() {
return argumentList;
}
private FindPositionalArgumentsVisitor(int offset) {
this.offset = offset;
}
@Override
public void visit(Tree.ExtendedType that) {
//don't add proposals for extends clause
}
@Override
public void visit(Tree.PositionalArgumentList that) {
Integer start = that.getStartIndex();
Integer stop = that.getStopIndex();
if (start!=null && offset>=start &&
stop!=null && offset<=stop+1) {
argumentList = that;
}
super.visit(that);
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_ConvertToNamedArgumentsProposal.java |
95 | public interface ODirectMemory {
/**
* Representation of a null pointer in the given memory model.
*/
public long NULL_POINTER = 0;
/**
* Allocates the amount of memory needed to hold the passed-in byte array and writes it.
*
* @param bytes
* Data that is needed to be written.
* @return Pointer to the allocated piece of memory.
*/
long allocate(byte[] bytes);
/**
* Allocates the given amount of memory (in bytes) from the pool and returns a pointer to the allocated memory, or {@link #NULL_POINTER}
* if there is not enough memory in the pool.
*
* @param size
* Size that is needed to be allocated.
* @return Pointer to the allocated memory.
*/
long allocate(long size);
/**
* Returns allocated memory back to the pool.
*
* @param pointer
* Pointer to the allocated piece of memory.
*/
void free(long pointer);
/**
* Reads raw data from given piece of memory.
*
*
* @param pointer
* Memory pointer, returned by {@link #allocate(long)} method.
* @param length
* Size of data which should be returned.
* @return Raw data from given piece of memory.
*/
byte[] get(long pointer, int length);
void get(long pointer, byte[] array, int arrayOffset, int length);
/**
* Writes data to the given piece of memory.
*
* @param pointer
* Memory pointer, returned by {@link #allocate(long)} method.
* @param content
* @param arrayOffset
* @param length
*/
void set(long pointer, byte[] content, int arrayOffset, int length);
/**
* Return <code>int</code> value from given piece of memory.
*
*
* @param pointer
* Memory pointer, returned by {@link #allocate(long)} method.
* @return Int value.
*/
int getInt(long pointer);
/**
* Write <code>int</code> value to given piece of memory.
*
* @param pointer
* Memory pointer, returned by {@link #allocate(long)} method.
*
*/
void setInt(long pointer, int value);
void setShort(long pointer, short value);
short getShort(long pointer);
/**
* Return <code>long</code> value from given piece of memory.
*
*
* @param pointer
* Memory pointer, returned by {@link #allocate(long)} method.
* @return long value.
*/
long getLong(long pointer);
/**
* Write <code>long</code> value to given piece of memory.
*
* @param pointer
* Memory pointer, returned by {@link #allocate(long)} method.
*
*/
void setLong(long pointer, long value);
/**
* Return <code>byte</code> value from given piece of memory.
*
*
* @param pointer
* Memory pointer, returned by {@link #allocate(long)} method.
* @return byte value.
*/
byte getByte(long pointer);
/**
* Write <code>byte</code> value to given piece of memory.
*
* @param pointer
* Memory pointer, returned by {@link #allocate(long)} method.
*
*/
void setByte(long pointer, byte value);
void setChar(long pointer, char value);
char getChar(long pointer);
/**
* Performs copying of raw data in memory from one position to another.
*
* @param srcPointer
* Memory pointer, returned by {@link #allocate(long)} method, from which data will be copied.
* @param destPointer
* Memory pointer to which data will be copied.
* @param len
* Data length.
*/
void moveData(long srcPointer, long destPointer, long len);
} | 0true
| commons_src_main_java_com_orientechnologies_common_directmemory_ODirectMemory.java |
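A minimal round trip through the interface; how a concrete ODirectMemory instance is obtained is implementation-specific and left out of this sketch:

void roundTrip(ODirectMemory memory) {
    long pointer = memory.allocate(8L); // room for one long
    try {
        memory.setLong(pointer, 42L);
        long value = memory.getLong(pointer);
        assert value == 42L;
    } finally {
        memory.free(pointer); // off-heap memory must be freed explicitly
    }
}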
2,639 | threadPool.generic().execute(new Runnable() {
@Override
public void run() {
MulticastZenPing.this.stop();
MulticastZenPing.this.start();
}
}); | 0true
| src_main_java_org_elasticsearch_discovery_zen_ping_multicast_MulticastZenPing.java |
54 | @RequestMapping("/" + AdminStructuredContentController.SECTION_KEY)
public class AdminStructuredContentController extends AdminBasicEntityController {
protected static final String SECTION_KEY = "structured-content";
@Override
protected String getSectionKey(Map<String, String> pathVars) {
//allow external links to work for ToOne items
if (super.getSectionKey(pathVars) != null) {
return super.getSectionKey(pathVars);
}
return SECTION_KEY;
}
@Override
@RequestMapping(value = "/{id}", method = RequestMethod.GET)
public String viewEntityForm(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@PathVariable(value="id") String id) throws Exception {
// Get the normal entity form for this item
String returnPath = super.viewEntityForm(request, response, model, pathVars, id);
EntityForm ef = (EntityForm) model.asMap().get("entityForm");
// Attach the dynamic fields to the form
DynamicEntityFormInfo info = new DynamicEntityFormInfo()
.withCeilingClassName(StructuredContentType.class.getName())
.withCriteriaName("constructForm")
.withPropertyName("structuredContentType")
.withPropertyValue(ef.findField("structuredContentType").getValue());
EntityForm dynamicForm = getDynamicFieldTemplateForm(info, id, null);
ef.putDynamicFormInfo("structuredContentType", info);
ef.putDynamicForm("structuredContentType", dynamicForm);
// We don't want to allow changing types once a structured content item exists
ef.findField("structuredContentType").setReadOnly(true);
return returnPath;
}
@Override
@RequestMapping(value = "/{id}", method = RequestMethod.POST)
public String saveEntity(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@PathVariable(value="id") String id,
@ModelAttribute(value="entityForm") EntityForm entityForm, BindingResult result,
RedirectAttributes ra) throws Exception {
// Attach the dynamic form info so that the update service will know how to split up the fields
DynamicEntityFormInfo info = new DynamicEntityFormInfo()
.withCeilingClassName(StructuredContentType.class.getName())
.withCriteriaName("constructForm")
.withPropertyName("structuredContentType");
entityForm.putDynamicFormInfo("structuredContentType", info);
String returnPath = super.saveEntity(request, response, model, pathVars, id, entityForm, result, ra);
if (result.hasErrors()) {
info = entityForm.getDynamicFormInfo("structuredContentType");
info.setPropertyValue(entityForm.findField("structuredContentType").getValue());
//grab back the dynamic form that was actually put in
EntityForm inputDynamicForm = entityForm.getDynamicForm("structuredContentType");
EntityForm dynamicForm = getDynamicFieldTemplateForm(info, id, inputDynamicForm);
entityForm.putDynamicForm("structuredContentType", dynamicForm);
}
return returnPath;
}
@RequestMapping(value = "/{propertyName}/dynamicForm", method = RequestMethod.GET)
public String getDynamicForm(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@PathVariable("propertyName") String propertyName,
@RequestParam("propertyTypeId") String propertyTypeId) throws Exception {
DynamicEntityFormInfo info = new DynamicEntityFormInfo()
.withCeilingClassName(StructuredContentType.class.getName())
.withCriteriaName("constructForm")
.withPropertyName(propertyName)
.withPropertyValue(propertyTypeId);
return super.getDynamicForm(request, response, model, pathVars, info);
}
} | 1no label
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_web_controller_AdminStructuredContentController.java |
290 | public class ORubyScriptFormatter implements OScriptFormatter {
public String getFunctionDefinition(final OFunction f) {
final StringBuilder fCode = new StringBuilder();
fCode.append("def ");
fCode.append(f.getName());
fCode.append('(');
int i = 0;
if (f.getParameters() != null)
for (String p : f.getParameters()) {
if (i++ > 0)
fCode.append(',');
fCode.append(p);
}
fCode.append(")\n");
final Scanner scanner = new Scanner(f.getCode());
try {
scanner.useDelimiter("\n").skip("\r");
while (scanner.hasNext()) {
fCode.append('\t');
fCode.append(scanner.next());
}
} finally {
scanner.close();
}
fCode.append("\nend\n");
return fCode.toString();
}
@Override
public String getFunctionInvoke(final OFunction iFunction, final Object[] iArgs) {
final StringBuilder code = new StringBuilder();
code.append(iFunction.getName());
code.append('(');
if (iArgs != null) {
int i = 0;
for (Object a : iArgs) {
if (i++ > 0)
code.append(',');
code.append(a);
}
}
code.append(");");
return code.toString();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_command_script_formatter_ORubyScriptFormatter.java |
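For illustration, assuming a stored function named sum with parameters a and b:

// getFunctionInvoke(sum, new Object[] { 1, 2 }) returns "sum(1,2);"
// getFunctionDefinition(sum) returns a block of the form:
//   def sum(a,b)
//       ...tab-indented body lines...
//   end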
1,681 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_ADMIN_PERMISSION")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@AdminPresentationClass(friendlyName = "AdminPermissionImpl_baseAdminPermission")
public class AdminPermissionImpl implements AdminPermission {
private static final Log LOG = LogFactory.getLog(AdminPermissionImpl.class);
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "AdminPermissionId")
@GenericGenerator(
name="AdminPermissionId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="AdminPermissionImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.openadmin.server.security.domain.AdminPermissionImpl")
}
)
@Column(name = "ADMIN_PERMISSION_ID")
@AdminPresentation(friendlyName = "AdminPermissionImpl_Admin_Permission_ID", group = "AdminPermissionImpl_Primary_Key", visibility = VisibilityEnum.HIDDEN_ALL)
protected Long id;
@Column(name = "NAME", nullable=false)
@Index(name="ADMINPERM_NAME_INDEX", columnNames={"NAME"})
@AdminPresentation(friendlyName = "AdminPermissionImpl_Name", order=1, group = "AdminPermissionImpl_Permission", prominent=true)
protected String name;
@Column(name = "PERMISSION_TYPE", nullable=false)
@Index(name="ADMINPERM_TYPE_INDEX", columnNames={"PERMISSION_TYPE"})
@AdminPresentation(friendlyName = "AdminPermissionImpl_Permission_Type", order = 3, group = "AdminPermissionImpl_Permission", fieldType = SupportedFieldType.BROADLEAF_ENUMERATION, broadleafEnumeration = "org.broadleafcommerce.openadmin.server.security.service.type.PermissionType", prominent = true)
protected String type;
@Column(name = "DESCRIPTION", nullable=false)
@AdminPresentation(friendlyName = "AdminPermissionImpl_Description", order=2, group = "AdminPermissionImpl_Permission", prominent=true)
protected String description;
@ManyToMany(fetch = FetchType.LAZY, targetEntity = AdminRoleImpl.class)
@JoinTable(name = "BLC_ADMIN_ROLE_PERMISSION_XREF", joinColumns = @JoinColumn(name = "ADMIN_PERMISSION_ID", referencedColumnName = "ADMIN_PERMISSION_ID"), inverseJoinColumns = @JoinColumn(name = "ADMIN_ROLE_ID", referencedColumnName = "ADMIN_ROLE_ID"))
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
protected Set<AdminRole> allRoles= new HashSet<AdminRole>();
@ManyToMany(fetch = FetchType.LAZY, targetEntity = AdminUserImpl.class)
@JoinTable(name = "BLC_ADMIN_USER_PERMISSION_XREF", joinColumns = @JoinColumn(name = "ADMIN_PERMISSION_ID", referencedColumnName = "ADMIN_PERMISSION_ID"), inverseJoinColumns = @JoinColumn(name = "ADMIN_USER_ID", referencedColumnName = "ADMIN_USER_ID"))
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
protected Set<AdminUser> allUsers= new HashSet<AdminUser>();
@OneToMany(mappedBy = "adminPermission", targetEntity = AdminPermissionQualifiedEntityImpl.class, cascade = {CascadeType.ALL})
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
protected List<AdminPermissionQualifiedEntity> qualifiedEntities = new ArrayList<AdminPermissionQualifiedEntity>();
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public String getDescription() {
return description;
}
@Override
public void setDescription(String description) {
this.description = description;
}
@Override
public Set<AdminRole> getAllRoles() {
return allRoles;
}
@Override
public void setAllRoles(Set<AdminRole> allRoles) {
this.allRoles = allRoles;
}
@Override
public PermissionType getType() {
return PermissionType.getInstance(type);
}
@Override
public void setType(PermissionType type) {
if (type != null) {
this.type = type.getType();
}
}
@Override
public List<AdminPermissionQualifiedEntity> getQualifiedEntities() {
return qualifiedEntities;
}
@Override
public void setQualifiedEntities(List<AdminPermissionQualifiedEntity> qualifiedEntities) {
this.qualifiedEntities = qualifiedEntities;
}
@Override
public Set<AdminUser> getAllUsers() {
return allUsers;
}
@Override
public void setAllUsers(Set<AdminUser> allUsers) {
this.allUsers = allUsers;
}
public void checkCloneable(AdminPermission adminPermission) throws CloneNotSupportedException, SecurityException, NoSuchMethodException {
Method cloneMethod = adminPermission.getClass().getMethod("clone", new Class[]{});
if (cloneMethod.getDeclaringClass().getName().startsWith("org.broadleafcommerce") && !adminPermission.getClass().getName().startsWith("org.broadleafcommerce")) {
//subclass is not implementing the clone method
throw new CloneNotSupportedException("Custom extensions and implementations should implement clone.");
}
}
@Override
public AdminPermission clone() {
AdminPermission clone;
try {
clone = (AdminPermission) Class.forName(this.getClass().getName()).newInstance();
try {
checkCloneable(clone);
} catch (CloneNotSupportedException e) {
LOG.warn("Clone implementation missing in inheritance hierarchy outside of Broadleaf: " + clone.getClass().getName(), e);
}
clone.setId(id);
clone.setName(name);
clone.setType(getType());
clone.setDescription(description);
//don't clone the allUsers collection, as it would cause a recursion
//don't clone the allRoles collection, as it would cause a recursion
if (qualifiedEntities != null) {
for (AdminPermissionQualifiedEntity qualifiedEntity : qualifiedEntities) {
AdminPermissionQualifiedEntity qualifiedEntityClone = qualifiedEntity.clone();
qualifiedEntityClone.setAdminPermission(clone);
clone.getQualifiedEntities().add(qualifiedEntityClone);
}
}
} catch (Exception e) {
throw new RuntimeException(e);
}
return clone;
}
} | 0true
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_security_domain_AdminPermissionImpl.java |
10 | public interface DataArchive extends FeedDataArchive {
/**
* Return the level of service of this data archive.
*
* @return the level of service of this data archive
*/
public LOS getLOS();
} | 0true
| mctcore_src_main_java_gov_nasa_arc_mct_api_feed_DataArchive.java |
406 | runConflictingTx(new TxJob() {
@Override
public void run(IndexTransaction tx) {
tx.delete(defStore, defDoc, TEXT, ImmutableMap.of(), true);
}
}, new TxJob() { | 0true
| titan-test_src_main_java_com_thinkaurelius_titan_diskstorage_indexing_IndexProviderTest.java |
755 | public class MultiGetRequestBuilder extends ActionRequestBuilder<MultiGetRequest, MultiGetResponse, MultiGetRequestBuilder> {
public MultiGetRequestBuilder(Client client) {
super((InternalClient) client, new MultiGetRequest());
}
public MultiGetRequestBuilder add(String index, @Nullable String type, String id) {
request.add(index, type, id);
return this;
}
public MultiGetRequestBuilder add(String index, @Nullable String type, Iterable<String> ids) {
for (String id : ids) {
request.add(index, type, id);
}
return this;
}
public MultiGetRequestBuilder add(String index, @Nullable String type, String... ids) {
for (String id : ids) {
request.add(index, type, id);
}
return this;
}
public MultiGetRequestBuilder add(MultiGetRequest.Item item) {
request.add(item);
return this;
}
/**
* Sets the preference to execute the get. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public MultiGetRequestBuilder setPreference(String preference) {
request.preference(preference);
return this;
}
/**
* Should a refresh be executed before this get operation, causing the operation to
* return the latest value. Note: heavy get workloads should not set this to <tt>true</tt>. Defaults
* to <tt>false</tt>.
*/
public MultiGetRequestBuilder setRefresh(boolean refresh) {
request.refresh(refresh);
return this;
}
public MultiGetRequestBuilder setRealtime(Boolean realtime) {
request.realtime(realtime);
return this;
}
@Override
protected void doExecute(ActionListener<MultiGetResponse> listener) {
((Client) client).multiGet(request, listener);
}
} | 1no label
| src_main_java_org_elasticsearch_action_get_MultiGetRequestBuilder.java |
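A typical use from the client API; the index, type and id values here are illustrative:

MultiGetResponse response = client.prepareMultiGet()
        .add("products", "product", "1", "2", "3") // three docs, one round trip
        .setPreference("_local")
        .setRealtime(true)
        .execute()
        .actionGet();
for (MultiGetItemResponse item : response) {
    if (!item.isFailed()) {
        String source = item.getResponse().getSourceAsString();
        // use the document source
    }
}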
4,510 | public class TransportNodesListShardStoreMetaData extends TransportNodesOperationAction<TransportNodesListShardStoreMetaData.Request, TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData, TransportNodesListShardStoreMetaData.NodeRequest, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> {
private final IndicesService indicesService;
private final NodeEnvironment nodeEnv;
@Inject
public TransportNodesListShardStoreMetaData(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, NodeEnvironment nodeEnv) {
super(settings, clusterName, threadPool, clusterService, transportService);
this.indicesService = indicesService;
this.nodeEnv = nodeEnv;
}
public ActionFuture<NodesStoreFilesMetaData> list(ShardId shardId, boolean onlyUnallocated, String[] nodesIds, @Nullable TimeValue timeout) {
return execute(new Request(shardId, onlyUnallocated, nodesIds).timeout(timeout));
}
@Override
protected String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
protected String transportAction() {
return "/cluster/nodes/indices/shard/store";
}
@Override
protected Request newRequest() {
return new Request();
}
@Override
protected NodeRequest newNodeRequest() {
return new NodeRequest();
}
@Override
protected NodeRequest newNodeRequest(String nodeId, Request request) {
return new NodeRequest(nodeId, request);
}
@Override
protected NodeStoreFilesMetaData newNodeResponse() {
return new NodeStoreFilesMetaData();
}
@Override
protected NodesStoreFilesMetaData newResponse(Request request, AtomicReferenceArray responses) {
final List<NodeStoreFilesMetaData> nodeStoreFilesMetaDatas = Lists.newArrayList();
final List<FailedNodeException> failures = Lists.newArrayList();
for (int i = 0; i < responses.length(); i++) {
Object resp = responses.get(i);
if (resp instanceof NodeStoreFilesMetaData) { // will also filter out null response for unallocated ones
nodeStoreFilesMetaDatas.add((NodeStoreFilesMetaData) resp);
} else if (resp instanceof FailedNodeException) {
failures.add((FailedNodeException) resp);
}
}
return new NodesStoreFilesMetaData(clusterName, nodeStoreFilesMetaDatas.toArray(new NodeStoreFilesMetaData[nodeStoreFilesMetaDatas.size()]),
failures.toArray(new FailedNodeException[failures.size()]));
}
@Override
protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) throws ElasticsearchException {
if (request.unallocated) {
IndexService indexService = indicesService.indexService(request.shardId.index().name());
if (indexService == null) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
if (!indexService.hasShard(request.shardId.id())) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
}
IndexMetaData metaData = clusterService.state().metaData().index(request.shardId.index().name());
if (metaData == null) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
try {
return new NodeStoreFilesMetaData(clusterService.localNode(), listStoreMetaData(request.shardId));
} catch (IOException e) {
throw new ElasticsearchException("Failed to list store metadata for shard [" + request.shardId + "]", e);
}
}
private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException {
IndexService indexService = indicesService.indexService(shardId.index().name());
if (indexService != null) {
InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId.id());
if (indexShard != null) {
return new StoreFilesMetaData(true, shardId, indexShard.store().list());
}
}
// try and see if we can list unallocated
IndexMetaData metaData = clusterService.state().metaData().index(shardId.index().name());
if (metaData == null) {
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
}
String storeType = metaData.settings().get("index.store.type", "fs");
if (!storeType.contains("fs")) {
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
}
File[] shardLocations = nodeEnv.shardLocations(shardId);
File[] shardIndexLocations = new File[shardLocations.length];
for (int i = 0; i < shardLocations.length; i++) {
shardIndexLocations[i] = new File(shardLocations[i], "index");
}
boolean exists = false;
for (File shardIndexLocation : shardIndexLocations) {
if (shardIndexLocation.exists()) {
exists = true;
break;
}
}
if (!exists) {
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
}
Map<String, String> checksums = Store.readChecksums(shardIndexLocations);
if (checksums == null) {
checksums = ImmutableMap.of();
}
Map<String, StoreFileMetaData> files = Maps.newHashMap();
for (File shardIndexLocation : shardIndexLocations) {
File[] listedFiles = shardIndexLocation.listFiles();
if (listedFiles == null) {
continue;
}
for (File file : listedFiles) {
// BACKWARD CKS SUPPORT
if (file.getName().endsWith(".cks")) {
continue;
}
if (Store.isChecksum(file.getName())) {
continue;
}
files.put(file.getName(), new StoreFileMetaData(file.getName(), file.length(), checksums.get(file.getName())));
}
}
return new StoreFilesMetaData(false, shardId, files);
}
@Override
protected boolean accumulateExceptions() {
return true;
}
public static class StoreFilesMetaData implements Iterable<StoreFileMetaData>, Streamable {
private boolean allocated;
private ShardId shardId;
private Map<String, StoreFileMetaData> files;
StoreFilesMetaData() {
}
public StoreFilesMetaData(boolean allocated, ShardId shardId, Map<String, StoreFileMetaData> files) {
this.allocated = allocated;
this.shardId = shardId;
this.files = files;
}
public boolean allocated() {
return allocated;
}
public ShardId shardId() {
return this.shardId;
}
public long totalSizeInBytes() {
long totalSizeInBytes = 0;
for (StoreFileMetaData file : this) {
totalSizeInBytes += file.length();
}
return totalSizeInBytes;
}
@Override
public Iterator<StoreFileMetaData> iterator() {
return files.values().iterator();
}
public boolean fileExists(String name) {
return files.containsKey(name);
}
public StoreFileMetaData file(String name) {
return files.get(name);
}
public static StoreFilesMetaData readStoreFilesMetaData(StreamInput in) throws IOException {
StoreFilesMetaData md = new StoreFilesMetaData();
md.readFrom(in);
return md;
}
@Override
public void readFrom(StreamInput in) throws IOException {
allocated = in.readBoolean();
shardId = ShardId.readShardId(in);
int size = in.readVInt();
files = Maps.newHashMapWithExpectedSize(size);
for (int i = 0; i < size; i++) {
StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
files.put(md.name(), md);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(allocated);
shardId.writeTo(out);
out.writeVInt(files.size());
for (StoreFileMetaData md : files.values()) {
md.writeTo(out);
}
}
}
static class Request extends NodesOperationRequest<Request> {
private ShardId shardId;
private boolean unallocated;
public Request() {
}
public Request(ShardId shardId, boolean unallocated, Set<String> nodesIds) {
super(nodesIds.toArray(new String[nodesIds.size()]));
this.shardId = shardId;
this.unallocated = unallocated;
}
public Request(ShardId shardId, boolean unallocated, String... nodesIds) {
super(nodesIds);
this.shardId = shardId;
this.unallocated = unallocated;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = ShardId.readShardId(in);
unallocated = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
out.writeBoolean(unallocated);
}
}
public static class NodesStoreFilesMetaData extends NodesOperationResponse<NodeStoreFilesMetaData> {
private FailedNodeException[] failures;
NodesStoreFilesMetaData() {
}
public NodesStoreFilesMetaData(ClusterName clusterName, NodeStoreFilesMetaData[] nodes, FailedNodeException[] failures) {
super(clusterName, nodes);
this.failures = failures;
}
public FailedNodeException[] failures() {
return failures;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodes = new NodeStoreFilesMetaData[in.readVInt()];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeStoreFilesMetaData.readListShardStoreNodeOperationResponse(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(nodes.length);
for (NodeStoreFilesMetaData response : nodes) {
response.writeTo(out);
}
}
}
static class NodeRequest extends NodeOperationRequest {
private ShardId shardId;
private boolean unallocated;
NodeRequest() {
}
NodeRequest(String nodeId, TransportNodesListShardStoreMetaData.Request request) {
super(request, nodeId);
this.shardId = request.shardId;
this.unallocated = request.unallocated;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = ShardId.readShardId(in);
unallocated = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
out.writeBoolean(unallocated);
}
}
public static class NodeStoreFilesMetaData extends NodeOperationResponse {
private StoreFilesMetaData storeFilesMetaData;
NodeStoreFilesMetaData() {
}
public NodeStoreFilesMetaData(DiscoveryNode node, StoreFilesMetaData storeFilesMetaData) {
super(node);
this.storeFilesMetaData = storeFilesMetaData;
}
public StoreFilesMetaData storeFilesMetaData() {
return storeFilesMetaData;
}
public static NodeStoreFilesMetaData readListShardStoreNodeOperationResponse(StreamInput in) throws IOException {
NodeStoreFilesMetaData resp = new NodeStoreFilesMetaData();
resp.readFrom(in);
return resp;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.readBoolean()) {
storeFilesMetaData = StoreFilesMetaData.readStoreFilesMetaData(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (storeFilesMetaData == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
storeFilesMetaData.writeTo(out);
}
}
}
} | 1no label
| src_main_java_org_elasticsearch_indices_store_TransportNodesListShardStoreMetaData.java |
2,348 | public interface Text extends Comparable<Text>, Serializable {
/**
* Are bytes available without the need to be converted into bytes when calling {@link #bytes()}.
*/
boolean hasBytes();
/**
* The UTF8 bytes representing the text; might be converted on the fly, see {@link #hasBytes()}
*/
BytesReference bytes();
/**
* Is there a {@link String} representation of the text. If not, then {@link #hasBytes()} is <tt>true</tt>.
*/
boolean hasString();
/**
* Returns the string representation of the text, might be converted to a string on the fly.
*/
String string();
/**
* Returns the string representation of the text, might be converted to a string on the fly.
*/
String toString();
} | 0true
| src_main_java_org_elasticsearch_common_text_Text.java |
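A minimal string-backed sketch of the lazy-conversion contract; Elasticsearch ships its own implementations, so this is purely illustrative:

public class SimpleStringText implements Text {
    private final String text;
    private BytesReference bytes; // derived lazily
    public SimpleStringText(String text) {
        this.text = text;
    }
    public boolean hasBytes() {
        return bytes != null; // true only once bytes() has been called
    }
    public BytesReference bytes() {
        if (bytes == null) {
            bytes = new BytesArray(text.getBytes(Charset.forName("UTF-8")));
        }
        return bytes;
    }
    public boolean hasString() {
        return true;
    }
    public String string() {
        return text;
    }
    @Override
    public String toString() {
        return string();
    }
    public int compareTo(Text other) {
        return string().compareTo(other.string());
    }
}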
3,476 | public class DocumentFieldMappers implements Iterable<FieldMapper> {
private final DocumentMapper docMapper;
private final FieldMappersLookup fieldMappers;
private volatile FieldNameAnalyzer indexAnalyzer;
private volatile FieldNameAnalyzer searchAnalyzer;
private volatile FieldNameAnalyzer searchQuoteAnalyzer;
public DocumentFieldMappers(DocumentMapper docMapper) {
this.docMapper = docMapper;
this.fieldMappers = new FieldMappersLookup();
this.indexAnalyzer = new FieldNameAnalyzer(ImmutableOpenMap.<String, Analyzer>of(), docMapper.indexAnalyzer());
this.searchAnalyzer = new FieldNameAnalyzer(ImmutableOpenMap.<String, Analyzer>of(), docMapper.searchAnalyzer());
this.searchQuoteAnalyzer = new FieldNameAnalyzer(ImmutableOpenMap.<String, Analyzer>of(), docMapper.searchQuotedAnalyzer());
}
public void addNewMappers(Iterable<FieldMapper> newMappers) {
fieldMappers.addNewMappers(newMappers);
final ImmutableOpenMap.Builder<String, Analyzer> indexAnalyzers = ImmutableOpenMap.builder(this.indexAnalyzer.analyzers());
final ImmutableOpenMap.Builder<String, Analyzer> searchAnalyzers = ImmutableOpenMap.builder(this.searchAnalyzer.analyzers());
final ImmutableOpenMap.Builder<String, Analyzer> searchQuoteAnalyzers = ImmutableOpenMap.builder(this.searchQuoteAnalyzer.analyzers());
for (FieldMapper fieldMapper : newMappers) {
if (fieldMapper.indexAnalyzer() != null) {
indexAnalyzers.put(fieldMapper.names().indexName(), fieldMapper.indexAnalyzer());
}
if (fieldMapper.searchAnalyzer() != null) {
searchAnalyzers.put(fieldMapper.names().indexName(), fieldMapper.searchAnalyzer());
}
if (fieldMapper.searchQuoteAnalyzer() != null) {
searchQuoteAnalyzers.put(fieldMapper.names().indexName(), fieldMapper.searchQuoteAnalyzer());
}
}
this.indexAnalyzer = new FieldNameAnalyzer(indexAnalyzers.build(), docMapper.indexAnalyzer());
this.searchAnalyzer = new FieldNameAnalyzer(searchAnalyzers.build(), docMapper.searchAnalyzer());
this.searchQuoteAnalyzer = new FieldNameAnalyzer(searchQuoteAnalyzers.build(), docMapper.searchQuotedAnalyzer());
}
@Override
public UnmodifiableIterator<FieldMapper> iterator() {
return fieldMappers.iterator();
}
public ImmutableList<FieldMapper> mappers() {
return this.fieldMappers.mappers();
}
public boolean hasMapper(FieldMapper fieldMapper) {
return fieldMappers.mappers().contains(fieldMapper);
}
public FieldMappers name(String name) {
return fieldMappers.name(name);
}
public FieldMappers indexName(String indexName) {
return fieldMappers.indexName(indexName);
}
public FieldMappers fullName(String fullName) {
return fieldMappers.fullName(fullName);
}
public Set<String> simpleMatchToIndexNames(String pattern) {
return fieldMappers.simpleMatchToIndexNames(pattern);
}
public Set<String> simpleMatchToFullName(String pattern) {
return fieldMappers.simpleMatchToFullName(pattern);
}
/**
* Tries to find first based on {@link #fullName(String)}, then by {@link #indexName(String)}, and last
* by {@link #name(String)}.
*/
public FieldMappers smartName(String name) {
return fieldMappers.smartName(name);
}
public FieldMapper smartNameFieldMapper(String name) {
return fieldMappers.smartNameFieldMapper(name);
}
/**
* A smart analyzer used for indexing that takes into account specific analyzers configured
* per {@link FieldMapper}.
*/
public Analyzer indexAnalyzer() {
return this.indexAnalyzer;
}
/**
* A smart analyzer used for indexing that takes into account specific analyzers configured
 * per {@link FieldMapper}, falling back to the given default analyzer for fields without an explicit one.
*/
public Analyzer indexAnalyzer(Analyzer defaultAnalyzer) {
return new FieldNameAnalyzer(indexAnalyzer.analyzers(), defaultAnalyzer);
}
/**
* A smart analyzer used for searching that takes into account specific analyzers configured
* per {@link FieldMapper}.
*/
public Analyzer searchAnalyzer() {
return this.searchAnalyzer;
}
public Analyzer searchQuoteAnalyzer() {
return this.searchQuoteAnalyzer;
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_DocumentFieldMappers.java |
165 | public class ForkJoinPool extends AbstractExecutorService {
/*
* Implementation Overview
*
* This class and its nested classes provide the main
* functionality and control for a set of worker threads:
* Submissions from non-FJ threads enter into submission queues.
* Workers take these tasks and typically split them into subtasks
* that may be stolen by other workers. Preference rules give
* first priority to processing tasks from their own queues (LIFO
* or FIFO, depending on mode), then to randomized FIFO steals of
* tasks in other queues.
*
* WorkQueues
* ==========
*
* Most operations occur within work-stealing queues (in nested
* class WorkQueue). These are special forms of Deques that
* support only three of the four possible end-operations -- push,
* pop, and poll (aka steal), under the further constraints that
* push and pop are called only from the owning thread (or, as
* extended here, under a lock), while poll may be called from
* other threads. (If you are unfamiliar with them, you probably
* want to read Herlihy and Shavit's book "The Art of
 * Multiprocessor Programming", chapter 16 describing these in
* more detail before proceeding.) The main work-stealing queue
* design is roughly similar to those in the papers "Dynamic
* Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
* (http://research.sun.com/scalable/pubs/index.html) and
* "Idempotent work stealing" by Michael, Saraswat, and Vechev,
* PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
* The main differences ultimately stem from GC requirements that
* we null out taken slots as soon as we can, to maintain as small
* a footprint as possible even in programs generating huge
* numbers of tasks. To accomplish this, we shift the CAS
* arbitrating pop vs poll (steal) from being on the indices
* ("base" and "top") to the slots themselves. So, both a
* successful pop and poll mainly entail a CAS of a slot from
* non-null to null. Because we rely on CASes of references, we
* do not need tag bits on base or top. They are simple ints as
* used in any circular array-based queue (see for example
* ArrayDeque). Updates to the indices must still be ordered in a
* way that guarantees that top == base means the queue is empty,
* but otherwise may err on the side of possibly making the queue
* appear nonempty when a push, pop, or poll have not fully
* committed. Note that this means that the poll operation,
* considered individually, is not wait-free. One thief cannot
* successfully continue until another in-progress one (or, if
* previously empty, a push) completes. However, in the
* aggregate, we ensure at least probabilistic non-blockingness.
* If an attempted steal fails, a thief always chooses a different
* random victim target to try next. So, in order for one thief to
* progress, it suffices for any in-progress poll or new push on
* any empty queue to complete. (This is why we normally use
* method pollAt and its variants that try once at the apparent
* base index, else consider alternative actions, rather than
* method poll.)
*
* This approach also enables support of a user mode in which local
* task processing is in FIFO, not LIFO order, simply by using
* poll rather than pop. This can be useful in message-passing
* frameworks in which tasks are never joined. However neither
 * mode considers affinities, loads, cache localities, etc, so
 * they rarely provide the best possible performance on a given
 * machine, but they portably provide good throughput by averaging
 * over these factors. (Further, even if we did try to use such
* information, we do not usually have a basis for exploiting it.
* For example, some sets of tasks profit from cache affinities,
* but others are harmed by cache pollution effects.)
*
* WorkQueues are also used in a similar way for tasks submitted
* to the pool. We cannot mix these tasks in the same queues used
* for work-stealing (this would contaminate lifo/fifo
* processing). Instead, we randomly associate submission queues
* with submitting threads, using a form of hashing. The
* ThreadLocal Submitter class contains a value initially used as
* a hash code for choosing existing queues, but may be randomly
* repositioned upon contention with other submitters. In
* essence, submitters act like workers except that they are
* restricted to executing local tasks that they submitted (or in
* the case of CountedCompleters, others with the same root task).
* However, because most shared/external queue operations are more
* expensive than internal, and because, at steady state, external
* submitters will compete for CPU with workers, ForkJoinTask.join
* and related methods disable them from repeatedly helping to
* process tasks if all workers are active. Insertion of tasks in
* shared mode requires a lock (mainly to protect in the case of
* resizing) but we use only a simple spinlock (using bits in
* field qlock), because submitters encountering a busy queue move
* on to try or create other queues -- they block only when
* creating and registering new queues.
*
* Management
* ==========
*
* The main throughput advantages of work-stealing stem from
* decentralized control -- workers mostly take tasks from
* themselves or each other. We cannot negate this in the
* implementation of other management responsibilities. The main
* tactic for avoiding bottlenecks is packing nearly all
* essentially atomic control state into two volatile variables
* that are by far most often read (not written) as status and
* consistency checks.
*
* Field "ctl" contains 64 bits holding all the information needed
* to atomically decide to add, inactivate, enqueue (on an event
* queue), dequeue, and/or re-activate workers. To enable this
* packing, we restrict maximum parallelism to (1<<15)-1 (which is
* far in excess of normal operating range) to allow ids, counts,
* and their negations (used for thresholding) to fit into 16bit
* fields.
*
* Field "plock" is a form of sequence lock with a saturating
* shutdown bit (similarly for per-queue "qlocks"), mainly
* protecting updates to the workQueues array, as well as to
* enable shutdown. When used as a lock, it is normally only very
* briefly held, so is nearly always available after at most a
* brief spin, but we use a monitor-based backup strategy to
* block when needed.
*
* Recording WorkQueues. WorkQueues are recorded in the
* "workQueues" array that is created upon first use and expanded
* if necessary. Updates to the array while recording new workers
* and unrecording terminated ones are protected from each other
* by a lock but the array is otherwise concurrently readable, and
* accessed directly. To simplify index-based operations, the
* array size is always a power of two, and all readers must
* tolerate null slots. Worker queues are at odd indices. Shared
* (submission) queues are at even indices, up to a maximum of 64
* slots, to limit growth even if array needs to expand to add
* more workers. Grouping them together in this way simplifies and
* speeds up task scanning.
*
* All worker thread creation is on-demand, triggered by task
* submissions, replacement of terminated workers, and/or
* compensation for blocked workers. However, all other support
* code is set up to work with other policies. To ensure that we
* do not hold on to worker references that would prevent GC, ALL
* accesses to workQueues are via indices into the workQueues
* array (which is one source of some of the messy code
* constructions here). In essence, the workQueues array serves as
* a weak reference mechanism. Thus for example the wait queue
* field of ctl stores indices, not references. Access to the
* workQueues in associated methods (for example signalWork) must
* both index-check and null-check the IDs. All such accesses
* ignore bad IDs by returning out early from what they are doing,
* since this can only be associated with termination, in which
* case it is OK to give up. All uses of the workQueues array
* also check that it is non-null (even if previously
* non-null). This allows nulling during termination, which is
* currently not necessary, but remains an option for
* resource-revocation-based shutdown schemes. It also helps
* reduce JIT issuance of uncommon-trap code, which tends to
* unnecessarily complicate control flow in some methods.
*
* Event Queuing. Unlike HPC work-stealing frameworks, we cannot
* let workers spin indefinitely scanning for tasks when none can
* be found immediately, and we cannot start/resume workers unless
* there appear to be tasks available. On the other hand, we must
* quickly prod them into action when new tasks are submitted or
* generated. In many usages, ramp-up time to activate workers is
* the main limiting factor in overall performance (this is
* compounded at program start-up by JIT compilation and
* allocation). So we try to streamline this as much as possible.
* We park/unpark workers after placing in an event wait queue
* when they cannot find work. This "queue" is actually a simple
* Treiber stack, headed by the "id" field of ctl, plus a 15bit
* counter value (that reflects the number of times a worker has
* been inactivated) to avoid ABA effects (we need only as many
* version numbers as worker threads). Successors are held in
* field WorkQueue.nextWait. Queuing deals with several intrinsic
* races, mainly that a task-producing thread can miss seeing (and
* signalling) another thread that gave up looking for work but
* has not yet entered the wait queue. We solve this by requiring
* a full sweep of all workers (via repeated calls to method
* scan()) both before and after a newly waiting worker is added
* to the wait queue. During a rescan, the worker might release
* some other queued worker rather than itself, which has the same
* net effect. Because enqueued workers may actually be rescanning
* rather than waiting, we set and clear the "parker" field of
* WorkQueues to reduce unnecessary calls to unpark. (This
* requires a secondary recheck to avoid missed signals.) Note
* the unusual conventions about Thread.interrupts surrounding
* parking and other blocking: Because interrupts are used solely
* to alert threads to check termination, which is checked anyway
* upon blocking, we clear status (using Thread.interrupted)
* before any call to park, so that park does not immediately
* return due to status being set via some other unrelated call to
* interrupt in user code.
*
* Signalling. We create or wake up workers only when there
* appears to be at least one task they might be able to find and
* execute. However, many other threads may notice the same task
* and each signal to wake up a thread that might take it. So in
* general, pools will be over-signalled. When a submission is
* added or another worker adds a task to a queue that has fewer
* than two tasks, they signal waiting workers (or trigger
* creation of new ones if fewer than the given parallelism level
* -- signalWork), and may leave a hint to the unparked worker to
 * help signal others upon wakeup. These primary signals are
* buttressed by others (see method helpSignal) whenever other
* threads scan for work or do not have a task to process. On
* most platforms, signalling (unpark) overhead time is noticeably
* long, and the time between signalling a thread and it actually
* making progress can be very noticeably long, so it is worth
* offloading these delays from critical paths as much as
* possible.
*
* Trimming workers. To release resources after periods of lack of
* use, a worker starting to wait when the pool is quiescent will
* time out and terminate if the pool has remained quiescent for a
* given period -- a short period if there are more threads than
* parallelism, longer as the number of threads decreases. This
* will slowly propagate, eventually terminating all workers after
* periods of non-use.
*
* Shutdown and Termination. A call to shutdownNow atomically sets
* a plock bit and then (non-atomically) sets each worker's
* qlock status, cancels all unprocessed tasks, and wakes up
* all waiting workers. Detecting whether termination should
* commence after a non-abrupt shutdown() call requires more work
* and bookkeeping. We need consensus about quiescence (i.e., that
* there is no more work). The active count provides a primary
* indication but non-abrupt shutdown still requires a rechecking
* scan for any workers that are inactive but not queued.
*
* Joining Tasks
* =============
*
* Any of several actions may be taken when one worker is waiting
* to join a task stolen (or always held) by another. Because we
* are multiplexing many tasks on to a pool of workers, we can't
* just let them block (as in Thread.join). We also cannot just
* reassign the joiner's run-time stack with another and replace
* it later, which would be a form of "continuation", that even if
* possible is not necessarily a good idea since we sometimes need
* both an unblocked task and its continuation to progress.
* Instead we combine two tactics:
*
* Helping: Arranging for the joiner to execute some task that it
* would be running if the steal had not occurred.
*
* Compensating: Unless there are already enough live threads,
* method tryCompensate() may create or re-activate a spare
* thread to compensate for blocked joiners until they unblock.
*
* A third form (implemented in tryRemoveAndExec) amounts to
* helping a hypothetical compensator: If we can readily tell that
* a possible action of a compensator is to steal and execute the
* task being joined, the joining thread can do so directly,
* without the need for a compensation thread (although at the
* expense of larger run-time stacks, but the tradeoff is
* typically worthwhile).
*
* The ManagedBlocker extension API can't use helping so relies
* only on compensation in method awaitBlocker.
*
* The algorithm in tryHelpStealer entails a form of "linear"
* helping: Each worker records (in field currentSteal) the most
* recent task it stole from some other worker. Plus, it records
* (in field currentJoin) the task it is currently actively
* joining. Method tryHelpStealer uses these markers to try to
* find a worker to help (i.e., steal back a task from and execute
* it) that could hasten completion of the actively joined task.
* In essence, the joiner executes a task that would be on its own
* local deque had the to-be-joined task not been stolen. This may
* be seen as a conservative variant of the approach in Wagner &
* Calder "Leapfrogging: a portable technique for implementing
* efficient futures" SIGPLAN Notices, 1993
* (http://portal.acm.org/citation.cfm?id=155354). It differs in
* that: (1) We only maintain dependency links across workers upon
* steals, rather than use per-task bookkeeping. This sometimes
* requires a linear scan of workQueues array to locate stealers,
* but often doesn't because stealers leave hints (that may become
* stale/wrong) of where to locate them. It is only a hint
* because a worker might have had multiple steals and the hint
* records only one of them (usually the most current). Hinting
* isolates cost to when it is needed, rather than adding to
* per-task overhead. (2) It is "shallow", ignoring nesting and
* potentially cyclic mutual steals. (3) It is intentionally
* racy: field currentJoin is updated only while actively joining,
* which means that we miss links in the chain during long-lived
* tasks, GC stalls etc (which is OK since blocking in such cases
* is usually a good idea). (4) We bound the number of attempts
* to find work (see MAX_HELP) and fall back to suspending the
* worker and if necessary replacing it with another.
*
* Helping actions for CountedCompleters are much simpler: Method
* helpComplete can take and execute any task with the same root
* as the task being waited on. However, this still entails some
* traversal of completer chains, so is less efficient than using
* CountedCompleters without explicit joins.
*
* It is impossible to keep exactly the target parallelism number
* of threads running at any given time. Determining the
* existence of conservatively safe helping targets, the
* availability of already-created spares, and the apparent need
* to create new spares are all racy, so we rely on multiple
* retries of each. Compensation in the apparent absence of
* helping opportunities is challenging to control on JVMs, where
* GC and other activities can stall progress of tasks that in
* turn stall out many other dependent tasks, without us being
* able to determine whether they will ever require compensation.
* Even though work-stealing otherwise encounters little
* degradation in the presence of more threads than cores,
* aggressively adding new threads in such cases entails risk of
* unwanted positive feedback control loops in which more threads
* cause more dependent stalls (as well as delayed progress of
* unblocked threads to the point that we know they are available)
* leading to more situations requiring more threads, and so
* on. This aspect of control can be seen as an (analytically
* intractable) game with an opponent that may choose the worst
* (for us) active thread to stall at any time. We take several
* precautions to bound losses (and thus bound gains), mainly in
* methods tryCompensate and awaitJoin.
*
* Common Pool
* ===========
*
* The static common Pool always exists after static
* initialization. Since it (or any other created pool) need
* never be used, we minimize initial construction overhead and
* footprint to the setup of about a dozen fields, with no nested
* allocation. Most bootstrapping occurs within method
* fullExternalPush during the first submission to the pool.
*
* When external threads submit to the common pool, they can
* perform some subtask processing (see externalHelpJoin and
* related methods). We do not need to record whether these
* submissions are to the common pool -- if not, externalHelpJoin
* returns quickly (at the most helping to signal some common pool
* workers). These submitters would otherwise be blocked waiting
* for completion, so the extra effort (with liberally sprinkled
* task status checks) in inapplicable cases amounts to an odd
* form of limited spin-wait before blocking in ForkJoinTask.join.
*
* Style notes
* ===========
*
* There is a lot of representation-level coupling among classes
* ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
* fields of WorkQueue maintain data structures managed by
* ForkJoinPool, so are directly accessed. There is little point
* trying to reduce this, since any associated future changes in
* representations will need to be accompanied by algorithmic
* changes anyway. Several methods intrinsically sprawl because
* they must accumulate sets of consistent reads of volatiles held
* in local variables. Methods signalWork() and scan() are the
* main bottlenecks, so are especially heavily
* micro-optimized/mangled. There are lots of inline assignments
* (of form "while ((local = field) != 0)") which are usually the
* simplest way to ensure the required read orderings (which are
* sometimes critical). This leads to a "C"-like style of listing
* declarations of these locals at the heads of methods or blocks.
* There are several occurrences of the unusual "do {} while
* (!cas...)" which is the simplest way to force an update of a
* CAS'ed variable. There are also other coding oddities (including
* several unnecessary-looking hoisted null checks) that help
* some methods perform reasonably even when interpreted (not
* compiled).
*
* The order of declarations in this file is:
* (1) Static utility functions
* (2) Nested (static) classes
* (3) Static fields
* (4) Fields, along with constants used when unpacking some of them
* (5) Internal control methods
* (6) Callbacks and other support for ForkJoinTask methods
* (7) Exported methods
* (8) Static block initializing statics in minimally dependent order
*/
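    /*
     * Illustrative sketch only (not part of the algorithm above): a minimal
     * client-side use of the machinery just described, assuming the
     * RecursiveTask companion class from this package. fork() pushes onto
     * the calling worker's deque, idle workers steal from the base, and
     * join() uses the helping/compensating strategies described under
     * "Joining Tasks".
     */
    static final class FibExample extends RecursiveTask<Integer> {
        final int n;
        FibExample(int n) { this.n = n; }
        protected Integer compute() {
            if (n <= 1)
                return n;
            FibExample f1 = new FibExample(n - 1);
            f1.fork();                         // may be stolen by another worker
            int f2 = new FibExample(n - 2).compute();
            return f2 + f1.join();             // help or compensate while waiting
        }
        // usage: new ForkJoinPool().invoke(new FibExample(20)) returns 6765
    }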
// Static utilities
/**
* If there is a security manager, makes sure caller has
* permission to modify threads.
*/
private static void checkPermission() {
SecurityManager security = System.getSecurityManager();
if (security != null)
security.checkPermission(modifyThreadPermission);
}
// Nested classes
/**
* Factory for creating new {@link ForkJoinWorkerThread}s.
* A {@code ForkJoinWorkerThreadFactory} must be defined and used
* for {@code ForkJoinWorkerThread} subclasses that extend base
* functionality or initialize threads with different contexts.
*/
public static interface ForkJoinWorkerThreadFactory {
/**
* Returns a new worker thread operating in the given pool.
*
* @param pool the pool this thread works in
* @throws NullPointerException if the pool is null
*/
public ForkJoinWorkerThread newThread(ForkJoinPool pool);
}
/**
* Default ForkJoinWorkerThreadFactory implementation; creates a
* new ForkJoinWorkerThread.
*/
static final class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
return new ForkJoinWorkerThread(pool);
}
}
/**
* Per-thread records for threads that submit to pools. Currently
* holds only pseudo-random seed / index that is used to choose
* submission queues in method externalPush. In the future, this may
* also incorporate a means to implement different task rejection
* and resubmission policies.
*
* Seeds for submitters and workers/workQueues work in basically
* the same way but are initialized and updated using slightly
* different mechanics. Both are initialized using the same
* approach as in class ThreadLocal, where successive values are
* unlikely to collide with previous values. Seeds are then
* randomly modified upon collisions using xorshifts, which
* requires a non-zero seed.
*/
static final class Submitter {
int seed;
Submitter(int s) { seed = s; }
}
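    /*
     * Illustrative sketch only (hypothetical helper, not used anywhere):
     * how a submitter's seed selects an even-indexed submission slot; this
     * mirrors the "m & z.seed & SQMASK" indexing used by externalPush below.
     */
    static int exampleSubmitterSlot(int seed, int tableLength) {
        // SQMASK keeps the low bit clear (even slots only), capped at 64 slots
        return (tableLength - 1) & seed & SQMASK;
    }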
/**
* Class for artificial tasks that are used to replace the target
* of local joins if they are removed from an interior queue slot
* in WorkQueue.tryRemoveAndExec. We don't need the proxy to
* actually do anything beyond having a unique identity.
*/
static final class EmptyTask extends ForkJoinTask<Void> {
private static final long serialVersionUID = -7721805057305804111L;
EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
public final Void getRawResult() { return null; }
public final void setRawResult(Void x) {}
public final boolean exec() { return true; }
}
/**
* Queues supporting work-stealing as well as external task
* submission. See above for main rationale and algorithms.
* Implementation relies heavily on "Unsafe" intrinsics
* and selective use of "volatile":
*
* Field "base" is the index (mod array.length) of the least valid
* queue slot, which is always the next position to steal (poll)
* from if nonempty. Reads and writes require volatile orderings
* but not CAS, because updates are only performed after slot
* CASes.
*
* Field "top" is the index (mod array.length) of the next queue
* slot to push to or pop from. It is written only by owner thread
* for push, or under lock for external/shared push, and accessed
* by other threads only after reading (volatile) base. Both top
* and base are allowed to wrap around on overflow, but (top -
* base) (or more commonly -(base - top) to force volatile read of
* base before top) still estimates size. The lock ("qlock") is
* forced to -1 on termination, causing all further lock attempts
* to fail. (Note: we don't need CAS for termination state because
* upon pool shutdown, all shared-queues will stop being used
* anyway.) Nearly all lock bodies are set up so that exceptions
* within lock bodies are "impossible" (modulo JVM errors that
* would cause failure anyway.)
*
* The array slots are read and written using the emulation of
* volatiles/atomics provided by Unsafe. Insertions must in
* general use putOrderedObject as a form of releasing store to
* ensure that all writes to the task object are ordered before
* its publication in the queue. All removals entail a CAS to
* null. The array is always a power of two. To ensure safety of
* Unsafe array operations, all accesses perform explicit null
* checks and implicit bounds checks via power-of-two masking.
*
* In addition to basic queuing support, this class contains
* fields described elsewhere to control execution. It turns out
* to work better memory-layout-wise to include them in this class
* rather than a separate class.
*
* Performance on most platforms is very sensitive to placement of
* instances of both WorkQueues and their arrays -- we absolutely
* do not want multiple WorkQueue instances or multiple queue
* arrays sharing cache lines. (It would be best for queue objects
* and their arrays to share, but there is nothing available to
* help arrange that). Unfortunately, because they are recorded
* in a common array, WorkQueue instances are often moved to be
* adjacent by garbage collectors. To reduce impact, we use field
* padding that works OK on common platforms; this effectively
* trades off slightly slower average field access for the sake of
* avoiding really bad worst-case access. (Until better JVM
* support is in place, this padding is dependent on transient
* properties of JVM field layout rules.) We also take care in
* allocating, sizing and resizing the array. Non-shared queue
* arrays are initialized by workers before use. Others are
* allocated on first use.
*/
static final class WorkQueue {
/**
* Capacity of work-stealing queue array upon initialization.
* Must be a power of two; at least 4, but should be larger to
* reduce or eliminate cacheline sharing among queues.
* Currently, it is much larger, as a partial workaround for
* the fact that JVMs often place arrays in locations that
* share GC bookkeeping (especially cardmarks) such that
* per-write accesses encounter serious memory contention.
*/
static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
/**
* Maximum size for queue arrays. Must be a power of two less
* than or equal to 1 << (31 - width of array entry) to ensure
* lack of wraparound of index calculations, but defined to a
* value a bit less than this to help users trap runaway
* programs before saturating systems.
*/
static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
// Heuristic padding to ameliorate unfortunate memory placements
volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
int seed; // for random scanning; initialize nonzero
volatile int eventCount; // encoded inactivation count; < 0 if inactive
int nextWait; // encoded record of next event waiter
int hint; // steal or signal hint (index)
int poolIndex; // index of this queue in pool (or 0)
final int mode; // 0: lifo, > 0: fifo, < 0: shared
int nsteals; // number of steals
volatile int qlock; // 1: locked, -1: terminate; else 0
volatile int base; // index of next slot for poll
int top; // index of next slot for push
ForkJoinTask<?>[] array; // the elements (initially unallocated)
final ForkJoinPool pool; // the containing pool (may be null)
final ForkJoinWorkerThread owner; // owning thread or null if shared
volatile Thread parker; // == owner during call to park; else null
volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
ForkJoinTask<?> currentSteal; // current non-local task being executed
volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
volatile Object pad18, pad19, pad1a, pad1b, pad1c, pad1d;
WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode,
int seed) {
this.pool = pool;
this.owner = owner;
this.mode = mode;
this.seed = seed;
// Place indices in the center of array (that is not yet allocated)
base = top = INITIAL_QUEUE_CAPACITY >>> 1;
}
/**
* Returns the approximate number of tasks in the queue.
*/
final int queueSize() {
int n = base - top; // non-owner callers must read base first
return (n >= 0) ? 0 : -n; // ignore transient negative
}
/**
* Provides a more accurate estimate of whether this queue has
* any tasks than does queueSize, by checking whether a
* near-empty queue has at least one unclaimed task.
*/
final boolean isEmpty() {
ForkJoinTask<?>[] a; int m, s;
int n = base - (s = top);
return (n >= 0 ||
(n == -1 &&
((a = array) == null ||
(m = a.length - 1) < 0 ||
U.getObject
(a, (long)((m & (s - 1)) << ASHIFT) + ABASE) == null)));
}
/**
* Pushes a task. Call only by owner in unshared queues. (The
* shared-queue version is embedded in method externalPush.)
*
* @param task the task. Caller must ensure non-null.
* @throws RejectedExecutionException if array cannot be resized
*/
final void push(ForkJoinTask<?> task) {
ForkJoinTask<?>[] a; ForkJoinPool p;
int s = top, m, n;
if ((a = array) != null) { // ignore if queue removed
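                // translate the logical slot (s mod length) into the raw byte
                // offset that Unsafe expects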
int j = (((m = a.length - 1) & s) << ASHIFT) + ABASE;
U.putOrderedObject(a, j, task);
if ((n = (top = s + 1) - base) <= 2) {
if ((p = pool) != null)
p.signalWork(this);
}
else if (n >= m)
growArray();
}
}
/**
* Initializes or doubles the capacity of array. Call either
* by owner or with lock held -- it is OK for base, but not
* top, to move while resizings are in progress.
*/
final ForkJoinTask<?>[] growArray() {
ForkJoinTask<?>[] oldA = array;
int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
if (size > MAXIMUM_QUEUE_CAPACITY)
throw new RejectedExecutionException("Queue capacity exceeded");
int oldMask, t, b;
ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
(t = top) - (b = base) > 0) {
int mask = size - 1;
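                // move slots one at a time with CAS: concurrent stealers may
                // still be polling oldA, so an unconditional copy could
                // duplicate a task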
do {
ForkJoinTask<?> x;
int oldj = ((b & oldMask) << ASHIFT) + ABASE;
int j = ((b & mask) << ASHIFT) + ABASE;
x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
if (x != null &&
U.compareAndSwapObject(oldA, oldj, x, null))
U.putObjectVolatile(a, j, x);
} while (++b != t);
}
return a;
}
/**
* Takes next task, if one exists, in LIFO order. Call only
* by owner in unshared queues.
*/
final ForkJoinTask<?> pop() {
ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
if ((a = array) != null && (m = a.length - 1) >= 0) {
for (int s; (s = top - 1) - base >= 0;) {
long j = ((m & s) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
break;
if (U.compareAndSwapObject(a, j, t, null)) {
top = s;
return t;
}
}
}
return null;
}
/**
* Takes a task in FIFO order if b is base of queue and a task
* can be claimed without contention. Specialized versions
* appear in ForkJoinPool methods scan and tryHelpStealer.
*/
final ForkJoinTask<?> pollAt(int b) {
ForkJoinTask<?> t; ForkJoinTask<?>[] a;
if ((a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
base == b &&
U.compareAndSwapObject(a, j, t, null)) {
base = b + 1;
return t;
}
}
return null;
}
/**
* Takes next task, if one exists, in FIFO order.
*/
final ForkJoinTask<?> poll() {
ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
while ((b = base) - top < 0 && (a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
if (t != null) {
if (base == b &&
U.compareAndSwapObject(a, j, t, null)) {
base = b + 1;
return t;
}
}
else if (base == b) {
if (b + 1 == top)
break;
Thread.yield(); // wait for lagging update (very rare)
}
}
return null;
}
/**
* Takes next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> nextLocalTask() {
return mode == 0 ? pop() : poll();
}
/**
* Returns next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> peek() {
ForkJoinTask<?>[] a = array; int m;
if (a == null || (m = a.length - 1) < 0)
return null;
int i = mode == 0 ? top - 1 : base;
int j = ((i & m) << ASHIFT) + ABASE;
return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
}
/**
* Pops the given task only if it is at the current top.
* (A shared version is available only via FJP.tryExternalUnpush)
*/
final boolean tryUnpush(ForkJoinTask<?> t) {
ForkJoinTask<?>[] a; int s;
if ((a = array) != null && (s = top) != base &&
U.compareAndSwapObject
(a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
top = s;
return true;
}
return false;
}
/**
* Removes and cancels all known tasks, ignoring any exceptions.
*/
final void cancelAll() {
ForkJoinTask.cancelIgnoringExceptions(currentJoin);
ForkJoinTask.cancelIgnoringExceptions(currentSteal);
for (ForkJoinTask<?> t; (t = poll()) != null; )
ForkJoinTask.cancelIgnoringExceptions(t);
}
/**
* Computes next value for random probes. Scans don't require
* a very high quality generator, but also not a crummy one.
* Marsaglia xor-shift is cheap and works well enough. Note:
* This is manually inlined in its usages in ForkJoinPool to
* avoid writes inside busy scan loops.
*/
final int nextSeed() {
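            // Marsaglia xorshift with shifts (13, 17, 5): full period 2^32 - 1
            // over the nonzero ints, hence the requirement of a nonzero seed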
int r = seed;
r ^= r << 13;
r ^= r >>> 17;
return seed = r ^= r << 5;
}
// Specialized execution methods
/**
* Pops and runs tasks until empty.
*/
private void popAndExecAll() {
// A bit faster than repeated pop calls
ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t;
while ((a = array) != null && (m = a.length - 1) >= 0 &&
(s = top - 1) - base >= 0 &&
(t = ((ForkJoinTask<?>)
U.getObject(a, j = ((m & s) << ASHIFT) + ABASE)))
!= null) {
if (U.compareAndSwapObject(a, j, t, null)) {
top = s;
t.doExec();
}
}
}
/**
* Polls and runs tasks until empty.
*/
private void pollAndExecAll() {
for (ForkJoinTask<?> t; (t = poll()) != null;)
t.doExec();
}
/**
* If present, removes from queue and executes the given task,
 * or any other cancelled task. Returns true on any CAS
 * or consistency check failure so the caller can retry.
*
* @return false if no progress can be made, else true
*/
final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
boolean stat = true, removed = false, empty = true;
ForkJoinTask<?>[] a; int m, s, b, n;
if ((a = array) != null && (m = a.length - 1) >= 0 &&
(n = (s = top) - (b = base)) > 0) {
for (ForkJoinTask<?> t;;) { // traverse from s to b
int j = ((--s & m) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
if (t == null) // inconsistent length
break;
else if (t == task) {
if (s + 1 == top) { // pop
if (!U.compareAndSwapObject(a, j, task, null))
break;
top = s;
removed = true;
}
else if (base == b) // replace with proxy
removed = U.compareAndSwapObject(a, j, task,
new EmptyTask());
break;
}
else if (t.status >= 0)
empty = false;
else if (s + 1 == top) { // pop and throw away
if (U.compareAndSwapObject(a, j, t, null))
top = s;
break;
}
if (--n == 0) {
if (!empty && base == b)
stat = false;
break;
}
}
}
if (removed)
task.doExec();
return stat;
}
/**
* Polls for and executes the given task or any other task in
* its CountedCompleter computation.
*/
final boolean pollAndExecCC(ForkJoinTask<?> root) {
ForkJoinTask<?>[] a; int b; Object o;
outer: while ((b = base) - top < 0 && (a = array) != null) {
long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) == null ||
!(o instanceof CountedCompleter))
break;
for (CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (base == b &&
U.compareAndSwapObject(a, j, t, null)) {
base = b + 1;
t.doExec();
return true;
}
else
break; // restart
}
if ((r = r.completer) == null)
break outer; // not part of root computation
}
}
return false;
}
/**
* Executes a top-level task and any local tasks remaining
* after execution.
*/
final void runTask(ForkJoinTask<?> t) {
if (t != null) {
(currentSteal = t).doExec();
currentSteal = null;
++nsteals;
if (base - top < 0) { // process remaining local tasks
if (mode == 0)
popAndExecAll();
else
pollAndExecAll();
}
}
}
/**
* Executes a non-top-level (stolen) task.
*/
final void runSubtask(ForkJoinTask<?> t) {
if (t != null) {
ForkJoinTask<?> ps = currentSteal;
(currentSteal = t).doExec();
currentSteal = ps;
}
}
/**
* Returns true if owned and not known to be blocked.
*/
final boolean isApparentlyUnblocked() {
Thread wt; Thread.State s;
return (eventCount >= 0 &&
(wt = owner) != null &&
(s = wt.getState()) != Thread.State.BLOCKED &&
s != Thread.State.WAITING &&
s != Thread.State.TIMED_WAITING);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long QLOCK;
private static final int ABASE;
private static final int ASHIFT;
static {
try {
U = getUnsafe();
Class<?> k = WorkQueue.class;
Class<?> ak = ForkJoinTask[].class;
QLOCK = U.objectFieldOffset
(k.getDeclaredField("qlock"));
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
}
}
// static fields (initialized in static initializer below)
/**
* Creates a new ForkJoinWorkerThread. This factory is used unless
* overridden in ForkJoinPool constructors.
*/
public static final ForkJoinWorkerThreadFactory
defaultForkJoinWorkerThreadFactory;
/**
* Per-thread submission bookkeeping. Shared across all pools
* to reduce ThreadLocal pollution and because random motion
* to avoid contention in one pool is likely to hold for others.
* Lazily initialized on first submission (but null-checked
* in other contexts to avoid unnecessary initialization).
*/
static final ThreadLocal<Submitter> submitters;
/**
* Permission required for callers of methods that may start or
* kill threads.
*/
private static final RuntimePermission modifyThreadPermission;
/**
* Common (static) pool. Non-null for public use unless a static
* construction exception, but internal usages null-check on use
* to paranoically avoid potential initialization circularities
* as well as to simplify generated code.
*/
static final ForkJoinPool common;
/**
* Common pool parallelism. Must equal common.parallelism.
*/
static final int commonParallelism;
/**
* Sequence number for creating workerNamePrefix.
*/
private static int poolNumberSequence;
/**
* Returns the next sequence number. We don't expect this to
* ever contend, so use simple builtin sync.
*/
private static final synchronized int nextPoolId() {
return ++poolNumberSequence;
}
// static constants
/**
* Initial timeout value (in nanoseconds) for the thread
* triggering quiescence to park waiting for new work. On timeout,
* the thread will instead try to shrink the number of
* workers. The value should be large enough to avoid overly
* aggressive shrinkage during most transient stalls (long GCs
* etc).
*/
private static final long IDLE_TIMEOUT = 2000L * 1000L * 1000L; // 2sec
/**
* Timeout value when there are more threads than parallelism level
*/
private static final long FAST_IDLE_TIMEOUT = 200L * 1000L * 1000L;
/**
* Tolerance for idle timeouts, to cope with timer undershoots
*/
private static final long TIMEOUT_SLOP = 2000000L;
/**
* The maximum stolen->joining link depth allowed in method
* tryHelpStealer. Must be a power of two. Depths for legitimate
* chains are unbounded, but we use a fixed constant to avoid
* (otherwise unchecked) cycles and to bound staleness of
* traversal parameters at the expense of sometimes blocking when
* we could be helping.
*/
private static final int MAX_HELP = 64;
/**
 * Increment for seed generators: 0x61c88647 is the 32-bit
 * golden-ratio constant. See class ThreadLocal for explanation.
*/
private static final int SEED_INCREMENT = 0x61c88647;
/*
* Bits and masks for control variables
*
* Field ctl is a long packed with:
* AC: Number of active running workers minus target parallelism (16 bits)
* TC: Number of total workers minus target parallelism (16 bits)
* ST: true if pool is terminating (1 bit)
* EC: the wait count of top waiting thread (15 bits)
* ID: poolIndex of top of Treiber stack of waiters (16 bits)
*
* When convenient, we can extract the upper 32 bits of counts and
* the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
* (int)ctl. The ec field is never accessed alone, but always
* together with id and st. The offsets of counts by the target
* parallelism and the positionings of fields makes it possible to
* perform the most common checks via sign tests of fields: When
* ac is negative, there are not enough active workers, when tc is
* negative, there are not enough total workers, and when e is
* negative, the pool is terminating. To deal with these possibly
* negative fields, we use casts in and out of "short" and/or
* signed shifts to maintain signedness.
*
* When a thread is queued (inactivated), its eventCount field is
* set negative, which is the only way to tell if a worker is
* prevented from executing tasks, even though it must continue to
* scan for them to avoid queuing races. Note however that
* eventCount updates lag releases so usage requires care.
*
* Field plock is an int packed with:
* SHUTDOWN: true if shutdown is enabled (1 bit)
* SEQ: a sequence lock, with PL_LOCK bit set if locked (30 bits)
* SIGNAL: set when threads may be waiting on the lock (1 bit)
*
* The sequence number enables simple consistency checks:
* Staleness of read-only operations on the workQueues array can
* be checked by comparing plock before vs after the reads.
*/
// bit positions/shifts for fields
private static final int AC_SHIFT = 48;
private static final int TC_SHIFT = 32;
private static final int ST_SHIFT = 31;
private static final int EC_SHIFT = 16;
// bounds
private static final int SMASK = 0xffff; // short bits
private static final int MAX_CAP = 0x7fff; // max #workers - 1
private static final int EVENMASK = 0xfffe; // even short bits
private static final int SQMASK = 0x007e; // max 64 (even) slots
private static final int SHORT_SIGN = 1 << 15;
private static final int INT_SIGN = 1 << 31;
// masks
private static final long STOP_BIT = 0x0001L << ST_SHIFT;
private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
// units for incrementing and decrementing
private static final long TC_UNIT = 1L << TC_SHIFT;
private static final long AC_UNIT = 1L << AC_SHIFT;
// masks and units for dealing with u = (int)(ctl >>> 32)
private static final int UAC_SHIFT = AC_SHIFT - 32;
private static final int UTC_SHIFT = TC_SHIFT - 32;
private static final int UAC_MASK = SMASK << UAC_SHIFT;
private static final int UTC_MASK = SMASK << UTC_SHIFT;
private static final int UAC_UNIT = 1 << UAC_SHIFT;
private static final int UTC_UNIT = 1 << UTC_SHIFT;
// masks and units for dealing with e = (int)ctl
private static final int E_MASK = 0x7fffffff; // no STOP_BIT
private static final int E_SEQ = 1 << EC_SHIFT;
// plock bits
private static final int SHUTDOWN = 1 << 31;
private static final int PL_LOCK = 2;
private static final int PL_SIGNAL = 1;
private static final int PL_SPINS = 1 << 8;
// access mode for WorkQueue
static final int LIFO_QUEUE = 0;
static final int FIFO_QUEUE = 1;
static final int SHARED_QUEUE = -1;
// bounds for #steps in scan loop -- must be power 2 minus 1
private static final int MIN_SCAN = 0x1ff; // cover estimation slop
private static final int MAX_SCAN = 0x1ffff; // 4 * max workers
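    /*
     * Illustrative sketch only (hypothetical helper, not used anywhere):
     * unpacking the ctl word according to the bit layout documented above.
     */
    static String exampleDecodeCtl(long c) {
        int ac = (short)(c >>> AC_SHIFT); // active workers minus parallelism
        int tc = (short)(c >>> TC_SHIFT); // total workers minus parallelism
        int e  = (int)c;                  // negative iff STOP_BIT (terminating)
        int id = e & SMASK;               // pool index of top Treiber-stack waiter
        return "ac=" + ac + " tc=" + tc + " e=" + e + " id=" + id;
    }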
// Instance fields
/*
* Field layout of this class tends to matter more than one would
* like. Runtime layout order is only loosely related to
* declaration order and may differ across JVMs, but the following
* empirically works OK on current JVMs.
*/
// Heuristic padding to ameliorate unfortunate memory placements
volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
volatile long stealCount; // collects worker counts
volatile long ctl; // main pool control
volatile int plock; // shutdown status and seqLock
volatile int indexSeed; // worker/submitter index seed
final int config; // mode and parallelism level
WorkQueue[] workQueues; // main registry
final ForkJoinWorkerThreadFactory factory;
final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
final String workerNamePrefix; // to create worker name string
volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
volatile Object pad18, pad19, pad1a, pad1b;
/**
* Acquires the plock lock to protect worker array and related
* updates. This method is called only if an initial CAS on plock
* fails. This acts as a spinlock for normal cases, but falls back
* to builtin monitor to block when (rarely) needed. This would be
* a terrible idea for a highly contended lock, but works fine as
* a more conservative alternative to a pure spinlock.
*/
private int acquirePlock() {
int spins = PL_SPINS, r = 0, ps, nps;
for (;;) {
if (((ps = plock) & PL_LOCK) == 0 &&
U.compareAndSwapInt(this, PLOCK, ps, nps = ps + PL_LOCK))
return nps;
else if (r == 0) { // randomize spins if possible
Thread t = Thread.currentThread(); WorkQueue w; Submitter z;
if ((t instanceof ForkJoinWorkerThread) &&
(w = ((ForkJoinWorkerThread)t).workQueue) != null)
r = w.seed;
else if ((z = submitters.get()) != null)
r = z.seed;
else
r = 1;
}
else if (spins >= 0) {
r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
if (r >= 0)
--spins;
}
else if (U.compareAndSwapInt(this, PLOCK, ps, ps | PL_SIGNAL)) {
synchronized (this) {
if ((plock & PL_SIGNAL) != 0) {
try {
wait();
} catch (InterruptedException ie) {
try {
Thread.currentThread().interrupt();
} catch (SecurityException ignore) {
}
}
}
else
notifyAll();
}
}
}
}
/**
* Unlocks and signals any thread waiting for plock. Called only
* when CAS of seq value for unlock fails.
*/
private void releasePlock(int ps) {
plock = ps;
synchronized (this) { notifyAll(); }
}
/**
* Tries to create and start one worker if fewer than target
* parallelism level exist. Adjusts counts etc on failure.
*/
private void tryAddWorker() {
long c; int u;
while ((u = (int)((c = ctl) >>> 32)) < 0 &&
(u & SHORT_SIGN) != 0 && (int)c == 0) {
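            // u < 0: too few active workers; (u & SHORT_SIGN) != 0: total
            // worker count below target; (int)c == 0: no queued idle worker
            // to resume, so a brand new thread is needed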
long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
((u + UAC_UNIT) & UAC_MASK)) << 32;
if (U.compareAndSwapLong(this, CTL, c, nc)) {
ForkJoinWorkerThreadFactory fac;
Throwable ex = null;
ForkJoinWorkerThread wt = null;
try {
if ((fac = factory) != null &&
(wt = fac.newThread(this)) != null) {
wt.start();
break;
}
} catch (Throwable e) {
ex = e;
}
deregisterWorker(wt, ex);
break;
}
}
}
// Registering and deregistering workers
/**
* Callback from ForkJoinWorkerThread to establish and record its
* WorkQueue. To avoid scanning bias due to packing entries in
* front of the workQueues array, we treat the array as a simple
* power-of-two hash table using per-thread seed as hash,
* expanding as needed.
*
* @param wt the worker thread
* @return the worker's queue
*/
final WorkQueue registerWorker(ForkJoinWorkerThread wt) {
Thread.UncaughtExceptionHandler handler; WorkQueue[] ws; int s, ps;
wt.setDaemon(true);
if ((handler = ueh) != null)
wt.setUncaughtExceptionHandler(handler);
do {} while (!U.compareAndSwapInt(this, INDEXSEED, s = indexSeed,
s += SEED_INCREMENT) ||
s == 0); // skip 0
WorkQueue w = new WorkQueue(this, wt, config >>> 16, s);
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
try {
if ((ws = workQueues) != null) { // skip if shutting down
int n = ws.length, m = n - 1;
int r = (s << 1) | 1; // use odd-numbered indices
if (ws[r &= m] != null) { // collision
int probes = 0; // step by approx half size
int step = (n <= 4) ? 2 : ((n >>> 1) & EVENMASK) + 2;
while (ws[r = (r + step) & m] != null) {
if (++probes >= n) {
workQueues = ws = Arrays.copyOf(ws, n <<= 1);
m = n - 1;
probes = 0;
}
}
}
w.eventCount = w.poolIndex = r; // volatile write orders
ws[r] = w;
}
} finally {
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
wt.setName(workerNamePrefix.concat(Integer.toString(w.poolIndex)));
return w;
}
/**
* Final callback from terminating worker, as well as upon failure
* to construct or start a worker. Removes record of worker from
* array, and adjusts counts. If pool is shutting down, tries to
* complete termination.
*
* @param wt the worker thread or null if construction failed
* @param ex the exception causing failure, or null if none
*/
final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
WorkQueue w = null;
if (wt != null && (w = wt.workQueue) != null) {
int ps;
w.qlock = -1; // ensure set
long ns = w.nsteals, sc; // collect steal count
do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
sc = stealCount, sc + ns));
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
try {
int idx = w.poolIndex;
WorkQueue[] ws = workQueues;
if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
ws[idx] = null;
} finally {
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
}
long c; // adjust ctl counts
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
((c - TC_UNIT) & TC_MASK) |
(c & ~(AC_MASK|TC_MASK)))));
if (!tryTerminate(false, false) && w != null && w.array != null) {
w.cancelAll(); // cancel remaining tasks
WorkQueue[] ws; WorkQueue v; Thread p; int u, i, e;
while ((u = (int)((c = ctl) >>> 32)) < 0 && (e = (int)c) >= 0) {
if (e > 0) { // activate or create replacement
if ((ws = workQueues) == null ||
(i = e & SMASK) >= ws.length ||
(v = ws[i]) == null)
break;
long nc = (((long)(v.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (v.eventCount != (e | INT_SIGN))
break;
if (U.compareAndSwapLong(this, CTL, c, nc)) {
v.eventCount = (e + E_SEQ) & E_MASK;
if ((p = v.parker) != null)
U.unpark(p);
break;
}
}
else {
if ((short)u < 0)
tryAddWorker();
break;
}
}
}
if (ex == null) // help clean refs on way out
ForkJoinTask.helpExpungeStaleExceptions();
else // rethrow
ForkJoinTask.rethrow(ex);
}
// Submissions
/**
* Unless shutting down, adds the given task to a submission queue
* at submitter's current queue index (modulo submission
* range). Only the most common path is directly handled in this
* method. All others are relayed to fullExternalPush.
*
* @param task the task. Caller must ensure non-null.
*/
final void externalPush(ForkJoinTask<?> task) {
WorkQueue[] ws; WorkQueue q; Submitter z; int m; ForkJoinTask<?>[] a;
if ((z = submitters.get()) != null && plock > 0 &&
(ws = workQueues) != null && (m = (ws.length - 1)) >= 0 &&
(q = ws[m & z.seed & SQMASK]) != null &&
U.compareAndSwapInt(q, QLOCK, 0, 1)) { // lock
int b = q.base, s = q.top, n, an;
if ((a = q.array) != null && (an = a.length) > (n = s + 1 - b)) {
int j = (((an - 1) & s) << ASHIFT) + ABASE;
U.putOrderedObject(a, j, task);
q.top = s + 1; // push on to deque
q.qlock = 0;
if (n <= 2)
signalWork(q);
return;
}
q.qlock = 0;
}
fullExternalPush(task);
}
/**
* Full version of externalPush. This method is called, among
* other times, upon the first submission of the first task to the
* pool, so must perform secondary initialization. It also
* detects first submission by an external thread by looking up
* its ThreadLocal, and creates a new shared queue if the one at
 * index is empty or contended. The plock lock body must be
* exception-free (so no try/finally) so we optimistically
* allocate new queues outside the lock and throw them away if
* (very rarely) not needed.
*
* Secondary initialization occurs when plock is zero, to create
* workQueue array and set plock to a valid value. This lock body
* must also be exception-free. Because the plock seq value can
* eventually wrap around zero, this method harmlessly fails to
* reinitialize if workQueues exists, while still advancing plock.
*/
private void fullExternalPush(ForkJoinTask<?> task) {
int r = 0; // random index seed
for (Submitter z = submitters.get();;) {
WorkQueue[] ws; WorkQueue q; int ps, m, k;
if (z == null) {
if (U.compareAndSwapInt(this, INDEXSEED, r = indexSeed,
r += SEED_INCREMENT) && r != 0)
submitters.set(z = new Submitter(r));
}
else if (r == 0) { // move to a different index
r = z.seed;
r ^= r << 13; // same xorshift as WorkQueues
r ^= r >>> 17;
z.seed = r ^ (r << 5);
}
else if ((ps = plock) < 0)
throw new RejectedExecutionException();
else if (ps == 0 || (ws = workQueues) == null ||
(m = ws.length - 1) < 0) { // initialize workQueues
int p = config & SMASK; // find power of two table size
int n = (p > 1) ? p - 1 : 1; // ensure at least 2 slots
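                // smear the high bit downward, then round up to the next power
                // of two and double it, leaving room for both odd (worker) and
                // even (submission) slots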
n |= n >>> 1; n |= n >>> 2; n |= n >>> 4;
n |= n >>> 8; n |= n >>> 16; n = (n + 1) << 1;
WorkQueue[] nws = ((ws = workQueues) == null || ws.length == 0 ?
new WorkQueue[n] : null);
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
if (((ws = workQueues) == null || ws.length == 0) && nws != null)
workQueues = nws;
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
else if ((q = ws[k = r & m & SQMASK]) != null) {
if (q.qlock == 0 && U.compareAndSwapInt(q, QLOCK, 0, 1)) {
ForkJoinTask<?>[] a = q.array;
int s = q.top;
boolean submitted = false;
try { // locked version of push
if ((a != null && a.length > s + 1 - q.base) ||
(a = q.growArray()) != null) { // must presize
int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
U.putOrderedObject(a, j, task);
q.top = s + 1;
submitted = true;
}
} finally {
q.qlock = 0; // unlock
}
if (submitted) {
signalWork(q);
return;
}
}
r = 0; // move on failure
}
else if (((ps = plock) & PL_LOCK) == 0) { // create new queue
q = new WorkQueue(this, null, SHARED_QUEUE, r);
if (((ps = plock) & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
if ((ws = workQueues) != null && k < ws.length && ws[k] == null)
ws[k] = q;
int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
else
r = 0; // try elsewhere while lock held
}
}
// Maintaining ctl counts
/**
* Increments active count; mainly called upon return from blocking.
*/
final void incrementActiveCount() {
long c;
do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
}
/**
* Tries to create or activate a worker if too few are active.
*
* @param q the (non-null) queue holding tasks to be signalled
*/
final void signalWork(WorkQueue q) {
int hint = q.poolIndex;
long c; int e, u, i, n; WorkQueue[] ws; WorkQueue w; Thread p;
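        // while there are too few active workers, prefer releasing a queued
        // waiter (e > 0) over creating a brand new thread (tryAddWorker)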
while ((u = (int)((c = ctl) >>> 32)) < 0) {
if ((e = (int)c) > 0) {
if ((ws = workQueues) != null && ws.length > (i = e & SMASK) &&
(w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
long nc = (((long)(w.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (U.compareAndSwapLong(this, CTL, c, nc)) {
w.hint = hint;
w.eventCount = (e + E_SEQ) & E_MASK;
if ((p = w.parker) != null)
U.unpark(p);
break;
}
if (q.top - q.base <= 0)
break;
}
else
break;
}
else {
if ((short)u < 0)
tryAddWorker();
break;
}
}
}
// Scanning for tasks
/**
* Top-level runloop for workers, called by ForkJoinWorkerThread.run.
*/
final void runWorker(WorkQueue w) {
w.growArray(); // allocate queue
do { w.runTask(scan(w)); } while (w.qlock >= 0);
}
/**
* Scans for and, if found, returns one task, else possibly
* inactivates the worker. This method operates on single reads of
* volatile state and is designed to be re-invoked continuously,
* in part because it returns upon detecting inconsistencies,
* contention, or state changes that indicate possible success on
* re-invocation.
*
* The scan searches for tasks across queues (starting at a random
* index, and relying on registerWorker to irregularly scatter
* them within array to avoid bias), checking each at least twice.
* The scan terminates upon either finding a non-empty queue, or
* completing the sweep. If the worker is not inactivated, it
* takes and returns a task from this queue. Otherwise, if not
* activated, it signals workers (that may include itself) and
 * returns so caller can retry. It also returns if the
 * worker array may have changed during an empty scan. On failure
* to find a task, we take one of the following actions, after
* which the caller will retry calling this method unless
* terminated.
*
* * If pool is terminating, terminate the worker.
*
* * If not already enqueued, try to inactivate and enqueue the
* worker on wait queue. Or, if inactivating has caused the pool
* to be quiescent, relay to idleAwaitWork to possibly shrink
* pool.
*
* * If already enqueued and none of the above apply, possibly
* park awaiting signal, else lingering to help scan and signal.
*
* * If a non-empty queue discovered or left as a hint,
* help wake up other workers before return.
*
* @param w the worker (via its WorkQueue)
* @return a task or null if none found
*/
private final ForkJoinTask<?> scan(WorkQueue w) {
WorkQueue[] ws; int m;
int ps = plock; // read plock before ws
if (w != null && (ws = workQueues) != null && (m = ws.length - 1) >= 0) {
int ec = w.eventCount; // ec is negative if inactive
int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
w.hint = -1; // update seed and clear hint
int j = ((m + m + 1) | MIN_SCAN) & MAX_SCAN;
do {
WorkQueue q; ForkJoinTask<?>[] a; int b;
if ((q = ws[(r + j) & m]) != null && (b = q.base) - q.top < 0 &&
(a = q.array) != null) { // probably nonempty
int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
ForkJoinTask<?> t = (ForkJoinTask<?>)
U.getObjectVolatile(a, i);
if (q.base == b && ec >= 0 && t != null &&
U.compareAndSwapObject(a, i, t, null)) {
if ((q.base = b + 1) - q.top < 0)
signalWork(q);
return t; // taken
}
else if ((ec < 0 || j < m) && (int)(ctl >> AC_SHIFT) <= 0) {
w.hint = (r + j) & m; // help signal below
break; // cannot take
}
}
} while (--j >= 0);
int h, e, ns; long c, sc; WorkQueue q;
if ((ns = w.nsteals) != 0) {
if (U.compareAndSwapLong(this, STEALCOUNT,
sc = stealCount, sc + ns))
w.nsteals = 0; // collect steals and rescan
}
else if (plock != ps) // consistency check
; // skip
else if ((e = (int)(c = ctl)) < 0)
w.qlock = -1; // pool is terminating
else {
if ((h = w.hint) < 0) {
if (ec >= 0) { // try to enqueue/inactivate
long nc = (((long)ec |
((c - AC_UNIT) & (AC_MASK|TC_MASK))));
w.nextWait = e; // link and mark inactive
w.eventCount = ec | INT_SIGN;
if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc))
w.eventCount = ec; // unmark on CAS failure
else if ((int)(c >> AC_SHIFT) == 1 - (config & SMASK))
idleAwaitWork(w, nc, c);
}
else if (w.eventCount < 0 && ctl == c) {
Thread wt = Thread.currentThread();
Thread.interrupted(); // clear status
U.putObject(wt, PARKBLOCKER, this);
w.parker = wt; // emulate LockSupport.park
if (w.eventCount < 0) // recheck
U.park(false, 0L); // block
w.parker = null;
U.putObject(wt, PARKBLOCKER, null);
}
}
if ((h >= 0 || (h = w.hint) >= 0) &&
(ws = workQueues) != null && h < ws.length &&
(q = ws[h]) != null) { // signal others before retry
WorkQueue v; Thread p; int u, i, s;
for (int n = (config & SMASK) - 1;;) {
int idleCount = (w.eventCount < 0) ? 0 : -1;
if (((s = idleCount - q.base + q.top) <= n &&
(n = s) <= 0) ||
(u = (int)((c = ctl) >>> 32)) >= 0 ||
(e = (int)c) <= 0 || m < (i = e & SMASK) ||
(v = ws[i]) == null)
break;
long nc = (((long)(v.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (v.eventCount != (e | INT_SIGN) ||
!U.compareAndSwapLong(this, CTL, c, nc))
break;
v.hint = h;
v.eventCount = (e + E_SEQ) & E_MASK;
if ((p = v.parker) != null)
U.unpark(p);
if (--n <= 0)
break;
}
}
}
}
return null;
}
/**
* If inactivating worker w has caused the pool to become
* quiescent, checks for pool termination, and, so long as this is
* not the only worker, waits for event for up to a given
* duration. On timeout, if ctl has not changed, terminates the
* worker, which will in turn wake up another worker to possibly
* repeat this process.
*
* @param w the calling worker
* @param currentCtl the ctl value triggering possible quiescence
* @param prevCtl the ctl value to restore if thread is terminated
*/
private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
if (w != null && w.eventCount < 0 &&
!tryTerminate(false, false) && (int)prevCtl != 0 &&
ctl == currentCtl) {
int dc = -(short)(currentCtl >>> TC_SHIFT);
long parkTime = dc < 0 ? FAST_IDLE_TIMEOUT: (dc + 1) * IDLE_TIMEOUT;
long deadline = System.nanoTime() + parkTime - TIMEOUT_SLOP;
Thread wt = Thread.currentThread();
while (ctl == currentCtl) {
Thread.interrupted(); // timed variant of version in scan()
U.putObject(wt, PARKBLOCKER, this);
w.parker = wt;
if (ctl == currentCtl)
U.park(false, parkTime);
w.parker = null;
U.putObject(wt, PARKBLOCKER, null);
if (ctl != currentCtl)
break;
if (deadline - System.nanoTime() <= 0L &&
U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
w.hint = -1;
w.qlock = -1; // shrink
break;
}
}
}
}
/**
* Scans through queues looking for work while joining a task; if
* any present, signals. May return early if more signalling is
* detectably unneeded.
*
* @param task return early if done
* @param origin an index to start scan
*/
private void helpSignal(ForkJoinTask<?> task, int origin) {
WorkQueue[] ws; WorkQueue w; Thread p; long c; int m, u, e, i, s;
if (task != null && task.status >= 0 &&
(u = (int)(ctl >>> 32)) < 0 && (u >> UAC_SHIFT) < 0 &&
(ws = workQueues) != null && (m = ws.length - 1) >= 0) {
outer: for (int k = origin, j = m; j >= 0; --j) {
WorkQueue q = ws[k++ & m];
for (int n = m;;) { // limit to at most m signals
if (task.status < 0)
break outer;
if (q == null ||
((s = -q.base + q.top) <= n && (n = s) <= 0))
break;
if ((u = (int)((c = ctl) >>> 32)) >= 0 ||
(e = (int)c) <= 0 || m < (i = e & SMASK) ||
(w = ws[i]) == null)
break outer;
long nc = (((long)(w.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (w.eventCount != (e | INT_SIGN))
break outer;
if (U.compareAndSwapLong(this, CTL, c, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
if ((p = w.parker) != null)
U.unpark(p);
if (--n <= 0)
break;
}
}
}
}
}
/**
* Tries to locate and execute tasks for a stealer of the given
     * task, or in turn one of its stealers. Traces currentSteal ->
* currentJoin links looking for a thread working on a descendant
* of the given task and with a non-empty queue to steal back and
* execute tasks from. The first call to this method upon a
* waiting join will often entail scanning/search, (which is OK
* because the joiner has nothing better to do), but this method
* leaves hints in workers to speed up subsequent calls. The
* implementation is very branchy to cope with potential
* inconsistencies or loops encountering chains that are stale,
* unknown, or so long that they are likely cyclic.
*
* @param joiner the joining worker
* @param task the task to join
* @return 0 if no progress can be made, negative if task
* known complete, else positive
*/
private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
int stat = 0, steps = 0; // bound to avoid cycles
if (joiner != null && task != null) { // hoist null checks
restart: for (;;) {
ForkJoinTask<?> subtask = task; // current target
for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
WorkQueue[] ws; int m, s, h;
if ((s = task.status) < 0) {
stat = s;
break restart;
}
if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
break restart; // shutting down
if ((v = ws[h = (j.hint | 1) & m]) == null ||
v.currentSteal != subtask) {
for (int origin = h;;) { // find stealer
if (((h = (h + 2) & m) & 15) == 1 &&
(subtask.status < 0 || j.currentJoin != subtask))
continue restart; // occasional staleness check
if ((v = ws[h]) != null &&
v.currentSteal == subtask) {
j.hint = h; // save hint
break;
}
if (h == origin)
break restart; // cannot find stealer
}
}
for (;;) { // help stealer or descend to its stealer
ForkJoinTask[] a; int b;
if (subtask.status < 0) // surround probes with
continue restart; // consistency checks
if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
ForkJoinTask<?> t =
(ForkJoinTask<?>)U.getObjectVolatile(a, i);
if (subtask.status < 0 || j.currentJoin != subtask ||
v.currentSteal != subtask)
continue restart; // stale
stat = 1; // apparent progress
if (t != null && v.base == b &&
U.compareAndSwapObject(a, i, t, null)) {
v.base = b + 1; // help stealer
joiner.runSubtask(t);
}
else if (v.base == b && ++steps == MAX_HELP)
break restart; // v apparently stalled
}
else { // empty -- try to descend
ForkJoinTask<?> next = v.currentJoin;
if (subtask.status < 0 || j.currentJoin != subtask ||
v.currentSteal != subtask)
continue restart; // stale
else if (next == null || ++steps == MAX_HELP)
break restart; // dead-end or maybe cyclic
else {
subtask = next;
j = v;
break;
}
}
}
}
}
}
return stat;
}
/**
* Analog of tryHelpStealer for CountedCompleters. Tries to steal
* and run tasks within the target's computation.
*
* @param task the task to join
* @param mode if shared, exit upon completing any task
* if all workers are active
*/
private int helpComplete(ForkJoinTask<?> task, int mode) {
WorkQueue[] ws; WorkQueue q; int m, n, s, u;
if (task != null && (ws = workQueues) != null &&
(m = ws.length - 1) >= 0) {
for (int j = 1, origin = j;;) {
if ((s = task.status) < 0)
return s;
if ((q = ws[j & m]) != null && q.pollAndExecCC(task)) {
origin = j;
if (mode == SHARED_QUEUE &&
((u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0))
break;
}
else if ((j = (j + 2) & m) == origin)
break;
}
}
return 0;
}
/**
* Tries to decrement active count (sometimes implicitly) and
* possibly release or create a compensating worker in preparation
* for blocking. Fails on contention or termination. Otherwise,
* adds a new thread if no idle workers are available and pool
* may become starved.
*/
final boolean tryCompensate() {
int pc = config & SMASK, e, i, tc; long c;
WorkQueue[] ws; WorkQueue w; Thread p;
if ((ws = workQueues) != null && (e = (int)(c = ctl)) >= 0) {
if (e != 0 && (i = e & SMASK) < ws.length &&
(w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
long nc = ((long)(w.nextWait & E_MASK) |
(c & (AC_MASK|TC_MASK)));
if (U.compareAndSwapLong(this, CTL, c, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
if ((p = w.parker) != null)
U.unpark(p);
return true; // replace with idle worker
}
}
else if ((tc = (short)(c >>> TC_SHIFT)) >= 0 &&
(int)(c >> AC_SHIFT) + pc > 1) {
long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
if (U.compareAndSwapLong(this, CTL, c, nc))
return true; // no compensation
}
else if (tc + pc < MAX_CAP) {
long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
if (U.compareAndSwapLong(this, CTL, c, nc)) {
ForkJoinWorkerThreadFactory fac;
Throwable ex = null;
ForkJoinWorkerThread wt = null;
try {
if ((fac = factory) != null &&
(wt = fac.newThread(this)) != null) {
wt.start();
return true;
}
} catch (Throwable rex) {
ex = rex;
}
deregisterWorker(wt, ex); // clean up and return false
}
}
}
return false;
}
/**
* Helps and/or blocks until the given task is done.
*
* @param joiner the joining worker
* @param task the task
* @return task status on exit
*/
final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
int s = 0;
if (joiner != null && task != null && (s = task.status) >= 0) {
ForkJoinTask<?> prevJoin = joiner.currentJoin;
joiner.currentJoin = task;
do {} while ((s = task.status) >= 0 && !joiner.isEmpty() &&
joiner.tryRemoveAndExec(task)); // process local tasks
if (s >= 0 && (s = task.status) >= 0) {
helpSignal(task, joiner.poolIndex);
if ((s = task.status) >= 0 &&
(task instanceof CountedCompleter))
s = helpComplete(task, LIFO_QUEUE);
}
while (s >= 0 && (s = task.status) >= 0) {
if ((!joiner.isEmpty() || // try helping
(s = tryHelpStealer(joiner, task)) == 0) &&
(s = task.status) >= 0) {
helpSignal(task, joiner.poolIndex);
if ((s = task.status) >= 0 && tryCompensate()) {
if (task.trySetSignal() && (s = task.status) >= 0) {
synchronized (task) {
if (task.status >= 0) {
try { // see ForkJoinTask
task.wait(); // for explanation
} catch (InterruptedException ie) {
}
}
else
task.notifyAll();
}
}
long c; // re-activate
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, c + AC_UNIT));
}
}
}
joiner.currentJoin = prevJoin;
}
return s;
}
/**
* Stripped-down variant of awaitJoin used by timed joins. Tries
* to help join only while there is continuous progress. (Caller
* will then enter a timed wait.)
*
* @param joiner the joining worker
* @param task the task
*/
final void helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
int s;
if (joiner != null && task != null && (s = task.status) >= 0) {
ForkJoinTask<?> prevJoin = joiner.currentJoin;
joiner.currentJoin = task;
do {} while ((s = task.status) >= 0 && !joiner.isEmpty() &&
joiner.tryRemoveAndExec(task));
if (s >= 0 && (s = task.status) >= 0) {
helpSignal(task, joiner.poolIndex);
if ((s = task.status) >= 0 &&
(task instanceof CountedCompleter))
s = helpComplete(task, LIFO_QUEUE);
}
if (s >= 0 && joiner.isEmpty()) {
do {} while (task.status >= 0 &&
tryHelpStealer(joiner, task) > 0);
}
joiner.currentJoin = prevJoin;
}
}
/**
* Returns a (probably) non-empty steal queue, if one is found
     * during a scan, else null. This method must be retried by the
     * caller if, by the time it tries to use the queue, it is empty.
* @param r a (random) seed for scanning
*/
private WorkQueue findNonEmptyStealQueue(int r) {
for (;;) {
int ps = plock, m; WorkQueue[] ws; WorkQueue q;
if ((ws = workQueues) != null && (m = ws.length - 1) >= 0) {
for (int j = (m + 1) << 2; j >= 0; --j) {
if ((q = ws[(((r + j) << 1) | 1) & m]) != null &&
q.base - q.top < 0)
return q;
}
}
if (plock == ps)
return null;
}
}
/**
* Runs tasks until {@code isQuiescent()}. We piggyback on
* active count ctl maintenance, but rather than blocking
* when tasks cannot be found, we rescan until all others cannot
* find tasks either.
*/
final void helpQuiescePool(WorkQueue w) {
for (boolean active = true;;) {
long c; WorkQueue q; ForkJoinTask<?> t; int b;
while ((t = w.nextLocalTask()) != null) {
if (w.base - w.top < 0)
signalWork(w);
t.doExec();
}
if ((q = findNonEmptyStealQueue(w.nextSeed())) != null) {
if (!active) { // re-establish active count
active = true;
do {} while (!U.compareAndSwapLong
(this, CTL, c = ctl, c + AC_UNIT));
}
if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
if (q.base - q.top < 0)
signalWork(q);
w.runSubtask(t);
}
}
else if (active) { // decrement active count without queuing
long nc = (c = ctl) - AC_UNIT;
if ((int)(nc >> AC_SHIFT) + (config & SMASK) == 0)
return; // bypass decrement-then-increment
if (U.compareAndSwapLong(this, CTL, c, nc))
active = false;
}
else if ((int)((c = ctl) >> AC_SHIFT) + (config & SMASK) == 0 &&
U.compareAndSwapLong(this, CTL, c, c + AC_UNIT))
return;
}
}
/**
* Gets and removes a local or stolen task for the given worker.
*
* @return a task, if available
*/
final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
for (ForkJoinTask<?> t;;) {
WorkQueue q; int b;
if ((t = w.nextLocalTask()) != null)
return t;
if ((q = findNonEmptyStealQueue(w.nextSeed())) == null)
return null;
if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
if (q.base - q.top < 0)
signalWork(q);
return t;
}
}
}
/**
* Returns a cheap heuristic guide for task partitioning when
* programmers, frameworks, tools, or languages have little or no
* idea about task granularity. In essence by offering this
* method, we ask users only about tradeoffs in overhead vs
* expected throughput and its variance, rather than how finely to
* partition tasks.
*
* In a steady state strict (tree-structured) computation, each
* thread makes available for stealing enough tasks for other
* threads to remain active. Inductively, if all threads play by
* the same rules, each thread should make available only a
* constant number of tasks.
*
* The minimum useful constant is just 1. But using a value of 1
* would require immediate replenishment upon each steal to
* maintain enough tasks, which is infeasible. Further,
* partitionings/granularities of offered tasks should minimize
* steal rates, which in general means that threads nearer the top
     * of the computation tree should generate more than those nearer
     * the bottom. In perfect steady state, each thread is at
     * approximately the same level of the computation tree. However,
* producing extra tasks amortizes the uncertainty of progress and
* diffusion assumptions.
*
* So, users will want to use values larger (but not much larger)
* than 1 to both smooth over transient shortages and hedge
* against uneven progress; as traded off against the cost of
* extra task overhead. We leave the user to pick a threshold
* value to compare with the results of this call to guide
* decisions, but recommend values such as 3.
*
* When all threads are active, it is on average OK to estimate
* surplus strictly locally. In steady-state, if one thread is
* maintaining say 2 surplus tasks, then so are others. So we can
* just use estimated queue length. However, this strategy alone
* leads to serious mis-estimates in some non-steady-state
* conditions (ramp-up, ramp-down, other stalls). We can detect
* many of these by further considering the number of "idle"
     * threads, which are known to have zero queued tasks, and so
     * compensate by a factor of (#idle/#active) threads.
*
* Note: The approximation of #busy workers as #active workers is
* not very good under current signalling scheme, and should be
* improved.
*/
static int getSurplusQueuedTaskCount() {
Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
int p = (pool = (wt = (ForkJoinWorkerThread)t).pool).config & SMASK;
int n = (q = wt.workQueue).top - q.base;
int a = (int)(pool.ctl >> AC_SHIFT) + p;
return n - (a > (p >>>= 1) ? 0 :
a > (p >>>= 1) ? 1 :
a > (p >>>= 1) ? 2 :
a > (p >>>= 1) ? 4 :
8);
}
return 0;
}
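    /*
     * Editorial sketch (not part of the original file): user code
     * reaches this heuristic through the public wrapper
     * ForkJoinTask.getSurplusQueuedTaskCount(), typically comparing it
     * against the threshold of 3 recommended above. SumTask, THRESHOLD,
     * and sumDirectly below are hypothetical names for illustration only.
     * <pre> {@code
     * class SumTask extends RecursiveTask<Long> {
     *   final long[] a; final int lo, hi;
     *   SumTask(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }
     *   protected Long compute() {
     *     if (hi - lo <= THRESHOLD)
     *       return sumDirectly(a, lo, hi);          // small enough; no split
     *     int mid = (lo + hi) >>> 1;
     *     SumTask left = new SumTask(a, lo, mid);
     *     SumTask right = new SumTask(a, mid, hi);
     *     if (getSurplusQueuedTaskCount() <= 3) {   // shallow queue; offer work
     *       right.fork();
     *       return left.compute() + right.join();
     *     }
     *     return left.compute() + right.compute();  // enough backlog; run inline
     *   }
     * }}</pre>
     */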
// Termination
/**
* Possibly initiates and/or completes termination. The caller
* triggering termination runs three passes through workQueues:
* (0) Setting termination status, followed by wakeups of queued
* workers; (1) cancelling all tasks; (2) interrupting lagging
* threads (likely in external tasks, but possibly also blocked in
* joins). Each pass repeats previous steps because of potential
* lagging thread creation.
*
* @param now if true, unconditionally terminate, else only
* if no work and no active workers
* @param enable if true, enable shutdown when next possible
* @return true if now terminating or terminated
*/
private boolean tryTerminate(boolean now, boolean enable) {
int ps;
if (this == common) // cannot shut down
return false;
if ((ps = plock) >= 0) { // enable by setting plock
if (!enable)
return false;
if ((ps & PL_LOCK) != 0 ||
!U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
ps = acquirePlock();
int nps = ((ps + PL_LOCK) & ~SHUTDOWN) | SHUTDOWN;
if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
releasePlock(nps);
}
for (long c;;) {
if (((c = ctl) & STOP_BIT) != 0) { // already terminating
if ((short)(c >>> TC_SHIFT) == -(config & SMASK)) {
synchronized (this) {
notifyAll(); // signal when 0 workers
}
}
return true;
}
if (!now) { // check if idle & no tasks
WorkQueue[] ws; WorkQueue w;
if ((int)(c >> AC_SHIFT) != -(config & SMASK))
return false;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
if (!w.isEmpty()) { // signal unprocessed tasks
signalWork(w);
return false;
}
if ((i & 1) != 0 && w.eventCount >= 0)
return false; // unqueued inactive worker
}
}
}
}
if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
for (int pass = 0; pass < 3; ++pass) {
WorkQueue[] ws; WorkQueue w; Thread wt;
if ((ws = workQueues) != null) {
int n = ws.length;
for (int i = 0; i < n; ++i) {
if ((w = ws[i]) != null) {
w.qlock = -1;
if (pass > 0) {
w.cancelAll();
if (pass > 1 && (wt = w.owner) != null) {
if (!wt.isInterrupted()) {
try {
wt.interrupt();
} catch (Throwable ignore) {
}
}
U.unpark(wt);
}
}
}
}
// Wake up workers parked on event queue
int i, e; long cc; Thread p;
while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
(i = e & SMASK) < n && i >= 0 &&
(w = ws[i]) != null) {
long nc = ((long)(w.nextWait & E_MASK) |
((cc + AC_UNIT) & AC_MASK) |
(cc & (TC_MASK|STOP_BIT)));
if (w.eventCount == (e | INT_SIGN) &&
U.compareAndSwapLong(this, CTL, cc, nc)) {
w.eventCount = (e + E_SEQ) & E_MASK;
w.qlock = -1;
if ((p = w.parker) != null)
U.unpark(p);
}
}
}
}
}
}
}
// external operations on common pool
/**
* Returns common pool queue for a thread that has submitted at
* least one task.
*/
static WorkQueue commonSubmitterQueue() {
ForkJoinPool p; WorkQueue[] ws; int m; Submitter z;
return ((z = submitters.get()) != null &&
(p = common) != null &&
(ws = p.workQueues) != null &&
(m = ws.length - 1) >= 0) ?
ws[m & z.seed & SQMASK] : null;
}
/**
* Tries to pop the given task from submitter's queue in common pool.
*/
static boolean tryExternalUnpush(ForkJoinTask<?> t) {
ForkJoinPool p; WorkQueue[] ws; WorkQueue q; Submitter z;
ForkJoinTask<?>[] a; int m, s;
if (t != null &&
(z = submitters.get()) != null &&
(p = common) != null &&
(ws = p.workQueues) != null &&
(m = ws.length - 1) >= 0 &&
(q = ws[m & z.seed & SQMASK]) != null &&
(s = q.top) != q.base &&
(a = q.array) != null) {
long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
if (U.getObject(a, j) == t &&
U.compareAndSwapInt(q, QLOCK, 0, 1)) {
if (q.array == a && q.top == s && // recheck
U.compareAndSwapObject(a, j, t, null)) {
q.top = s - 1;
q.qlock = 0;
return true;
}
q.qlock = 0;
}
}
return false;
}
/**
* Tries to pop and run local tasks within the same computation
* as the given root. On failure, tries to help complete from
* other queues via helpComplete.
*/
private void externalHelpComplete(WorkQueue q, ForkJoinTask<?> root) {
ForkJoinTask<?>[] a; int m;
if (q != null && (a = q.array) != null && (m = (a.length - 1)) >= 0 &&
root != null && root.status >= 0) {
for (;;) {
int s, u; Object o; CountedCompleter<?> task = null;
if ((s = q.top) - q.base > 0) {
long j = ((m & (s - 1)) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) != null &&
(o instanceof CountedCompleter)) {
CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;
do {
if (r == root) {
if (U.compareAndSwapInt(q, QLOCK, 0, 1)) {
if (q.array == a && q.top == s &&
U.compareAndSwapObject(a, j, t, null)) {
q.top = s - 1;
task = t;
}
q.qlock = 0;
}
break;
}
} while ((r = r.completer) != null);
}
}
if (task != null)
task.doExec();
if (root.status < 0 ||
(u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0)
break;
if (task == null) {
helpSignal(root, q.poolIndex);
if (root.status >= 0)
helpComplete(root, SHARED_QUEUE);
break;
}
}
}
}
/**
* Tries to help execute or signal availability of the given task
* from submitter's queue in common pool.
*/
static void externalHelpJoin(ForkJoinTask<?> t) {
// Some hard-to-avoid overlap with tryExternalUnpush
ForkJoinPool p; WorkQueue[] ws; WorkQueue q, w; Submitter z;
ForkJoinTask<?>[] a; int m, s, n;
if (t != null &&
(z = submitters.get()) != null &&
(p = common) != null &&
(ws = p.workQueues) != null &&
(m = ws.length - 1) >= 0 &&
(q = ws[m & z.seed & SQMASK]) != null &&
(a = q.array) != null) {
int am = a.length - 1;
if ((s = q.top) != q.base) {
long j = ((am & (s - 1)) << ASHIFT) + ABASE;
if (U.getObject(a, j) == t &&
U.compareAndSwapInt(q, QLOCK, 0, 1)) {
if (q.array == a && q.top == s &&
U.compareAndSwapObject(a, j, t, null)) {
q.top = s - 1;
q.qlock = 0;
t.doExec();
}
else
q.qlock = 0;
}
}
if (t.status >= 0) {
if (t instanceof CountedCompleter)
p.externalHelpComplete(q, t);
else
p.helpSignal(t, q.poolIndex);
}
}
}
// Exported methods
// Constructors
/**
* Creates a {@code ForkJoinPool} with parallelism equal to {@link
* java.lang.Runtime#availableProcessors}, using the {@linkplain
* #defaultForkJoinWorkerThreadFactory default thread factory},
* no UncaughtExceptionHandler, and non-async LIFO processing mode.
*
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool() {
this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()),
defaultForkJoinWorkerThreadFactory, null, false);
}
/**
* Creates a {@code ForkJoinPool} with the indicated parallelism
* level, the {@linkplain
* #defaultForkJoinWorkerThreadFactory default thread factory},
* no UncaughtExceptionHandler, and non-async LIFO processing mode.
*
* @param parallelism the parallelism level
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism) {
this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
}
/**
* Creates a {@code ForkJoinPool} with the given parameters.
*
* @param parallelism the parallelism level. For default value,
* use {@link java.lang.Runtime#availableProcessors}.
* @param factory the factory for creating new threads. For default value,
* use {@link #defaultForkJoinWorkerThreadFactory}.
* @param handler the handler for internal worker threads that
* terminate due to unrecoverable errors encountered while executing
* tasks. For default value, use {@code null}.
* @param asyncMode if true,
* establishes local first-in-first-out scheduling mode for forked
* tasks that are never joined. This mode may be more appropriate
* than default locally stack-based mode in applications in which
* worker threads only process event-style asynchronous tasks.
* For default value, use {@code false}.
* @throws IllegalArgumentException if parallelism less than or
* equal to zero, or greater than implementation limit
* @throws NullPointerException if the factory is null
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public ForkJoinPool(int parallelism,
ForkJoinWorkerThreadFactory factory,
Thread.UncaughtExceptionHandler handler,
boolean asyncMode) {
checkPermission();
if (factory == null)
throw new NullPointerException();
if (parallelism <= 0 || parallelism > MAX_CAP)
throw new IllegalArgumentException();
this.factory = factory;
this.ueh = handler;
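        // config packs the parallelism level into its low 16 bits
        // (read back via config & SMASK in getParallelism) and the
        // queueing mode into the upper bits (read back via
        // config >>> 16 in getAsyncMode)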
this.config = parallelism | (asyncMode ? (FIFO_QUEUE << 16) : 0);
long np = (long)(-parallelism); // offset ctl counts
this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
int pn = nextPoolId();
StringBuilder sb = new StringBuilder("ForkJoinPool-");
sb.append(Integer.toString(pn));
sb.append("-worker-");
this.workerNamePrefix = sb.toString();
}
/**
* Constructor for common pool, suitable only for static initialization.
* Basically the same as above, but uses smallest possible initial footprint.
*/
ForkJoinPool(int parallelism, long ctl,
ForkJoinWorkerThreadFactory factory,
Thread.UncaughtExceptionHandler handler) {
this.config = parallelism;
this.ctl = ctl;
this.factory = factory;
this.ueh = handler;
this.workerNamePrefix = "ForkJoinPool.commonPool-worker-";
}
/**
* Returns the common pool instance. This pool is statically
* constructed; its run state is unaffected by attempts to {@link
* #shutdown} or {@link #shutdownNow}. However this pool and any
* ongoing processing are automatically terminated upon program
* {@link System#exit}. Any program that relies on asynchronous
* task processing to complete before program termination should
* invoke {@code commonPool().}{@link #awaitQuiescence}, before
* exit.
*
* @return the common pool instance
* @since 1.8
*/
public static ForkJoinPool commonPool() {
// assert common != null : "static init error";
return common;
}
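    /*
     * Editorial sketch (not part of the original file): per the note
     * above, a program relying on asynchronous completion should
     * quiesce the common pool before exiting. doBackgroundWork() is a
     * hypothetical placeholder.
     * <pre> {@code
     * ForkJoinPool.commonPool().execute(new Runnable() {
     *   public void run() { doBackgroundWork(); }
     * });
     * // wait (here, up to one minute) for submitted work to settle
     * ForkJoinPool.commonPool().awaitQuiescence(1L, TimeUnit.MINUTES);
     * }</pre>
     */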
// Execution methods
/**
* Performs the given task, returning its result upon completion.
* If the computation encounters an unchecked Exception or Error,
* it is rethrown as the outcome of this invocation. Rethrown
* exceptions behave in the same way as regular exceptions, but,
* when possible, contain stack traces (as displayed for example
* using {@code ex.printStackTrace()}) of both the current thread
* as well as the thread actually encountering the exception;
* minimally only the latter.
*
* @param task the task
* @return the task's result
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> T invoke(ForkJoinTask<T> task) {
if (task == null)
throw new NullPointerException();
externalPush(task);
return task.join();
}
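    /*
     * Editorial sketch (not part of the original file): a typical
     * blocking top-level call, reusing the hypothetical SumTask shown
     * near getSurplusQueuedTaskCount:
     * <pre> {@code
     * long total = new ForkJoinPool().invoke(new SumTask(a, 0, a.length));
     * }</pre>
     */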
/**
* Arranges for (asynchronous) execution of the given task.
*
* @param task the task
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public void execute(ForkJoinTask<?> task) {
if (task == null)
throw new NullPointerException();
externalPush(task);
}
// AbstractExecutorService methods
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public void execute(Runnable task) {
if (task == null)
throw new NullPointerException();
ForkJoinTask<?> job;
if (task instanceof ForkJoinTask<?>) // avoid re-wrap
job = (ForkJoinTask<?>) task;
else
job = new ForkJoinTask.AdaptedRunnableAction(task);
externalPush(job);
}
/**
* Submits a ForkJoinTask for execution.
*
* @param task the task to submit
* @return the task
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
if (task == null)
throw new NullPointerException();
externalPush(task);
return task;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(Callable<T> task) {
ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
externalPush(job);
return job;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public <T> ForkJoinTask<T> submit(Runnable task, T result) {
ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
externalPush(job);
return job;
}
/**
* @throws NullPointerException if the task is null
* @throws RejectedExecutionException if the task cannot be
* scheduled for execution
*/
public ForkJoinTask<?> submit(Runnable task) {
if (task == null)
throw new NullPointerException();
ForkJoinTask<?> job;
if (task instanceof ForkJoinTask<?>) // avoid re-wrap
job = (ForkJoinTask<?>) task;
else
job = new ForkJoinTask.AdaptedRunnableAction(task);
externalPush(job);
return job;
}
/**
* @throws NullPointerException {@inheritDoc}
* @throws RejectedExecutionException {@inheritDoc}
*/
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
// In previous versions of this class, this method constructed
// a task to run ForkJoinTask.invokeAll, but now external
// invocation of multiple tasks is at least as efficient.
ArrayList<Future<T>> futures = new ArrayList<Future<T>>(tasks.size());
boolean done = false;
try {
for (Callable<T> t : tasks) {
ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
futures.add(f);
externalPush(f);
}
for (int i = 0, size = futures.size(); i < size; i++)
((ForkJoinTask<?>)futures.get(i)).quietlyJoin();
done = true;
return futures;
} finally {
if (!done)
for (int i = 0, size = futures.size(); i < size; i++)
futures.get(i).cancel(false);
}
}
/**
* Returns the factory used for constructing new workers.
*
* @return the factory used for constructing new workers
*/
public ForkJoinWorkerThreadFactory getFactory() {
return factory;
}
/**
* Returns the handler for internal worker threads that terminate
* due to unrecoverable errors encountered while executing tasks.
*
* @return the handler, or {@code null} if none
*/
public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
return ueh;
}
/**
* Returns the targeted parallelism level of this pool.
*
* @return the targeted parallelism level of this pool
*/
public int getParallelism() {
return config & SMASK;
}
/**
* Returns the targeted parallelism level of the common pool.
*
* @return the targeted parallelism level of the common pool
* @since 1.8
*/
public static int getCommonPoolParallelism() {
return commonParallelism;
}
/**
* Returns the number of worker threads that have started but not
* yet terminated. The result returned by this method may differ
* from {@link #getParallelism} when threads are created to
* maintain parallelism when others are cooperatively blocked.
*
* @return the number of worker threads
*/
public int getPoolSize() {
return (config & SMASK) + (short)(ctl >>> TC_SHIFT);
}
/**
* Returns {@code true} if this pool uses local first-in-first-out
* scheduling mode for forked tasks that are never joined.
*
* @return {@code true} if this pool uses async mode
*/
public boolean getAsyncMode() {
return (config >>> 16) == FIFO_QUEUE;
}
/**
* Returns an estimate of the number of worker threads that are
* not blocked waiting to join tasks or for other managed
* synchronization. This method may overestimate the
* number of running threads.
*
* @return the number of worker threads
*/
public int getRunningThreadCount() {
int rc = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null && w.isApparentlyUnblocked())
++rc;
}
}
return rc;
}
/**
* Returns an estimate of the number of threads that are currently
* stealing or executing tasks. This method may overestimate the
* number of active threads.
*
* @return the number of active threads
*/
public int getActiveThreadCount() {
int r = (config & SMASK) + (int)(ctl >> AC_SHIFT);
return (r <= 0) ? 0 : r; // suppress momentarily negative values
}
/**
* Returns {@code true} if all worker threads are currently idle.
* An idle worker is one that cannot obtain a task to execute
* because none are available to steal from other threads, and
* there are no pending submissions to the pool. This method is
* conservative; it might not return {@code true} immediately upon
* idleness of all threads, but will eventually become true if
* threads remain inactive.
*
* @return {@code true} if all threads are currently idle
*/
public boolean isQuiescent() {
return (int)(ctl >> AC_SHIFT) + (config & SMASK) == 0;
}
/**
* Returns an estimate of the total number of tasks stolen from
* one thread's work queue by another. The reported value
* underestimates the actual total number of steals when the pool
* is not quiescent. This value may be useful for monitoring and
* tuning fork/join programs: in general, steal counts should be
* high enough to keep threads busy, but low enough to avoid
* overhead and contention across threads.
*
* @return the number of steals
*/
public long getStealCount() {
long count = stealCount;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.nsteals;
}
}
return count;
}
/**
* Returns an estimate of the total number of tasks currently held
* in queues by worker threads (but not including tasks submitted
* to the pool that have not begun executing). This value is only
* an approximation, obtained by iterating across all threads in
* the pool. This method may be useful for tuning task
* granularities.
*
* @return the number of queued tasks
*/
public long getQueuedTaskCount() {
long count = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 1; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.queueSize();
}
}
return count;
}
/**
* Returns an estimate of the number of tasks submitted to this
* pool that have not yet begun executing. This method may take
* time proportional to the number of submissions.
*
* @return the number of queued submissions
*/
public int getQueuedSubmissionCount() {
int count = 0;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null)
count += w.queueSize();
}
}
return count;
}
/**
* Returns {@code true} if there are any tasks submitted to this
* pool that have not yet begun executing.
*
* @return {@code true} if there are any queued submissions
*/
public boolean hasQueuedSubmissions() {
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null && !w.isEmpty())
return true;
}
}
return false;
}
/**
* Removes and returns the next unexecuted submission if one is
* available. This method may be useful in extensions to this
* class that re-assign work in systems with multiple pools.
*
* @return the next submission, or {@code null} if none
*/
protected ForkJoinTask<?> pollSubmission() {
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; i += 2) {
if ((w = ws[i]) != null && (t = w.poll()) != null)
return t;
}
}
return null;
}
/**
* Removes all available unexecuted submitted and forked tasks
* from scheduling queues and adds them to the given collection,
* without altering their execution status. These may include
* artificially generated or wrapped tasks. This method is
* designed to be invoked only when the pool is known to be
* quiescent. Invocations at other times may not remove all
* tasks. A failure encountered while attempting to add elements
* to collection {@code c} may result in elements being in
* neither, either or both collections when the associated
* exception is thrown. The behavior of this operation is
* undefined if the specified collection is modified while the
* operation is in progress.
*
* @param c the collection to transfer elements into
* @return the number of elements transferred
*/
protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
int count = 0;
WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
while ((t = w.poll()) != null) {
c.add(t);
++count;
}
}
}
}
return count;
}
/**
* Returns a string identifying this pool, as well as its state,
* including indications of run state, parallelism level, and
* worker and task counts.
*
* @return a string identifying this pool, as well as its state
*/
public String toString() {
// Use a single pass through workQueues to collect counts
long qt = 0L, qs = 0L; int rc = 0;
long st = stealCount;
long c = ctl;
WorkQueue[] ws; WorkQueue w;
if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
if ((w = ws[i]) != null) {
int size = w.queueSize();
if ((i & 1) == 0)
qs += size;
else {
qt += size;
st += w.nsteals;
if (w.isApparentlyUnblocked())
++rc;
}
}
}
}
int pc = (config & SMASK);
int tc = pc + (short)(c >>> TC_SHIFT);
int ac = pc + (int)(c >> AC_SHIFT);
if (ac < 0) // ignore transient negative
ac = 0;
String level;
if ((c & STOP_BIT) != 0)
level = (tc == 0) ? "Terminated" : "Terminating";
else
level = plock < 0 ? "Shutting down" : "Running";
return super.toString() +
"[" + level +
", parallelism = " + pc +
", size = " + tc +
", active = " + ac +
", running = " + rc +
", steals = " + st +
", tasks = " + qt +
", submissions = " + qs +
"]";
}
/**
* Possibly initiates an orderly shutdown in which previously
* submitted tasks are executed, but no new tasks will be
* accepted. Invocation has no effect on execution state if this
* is the {@link #commonPool()}, and no additional effect if
* already shut down. Tasks that are in the process of being
* submitted concurrently during the course of this method may or
* may not be rejected.
*
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public void shutdown() {
checkPermission();
tryTerminate(false, true);
}
/**
* Possibly attempts to cancel and/or stop all tasks, and reject
* all subsequently submitted tasks. Invocation has no effect on
* execution state if this is the {@link #commonPool()}, and no
* additional effect if already shut down. Otherwise, tasks that
* are in the process of being submitted or executed concurrently
* during the course of this method may or may not be
* rejected. This method cancels both existing and unexecuted
* tasks, in order to permit termination in the presence of task
* dependencies. So the method always returns an empty list
* (unlike the case for some other Executors).
*
* @return an empty list
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
* java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public List<Runnable> shutdownNow() {
checkPermission();
tryTerminate(true, true);
return Collections.emptyList();
}
/**
* Returns {@code true} if all tasks have completed following shut down.
*
* @return {@code true} if all tasks have completed following shut down
*/
public boolean isTerminated() {
long c = ctl;
return ((c & STOP_BIT) != 0L &&
(short)(c >>> TC_SHIFT) == -(config & SMASK));
}
/**
* Returns {@code true} if the process of termination has
* commenced but not yet completed. This method may be useful for
* debugging. A return of {@code true} reported a sufficient
* period after shutdown may indicate that submitted tasks have
* ignored or suppressed interruption, or are waiting for I/O,
* causing this executor not to properly terminate. (See the
* advisory notes for class {@link ForkJoinTask} stating that
* tasks should not normally entail blocking operations. But if
* they do, they must abort them on interrupt.)
*
* @return {@code true} if terminating but not yet terminated
*/
public boolean isTerminating() {
long c = ctl;
return ((c & STOP_BIT) != 0L &&
(short)(c >>> TC_SHIFT) != -(config & SMASK));
}
/**
* Returns {@code true} if this pool has been shut down.
*
* @return {@code true} if this pool has been shut down
*/
public boolean isShutdown() {
return plock < 0;
}
/**
* Blocks until all tasks have completed execution after a
* shutdown request, or the timeout occurs, or the current thread
* is interrupted, whichever happens first. Because the {@link
* #commonPool()} never terminates until program shutdown, when
* applied to the common pool, this method is equivalent to {@link
* #awaitQuiescence} but always returns {@code false}.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return {@code true} if this executor terminated and
* {@code false} if the timeout elapsed before termination
* @throws InterruptedException if interrupted while waiting
*/
public boolean awaitTermination(long timeout, TimeUnit unit)
throws InterruptedException {
if (Thread.interrupted())
throw new InterruptedException();
if (this == common) {
awaitQuiescence(timeout, unit);
return false;
}
long nanos = unit.toNanos(timeout);
if (isTerminated())
return true;
long startTime = System.nanoTime();
boolean terminated = false;
synchronized (this) {
for (long waitTime = nanos, millis = 0L;;) {
if (terminated = isTerminated() ||
waitTime <= 0L ||
(millis = unit.toMillis(waitTime)) <= 0L)
break;
wait(millis);
waitTime = nanos - (System.nanoTime() - startTime);
}
}
return terminated;
}
/**
* If called by a ForkJoinTask operating in this pool, equivalent
* in effect to {@link ForkJoinTask#helpQuiesce}. Otherwise,
* waits and/or attempts to assist performing tasks until this
* pool {@link #isQuiescent} or the indicated timeout elapses.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
* @return {@code true} if quiescent; {@code false} if the
* timeout elapsed.
*/
public boolean awaitQuiescence(long timeout, TimeUnit unit) {
long nanos = unit.toNanos(timeout);
ForkJoinWorkerThread wt;
Thread thread = Thread.currentThread();
if ((thread instanceof ForkJoinWorkerThread) &&
(wt = (ForkJoinWorkerThread)thread).pool == this) {
helpQuiescePool(wt.workQueue);
return true;
}
long startTime = System.nanoTime();
WorkQueue[] ws;
int r = 0, m;
boolean found = true;
while (!isQuiescent() && (ws = workQueues) != null &&
(m = ws.length - 1) >= 0) {
if (!found) {
if ((System.nanoTime() - startTime) > nanos)
return false;
Thread.yield(); // cannot block
}
found = false;
for (int j = (m + 1) << 2; j >= 0; --j) {
ForkJoinTask<?> t; WorkQueue q; int b;
if ((q = ws[r++ & m]) != null && (b = q.base) - q.top < 0) {
found = true;
if ((t = q.pollAt(b)) != null) {
if (q.base - q.top < 0)
signalWork(q);
t.doExec();
}
break;
}
}
}
return true;
}
/**
* Waits and/or attempts to assist performing tasks indefinitely
* until the {@link #commonPool()} {@link #isQuiescent}.
*/
static void quiesceCommonPool() {
common.awaitQuiescence(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
}
/**
* Interface for extending managed parallelism for tasks running
* in {@link ForkJoinPool}s.
*
* <p>A {@code ManagedBlocker} provides two methods. Method
* {@code isReleasable} must return {@code true} if blocking is
* not necessary. Method {@code block} blocks the current thread
* if necessary (perhaps internally invoking {@code isReleasable}
* before actually blocking). These actions are performed by any
* thread invoking {@link ForkJoinPool#managedBlock}. The
* unusual methods in this API accommodate synchronizers that may,
* but don't usually, block for long periods. Similarly, they
* allow more efficient internal handling of cases in which
* additional workers may be, but usually are not, needed to
* ensure sufficient parallelism. Toward this end,
* implementations of method {@code isReleasable} must be amenable
* to repeated invocation.
*
* <p>For example, here is a ManagedBlocker based on a
* ReentrantLock:
* <pre> {@code
* class ManagedLocker implements ManagedBlocker {
* final ReentrantLock lock;
* boolean hasLock = false;
* ManagedLocker(ReentrantLock lock) { this.lock = lock; }
* public boolean block() {
* if (!hasLock)
* lock.lock();
* return true;
* }
* public boolean isReleasable() {
* return hasLock || (hasLock = lock.tryLock());
* }
* }}</pre>
*
* <p>Here is a class that possibly blocks waiting for an
* item on a given queue:
* <pre> {@code
* class QueueTaker<E> implements ManagedBlocker {
* final BlockingQueue<E> queue;
* volatile E item = null;
* QueueTaker(BlockingQueue<E> q) { this.queue = q; }
* public boolean block() throws InterruptedException {
* if (item == null)
* item = queue.take();
* return true;
* }
* public boolean isReleasable() {
* return item != null || (item = queue.poll()) != null;
* }
* public E getItem() { // call after pool.managedBlock completes
* return item;
* }
* }}</pre>
*/
public static interface ManagedBlocker {
/**
* Possibly blocks the current thread, for example waiting for
* a lock or condition.
*
* @return {@code true} if no additional blocking is necessary
* (i.e., if isReleasable would return true)
* @throws InterruptedException if interrupted while waiting
* (the method is not required to do so, but is allowed to)
*/
boolean block() throws InterruptedException;
/**
* Returns {@code true} if blocking is unnecessary.
*/
boolean isReleasable();
}
/**
* Blocks in accord with the given blocker. If the current thread
* is a {@link ForkJoinWorkerThread}, this method possibly
* arranges for a spare thread to be activated if necessary to
* ensure sufficient parallelism while the current thread is blocked.
*
* <p>If the caller is not a {@link ForkJoinTask}, this method is
* behaviorally equivalent to
* <pre> {@code
* while (!blocker.isReleasable())
* if (blocker.block())
* return;
* }</pre>
*
* If the caller is a {@code ForkJoinTask}, then the pool may
* first be expanded to ensure parallelism, and later adjusted.
*
* @param blocker the blocker
* @throws InterruptedException if blocker.block did so
*/
public static void managedBlock(ManagedBlocker blocker)
throws InterruptedException {
Thread t = Thread.currentThread();
if (t instanceof ForkJoinWorkerThread) {
ForkJoinPool p = ((ForkJoinWorkerThread)t).pool;
while (!blocker.isReleasable()) { // variant of helpSignal
WorkQueue[] ws; WorkQueue q; int m, u;
if ((ws = p.workQueues) != null && (m = ws.length - 1) >= 0) {
for (int i = 0; i <= m; ++i) {
if (blocker.isReleasable())
return;
if ((q = ws[i]) != null && q.base - q.top < 0) {
p.signalWork(q);
if ((u = (int)(p.ctl >>> 32)) >= 0 ||
(u >> UAC_SHIFT) >= 0)
break;
}
}
}
if (p.tryCompensate()) {
try {
do {} while (!blocker.isReleasable() &&
!blocker.block());
} finally {
p.incrementActiveCount();
}
break;
}
}
}
else {
do {} while (!blocker.isReleasable() &&
!blocker.block());
}
}
// AbstractExecutorService overrides. These rely on undocumented
// fact that ForkJoinTask.adapt returns ForkJoinTasks that also
// implement RunnableFuture.
protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
}
protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
return new ForkJoinTask.AdaptedCallable<T>(callable);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long CTL;
private static final long PARKBLOCKER;
private static final int ABASE;
private static final int ASHIFT;
private static final long STEALCOUNT;
private static final long PLOCK;
private static final long INDEXSEED;
private static final long QLOCK;
static {
// initialize field offsets for CAS etc
try {
U = getUnsafe();
Class<?> k = ForkJoinPool.class;
CTL = U.objectFieldOffset
(k.getDeclaredField("ctl"));
STEALCOUNT = U.objectFieldOffset
(k.getDeclaredField("stealCount"));
PLOCK = U.objectFieldOffset
(k.getDeclaredField("plock"));
INDEXSEED = U.objectFieldOffset
(k.getDeclaredField("indexSeed"));
Class<?> tk = Thread.class;
PARKBLOCKER = U.objectFieldOffset
(tk.getDeclaredField("parkBlocker"));
Class<?> wk = WorkQueue.class;
QLOCK = U.objectFieldOffset
(wk.getDeclaredField("qlock"));
Class<?> ak = ForkJoinTask[].class;
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
submitters = new ThreadLocal<Submitter>();
ForkJoinWorkerThreadFactory fac = defaultForkJoinWorkerThreadFactory =
new DefaultForkJoinWorkerThreadFactory();
modifyThreadPermission = new RuntimePermission("modifyThread");
/*
* Establish common pool parameters. For extra caution,
* computations to set up common pool state are here; the
* constructor just assigns these values to fields.
*/
int par = 0;
Thread.UncaughtExceptionHandler handler = null;
try { // TBD: limit or report ignored exceptions?
String pp = System.getProperty
("java.util.concurrent.ForkJoinPool.common.parallelism");
String hp = System.getProperty
("java.util.concurrent.ForkJoinPool.common.exceptionHandler");
String fp = System.getProperty
("java.util.concurrent.ForkJoinPool.common.threadFactory");
if (fp != null)
fac = ((ForkJoinWorkerThreadFactory)ClassLoader.
getSystemClassLoader().loadClass(fp).newInstance());
if (hp != null)
handler = ((Thread.UncaughtExceptionHandler)ClassLoader.
getSystemClassLoader().loadClass(hp).newInstance());
if (pp != null)
par = Integer.parseInt(pp);
} catch (Exception ignore) {
}
if (par <= 0)
par = Runtime.getRuntime().availableProcessors();
if (par > MAX_CAP)
par = MAX_CAP;
commonParallelism = par;
long np = (long)(-par); // precompute initial ctl value
long ct = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
common = new ForkJoinPool(par, ct, fac, handler);
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
} | 0true
| src_main_java_jsr166y_ForkJoinPool.java |
277 | @SuppressWarnings("serial")
public abstract class OCommandRequestTextAbstract extends OCommandRequestAbstract implements OCommandRequestText {
protected String text;
protected OCommandRequestTextAbstract() {
}
protected OCommandRequestTextAbstract(final String iText) {
if (iText == null)
throw new IllegalArgumentException("Text cannot be null");
text = iText.trim();
}
/**
* Delegates the execution to the configured command executor.
*/
@SuppressWarnings("unchecked")
public <RET> RET execute(final Object... iArgs) {
setParameters(iArgs);
return (RET) ODatabaseRecordThreadLocal.INSTANCE.get().getStorage().command(this);
}
public String getText() {
return text;
}
public OCommandRequestText setText(final String iText) {
this.text = iText;
return this;
}
public OSerializableStream fromStream(byte[] iStream) throws OSerializationException {
final OMemoryStream buffer = new OMemoryStream(iStream);
fromStream(buffer);
return this;
}
public byte[] toStream() throws OSerializationException {
final OMemoryStream buffer = new OMemoryStream();
return toStream(buffer);
}
@Override
public String toString() {
return "?." + text;
}
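/*
 * Wire layout written by toStream(OMemoryStream) and read back by
 * fromStream(OMemoryStream), as implemented below:
 *   1. the command text (string)
 *   2. boolean: simple parameters present? If true, followed by the
 *      bytes of an ODocument carrying field "params"
 *   3. boolean: composite-key parameters present? If true, followed
 *      by the bytes of an ODocument carrying field "compositeKeyParams"
 * When no parameters are set at all, both booleans are written as false.
 */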
protected byte[] toStream(final OMemoryStream buffer) {
buffer.set(text);
if (parameters == null || parameters.size() == 0) {
// simple params are absent
buffer.set(false);
// composite keys are absent
buffer.set(false);
} else {
final Map<Object, Object> params = new HashMap<Object, Object>();
final Map<Object, byte[]> compositeKeyParams = new HashMap<Object, byte[]>();
for (final Entry<Object, Object> paramEntry : parameters.entrySet())
if (paramEntry.getValue() instanceof OCompositeKey) {
final OCompositeKey compositeKey = (OCompositeKey) paramEntry.getValue();
final int bufferSize = OCompositeKeySerializer.INSTANCE.getObjectSize(compositeKey);
final byte[] stream = new byte[bufferSize];
OCompositeKeySerializer.INSTANCE.serialize(compositeKey, stream, 0);
compositeKeyParams.put(paramEntry.getKey(), stream);
} else if (paramEntry.getValue() instanceof String) {
final StringBuilder builder = new StringBuilder();
ORecordSerializerStringAbstract.simpleValueToStream(builder, OType.STRING, paramEntry.getValue());
params.put(paramEntry.getKey(), builder.toString());
} else
params.put(paramEntry.getKey(), paramEntry.getValue());
buffer.set(!params.isEmpty());
if (!params.isEmpty()) {
final ODocument param = new ODocument();
param.field("params", params);
buffer.set(param.toStream());
}
buffer.set(!compositeKeyParams.isEmpty());
if (!compositeKeyParams.isEmpty()) {
final ODocument compositeKey = new ODocument();
compositeKey.field("compositeKeyParams", compositeKeyParams);
buffer.set(compositeKey.toStream());
}
}
return buffer.toByteArray();
}
protected void fromStream(final OMemoryStream buffer) {
text = buffer.getAsString();
parameters = null;
final boolean simpleParams = buffer.getAsBoolean();
if (simpleParams) {
final byte[] paramBuffer = buffer.getAsByteArray();
final ODocument param = new ODocument();
param.fromStream(paramBuffer);
Map<String, Object> params = param.field("params");
parameters = new HashMap<Object, Object>();
for (Entry<String, Object> p : params.entrySet()) {
final Object value;
if (p.getValue() instanceof String)
value = ORecordSerializerStringAbstract.getTypeValue((String) p.getValue());
else
value = p.getValue();
if (Character.isDigit(p.getKey().charAt(0)))
parameters.put(Integer.parseInt(p.getKey()), value);
else
parameters.put(p.getKey(), value);
}
}
final boolean compositeKeyParamsPresent = buffer.getAsBoolean();
if (compositeKeyParamsPresent) {
final byte[] paramBuffer = buffer.getAsByteArray();
final ODocument param = new ODocument();
param.fromStream(paramBuffer);
final Map<String, Object> compositeKeyParams = param.field("compositeKeyParams");
if (parameters == null)
parameters = new HashMap<Object, Object>();
for (final Entry<String, Object> p : compositeKeyParams.entrySet()) {
final Object value = OCompositeKeySerializer.INSTANCE
.deserialize(OStringSerializerHelper.getBinaryContent(p.getValue()), 0);
if (Character.isDigit(p.getKey().charAt(0)))
parameters.put(Integer.parseInt(p.getKey()), value);
else
parameters.put(p.getKey(), value);
}
}
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_command_OCommandRequestTextAbstract.java |
760 | public static final class SBTreeEntry<K, V> implements Map.Entry<K, V>, Comparable<SBTreeEntry<K, V>> {
private final Comparator<? super K> comparator = ODefaultComparator.INSTANCE;
public final OBonsaiBucketPointer leftChild;
public final OBonsaiBucketPointer rightChild;
public final K key;
public final V value;
public SBTreeEntry(OBonsaiBucketPointer leftChild, OBonsaiBucketPointer rightChild, K key, V value) {
this.leftChild = leftChild;
this.rightChild = rightChild;
this.key = key;
this.value = value;
}
@Override
public K getKey() {
return key;
}
@Override
public V getValue() {
return value;
}
@Override
public V setValue(V value) {
throw new UnsupportedOperationException("SBTreeEntry.setValue");
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
SBTreeEntry that = (SBTreeEntry) o;
if (!leftChild.equals(that.leftChild))
return false;
if (!rightChild.equals(that.rightChild))
return false;
if (!key.equals(that.key))
return false;
if (value != null ? !value.equals(that.value) : that.value != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = leftChild.hashCode();
result = 31 * result + rightChild.hashCode();
result = 31 * result + key.hashCode();
result = 31 * result + (value != null ? value.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "SBTreeEntry{" + "leftChild=" + leftChild + ", rightChild=" + rightChild + ", key=" + key + ", value=" + value + '}';
}
@Override
public int compareTo(SBTreeEntry<K, V> other) {
return comparator.compare(key, other.key);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_sbtreebonsai_local_OSBTreeBonsaiBucket.java |
1,637 | public static final Validator DOUBLE = new Validator() {
@Override
public String validate(String setting, String value) {
try {
Double.parseDouble(value);
} catch (NumberFormatException ex) {
return "cannot parse value [" + value + "] as a double";
}
return null;
}
}; | 0true
| src_main_java_org_elasticsearch_cluster_settings_Validator.java |
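The validator above encodes a simple contract: return null when the value parses, otherwise return a human-readable error string. A minimal caller sketch, assuming the DOUBLE constant lives on the Validator interface as the file path suggests (the helper name applySetting is illustrative):

// Illustrative helper: applies the validator and fails fast on bad input.
static void applySetting(String key, String value) {
    String error = Validator.DOUBLE.validate(key, value); // null means "valid"
    if (error != null) {
        throw new IllegalArgumentException("invalid setting [" + key + "]: " + error);
    }
}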
2,709 | cluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return null;
}
@Override
public void doAfterNodes(int numNodes, final Client activeClient) throws Exception {
if (numNodes == 1) {
assertThat(awaitBusy(new Predicate<Object>() {
@Override
public boolean apply(Object input) {
logger.info("--> running cluster_health (wait for the shards to startup)");
ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2").waitForActiveShards(4)).actionGet();
logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
return (!clusterHealth.isTimedOut()) && clusterHealth.getStatus() == ClusterHealthStatus.YELLOW;
}
}, 30, TimeUnit.SECONDS), equalTo(true));
logger.info("--> one node is closed -- index 1 document into the remaining nodes");
activeClient.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).get();
assertNoFailures(activeClient.admin().indices().prepareRefresh().get());
for (int i = 0; i < 10; i++) {
assertHitCount(activeClient.prepareCount().setQuery(matchAllQuery()).get(), 3l);
}
}
}
}); | 0true
| src_test_java_org_elasticsearch_gateway_local_QuorumLocalGatewayTests.java |
1,700 | public class HashedBytesArray implements BytesReference {
private final byte[] bytes;
/**
* Cache the hash code for the string
*/
private int hash; // Defaults to 0
public HashedBytesArray(byte[] bytes) {
this.bytes = bytes;
}
@Override
public byte get(int index) {
return bytes[index];
}
@Override
public int length() {
return bytes.length;
}
@Override
public BytesReference slice(int from, int length) {
if (from < 0 || (from + length) > bytes.length) {
throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + bytes.length + "], with slice parameters from [" + from + "], length [" + length + "]");
}
return new BytesArray(bytes, from, length);
}
@Override
public StreamInput streamInput() {
return new BytesStreamInput(bytes, false);
}
@Override
public void writeTo(OutputStream os) throws IOException {
os.write(bytes);
}
@Override
public byte[] toBytes() {
return bytes;
}
@Override
public BytesArray toBytesArray() {
return new BytesArray(bytes);
}
@Override
public BytesArray copyBytesArray() {
byte[] copy = new byte[bytes.length];
System.arraycopy(bytes, 0, copy, 0, bytes.length);
return new BytesArray(copy);
}
@Override
public ChannelBuffer toChannelBuffer() {
return ChannelBuffers.wrappedBuffer(bytes, 0, bytes.length);
}
@Override
public boolean hasArray() {
return true;
}
@Override
public byte[] array() {
return bytes;
}
@Override
public int arrayOffset() {
return 0;
}
@Override
public String toUtf8() {
if (bytes.length == 0) {
return "";
}
return new String(bytes, Charsets.UTF_8);
}
@Override
public BytesRef toBytesRef() {
return new BytesRef(bytes);
}
@Override
public BytesRef copyBytesRef() {
byte[] copy = new byte[bytes.length];
System.arraycopy(bytes, 0, copy, 0, bytes.length);
return new BytesRef(copy);
}
@Override
public int hashCode() {
if (hash == 0) {
hash = Helper.bytesHashCode(this);
}
return hash;
}
@Override
public boolean equals(Object obj) {
return Helper.bytesEqual(this, (BytesReference) obj);
}
} | 1no label
| src_main_java_org_elasticsearch_common_bytes_HashedBytesArray.java |
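The hashCode above uses the lazy, racy-but-benign caching idiom also found in java.lang.String: the int write is atomic and every thread computes the same value, so the worst case is redundant recomputation. A self-contained distillation (the class name is illustrative); note the shared quirk that a value hashing to exactly 0 is recomputed on every call:

final class CachedHash {
    private final byte[] bytes;
    private int hash; // 0 means "not computed yet"

    CachedHash(byte[] bytes) { this.bytes = bytes.clone(); }

    @Override
    public int hashCode() {
        int h = hash;
        if (h == 0) { // benign race: at worst several threads compute the same h
            h = java.util.Arrays.hashCode(bytes);
            hash = h;
        }
        return h;
    }
}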
1,683 | runnable = new Runnable() { public void run() { map.tryRemove(null, 1, TimeUnit.SECONDS); } }; | 0true
| hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java |
447 | trackedSet.addChangeListener(new OMultiValueChangeListener<String, String>() {
public void onAfterRecordChanged(final OMultiValueChangeEvent<String, String> event) {
changed.value = true;
}
}); | 0true
| core_src_test_java_com_orientechnologies_orient_core_db_record_TrackedSetTest.java |
575 | public class OptimizeAction extends IndicesAction<OptimizeRequest, OptimizeResponse, OptimizeRequestBuilder> {
public static final OptimizeAction INSTANCE = new OptimizeAction();
public static final String NAME = "indices/optimize";
private OptimizeAction() {
super(NAME);
}
@Override
public OptimizeResponse newResponse() {
return new OptimizeResponse();
}
@Override
public OptimizeRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new OptimizeRequestBuilder(client);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_optimize_OptimizeAction.java |
1,254 | @Deprecated
public class SimpleTaxModule implements TaxModule {
public static final String MODULENAME = "simpleTaxModule";
protected String name = MODULENAME;
protected Map<String, Double> itemPostalCodeTaxRateMap;
protected Map<String, Double> itemCityTaxRateMap;
protected Map<String, Double> itemStateTaxRateMap;
protected Map<String, Double> itemCountryTaxRateMap;
protected Map<String, Double> fulfillmentGroupPostalCodeTaxRateMap;
protected Map<String, Double> fulfillmentGroupCityTaxRateMap;
protected Map<String, Double> fulfillmentGroupStateTaxRateMap;
protected Map<String, Double> fulfillmentGroupCountryTaxRateMap;
protected Double defaultItemTaxRate;
protected Double defaultFulfillmentGroupTaxRate;
protected boolean taxFees;
@Override
public Order calculateTaxForOrder(Order order) throws TaxException {
for (FulfillmentGroup fulfillmentGroup : order.getFulfillmentGroups()) {
// Set taxes on the fulfillment group items
for (FulfillmentGroupItem fgItem : fulfillmentGroup.getFulfillmentGroupItems()) {
if (isItemTaxable(fgItem)) {
Double factor = determineItemTaxRate(fulfillmentGroup.getAddress());
if (factor != null && factor.compareTo(0d) != 0) {
TaxDetail tax;
checkDetail: {
for (TaxDetail detail : fgItem.getTaxes()) {
if (detail.getType().equals(TaxType.COMBINED)) {
tax = detail;
break checkDetail;
}
}
tax = new TaxDetailImpl();
tax.setType(TaxType.COMBINED);
fgItem.getTaxes().add(tax);
}
tax.setRate(new BigDecimal(factor));
tax.setAmount(fgItem.getTotalItemTaxableAmount().multiply(factor));
}
}
}
for (FulfillmentGroupFee fgFee : fulfillmentGroup.getFulfillmentGroupFees()) {
if (isFeeTaxable(fgFee)) {
Double factor = determineItemTaxRate(fulfillmentGroup.getAddress());
if (factor != null && factor.compareTo(0d) != 0) {
TaxDetail tax;
checkDetail: {
for (TaxDetail detail : fgFee.getTaxes()) {
if (detail.getType().equals(TaxType.COMBINED)) {
tax = detail;
break checkDetail;
}
}
tax = new TaxDetailImpl();
tax.setType(TaxType.COMBINED);
fgFee.getTaxes().add(tax);
}
tax.setRate(new BigDecimal(factor));
tax.setAmount(fgFee.getAmount().multiply(factor));
}
}
}
Double factor = determineTaxRateForFulfillmentGroup(fulfillmentGroup);
if (factor != null && factor.compareTo(0d) != 0) {
TaxDetail tax;
checkDetail: {
for (TaxDetail detail : fulfillmentGroup.getTaxes()) {
if (detail.getType().equals(TaxType.COMBINED)) {
tax = detail;
break checkDetail;
}
}
tax = new TaxDetailImpl();
tax.setType(TaxType.COMBINED);
fulfillmentGroup.getTaxes().add(tax);
}
tax.setRate(new BigDecimal(factor));
tax.setAmount(fulfillmentGroup.getFulfillmentPrice().multiply(factor));
}
}
return order;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
/**
* Returns the tax rate for the passed in postal code, or
* null if no match is found.
*
* @param postalCodeTaxRateMap
* @param postalCode
* @return the configured rate, or null
*/
public Double lookupPostalCodeRate(Map<String,Double> postalCodeTaxRateMap, String postalCode) {
if (postalCodeTaxRateMap != null && postalCode != null) {
return postalCodeTaxRateMap.get(postalCode);
}
return null;
}
/**
* Changes the city to upper case before checking the
* configuration. Returns the tax rate for the passed in
* city, or null if no match is found.
*
* @param cityTaxRateMap
* @param city
* @return the configured rate, or null
*/
public Double lookupCityRate(Map<String,Double> cityTaxRateMap, String city) {
if (cityTaxRateMap != null && city != null) {
city = city.toUpperCase();
return cityTaxRateMap.get(city);
}
return null;
}
/**
* Returns the tax rate for the passed in state, or
* null if no match is found.
*
* First checks the abbreviation (uppercase) followed by the name (uppercase).
*
* @param stateTaxRateMap
* @param state
* @return the configured rate, or null
*/
public Double lookupStateRate(Map<String,Double> stateTaxRateMap, State state) {
if (stateTaxRateMap != null && state != null && state.getAbbreviation() != null) {
String stateAbbr = state.getAbbreviation().toUpperCase();
Double rate = stateTaxRateMap.get(stateAbbr);
if (rate == null && state.getName() != null) {
String stateName = state.getName().toUpperCase();
return stateTaxRateMap.get(stateName);
} else {
return rate;
}
}
return null;
}
/**
* Returns the tax rate for the passed in country, or
* null if no match is found.
*
* First checks the abbreviation (uppercase) followed by the name (uppercase).
*
* @param countryTaxRateMap
* @param country
* @return the configured rate, or null
*/
public Double lookupCountryRate(Map<String,Double> countryTaxRateMap, Country country) {
if (countryTaxRateMap != null && country != null && country.getAbbreviation() != null) {
String cntryAbbr = country.getAbbreviation().toUpperCase();
Double rate = countryTaxRateMap.get(cntryAbbr);
if (rate == null && country.getName() != null) {
String countryName = country.getName().toUpperCase();
return countryTaxRateMap.get(countryName);
} else {
return rate;
}
}
return null;
}
protected boolean isItemTaxable(FulfillmentGroupItem item) {
return item.getOrderItem().isTaxable();
}
protected boolean isFeeTaxable(FulfillmentGroupFee fee) {
return fee.isTaxable();
}
/**
* Uses the passed in address to determine the tax rate for an item.
*
* Checks the configured maps in order - (postal code, city, state, country),
* falling back to the default item tax rate (or 0) when nothing matches.
*
* @param address
* @return the resolved tax rate
*/
public Double determineItemTaxRate(Address address) {
if (address != null) {
Double postalCodeRate = lookupPostalCodeRate(itemPostalCodeTaxRateMap, address.getPostalCode());
if (postalCodeRate != null) {
return postalCodeRate;
}
Double cityCodeRate = lookupCityRate(itemCityTaxRateMap, address.getCity());
if (cityCodeRate != null) {
return cityCodeRate;
}
Double stateCodeRate = lookupStateRate(itemStateTaxRateMap, address.getState());
if (stateCodeRate != null) {
return stateCodeRate;
}
Double countryCodeRate = lookupCountryRate(itemCountryTaxRateMap, address.getCountry());
if (countryCodeRate != null) {
return countryCodeRate;
}
}
if (defaultItemTaxRate != null) {
return defaultItemTaxRate;
} else {
return 0d;
}
}
/**
* Uses the fulfillment group's address to determine its tax rate,
* honoring the group's isShippingPriceTaxable flag.
*
* Checks the configured maps in order - (postal code, city, state, country),
* falling back to the default fulfillment group tax rate (or 0).
*
* @param fulfillmentGroup
* @return the resolved tax rate
*/
public Double determineTaxRateForFulfillmentGroup(FulfillmentGroup fulfillmentGroup) {
boolean isTaxable = true;
if (fulfillmentGroup.isShippingPriceTaxable() != null) {
isTaxable = fulfillmentGroup.isShippingPriceTaxable();
}
if (isTaxable) {
Address address = fulfillmentGroup.getAddress();
if (address != null) {
Double postalCodeRate = lookupPostalCodeRate(fulfillmentGroupPostalCodeTaxRateMap, address.getPostalCode());
if (postalCodeRate != null) {
return postalCodeRate;
}
Double cityCodeRate = lookupCityRate(fulfillmentGroupCityTaxRateMap, address.getCity());
if (cityCodeRate != null) {
return cityCodeRate;
}
Double stateCodeRate = lookupStateRate(fulfillmentGroupStateTaxRateMap, address.getState());
if (stateCodeRate != null) {
return stateCodeRate;
}
Double countryCodeRate = lookupCountryRate(fulfillmentGroupCountryTaxRateMap, address.getCountry());
if (countryCodeRate != null) {
return countryCodeRate;
}
}
if (defaultFulfillmentGroupTaxRate != null) {
return defaultFulfillmentGroupTaxRate;
}
}
return 0d;
}
public Map<String, Double> getItemPostalCodeTaxRateMap() {
return itemPostalCodeTaxRateMap;
}
public void setItemPostalCodeTaxRateMap(Map<String, Double> itemPostalCodeTaxRateMap) {
this.itemPostalCodeTaxRateMap = itemPostalCodeTaxRateMap;
}
public Map<String, Double> getItemCityTaxRateMap() {
return itemCityTaxRateMap;
}
public void setItemCityTaxRateMap(Map<String, Double> itemCityTaxRateMap) {
this.itemCityTaxRateMap = itemCityTaxRateMap;
}
public Map<String, Double> getItemStateTaxRateMap() {
return itemStateTaxRateMap;
}
public void setItemStateTaxRateMap(Map<String, Double> itemStateTaxRateMap) {
this.itemStateTaxRateMap = itemStateTaxRateMap;
}
public Map<String, Double> getItemCountryTaxRateMap() {
return itemCountryTaxRateMap;
}
public void setItemCountryTaxRateMap(Map<String, Double> itemCountryTaxRateMap) {
this.itemCountryTaxRateMap = itemCountryTaxRateMap;
}
public Map<String, Double> getFulfillmentGroupPostalCodeTaxRateMap() {
return fulfillmentGroupPostalCodeTaxRateMap;
}
public void setFulfillmentGroupPostalCodeTaxRateMap(Map<String, Double> fulfillmentGroupPostalCodeTaxRateMap) {
this.fulfillmentGroupPostalCodeTaxRateMap = fulfillmentGroupPostalCodeTaxRateMap;
}
public Map<String, Double> getFulfillmentGroupCityTaxRateMap() {
return fulfillmentGroupCityTaxRateMap;
}
public void setFulfillmentGroupCityTaxRateMap(Map<String, Double> fulfillmentGroupCityTaxRateMap) {
this.fulfillmentGroupCityTaxRateMap = fulfillmentGroupCityTaxRateMap;
}
public Map<String, Double> getFulfillmentGroupStateTaxRateMap() {
return fulfillmentGroupStateTaxRateMap;
}
public void setFulfillmentGroupStateTaxRateMap(Map<String, Double> fulfillmentGroupStateTaxRateMap) {
this.fulfillmentGroupStateTaxRateMap = fulfillmentGroupStateTaxRateMap;
}
public Map<String, Double> getFulfillmentGroupCountryTaxRateMap() {
return fulfillmentGroupCountryTaxRateMap;
}
public void setFulfillmentGroupCountryTaxRateMap(Map<String, Double> fulfillmentGroupCountryTaxRateMap) {
this.fulfillmentGroupCountryTaxRateMap = fulfillmentGroupCountryTaxRateMap;
}
public Double getDefaultItemTaxRate() {
return defaultItemTaxRate;
}
public void setDefaultItemTaxRate(Double defaultItemTaxRate) {
this.defaultItemTaxRate = defaultItemTaxRate;
}
public Double getDefaultFulfillmentGroupTaxRate() {
return defaultFulfillmentGroupTaxRate;
}
public void setDefaultFulfillmentGroupTaxRate(Double defaultFulfillmentGroupTaxRate) {
this.defaultFulfillmentGroupTaxRate = defaultFulfillmentGroupTaxRate;
}
/**
* Use getDefaultItemTaxRate instead.
* @deprecated
* @return
*/
@Deprecated
public Double getFactor() {
return getDefaultItemTaxRate();
}
/**
* Use setDefaultItemTaxRate instead.
* @deprecated
* @return
*/
@Deprecated
public void setFactor(Double factor) {
setDefaultItemTaxRate(factor);
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_pricing_service_module_SimpleTaxModule.java |
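determineItemTaxRate above resolves the most specific configured rate first: postal code, then city, then state, then country, then the default. A distilled, self-contained sketch of that precedence chain over plain maps (the Broadleaf Address/State/Country types are deliberately omitted; all names here are illustrative):

import java.util.Map;

final class RatePrecedence {
    // Most specific configured map wins; otherwise fall back to the default.
    static double resolve(Map<String, Double> byPostal, Map<String, Double> byCity,
                          Map<String, Double> byState, Map<String, Double> byCountry,
                          String postal, String city, String state, String country,
                          double defaultRate) {
        Double rate;
        if (byPostal != null && (rate = byPostal.get(postal)) != null) return rate;
        if (byCity != null && city != null && (rate = byCity.get(city.toUpperCase())) != null) return rate;
        if (byState != null && (rate = byState.get(state)) != null) return rate;
        if (byCountry != null && (rate = byCountry.get(country)) != null) return rate;
        return defaultRate;
    }
}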
3,615 | @Beta
public class XAResourceImpl implements XAResource {
private final TransactionManagerServiceImpl transactionManager;
private final TransactionContextImpl transactionContext;
private final ILogger logger;
private int transactionTimeoutSeconds;
public XAResourceImpl(TransactionManagerServiceImpl transactionManager,
TransactionContextImpl transactionContext, NodeEngineImpl nodeEngine) {
this.transactionManager = transactionManager;
this.transactionContext = transactionContext;
this.logger = nodeEngine.getLogger(XAResourceImpl.class);
}
//XAResource --START
@Override
public synchronized void start(Xid xid, int flags) throws XAException {
nullCheck(xid);
switch (flags) {
case TMNOFLAGS:
if (getTransaction(xid) != null) {
final XAException xaException = new XAException(XAException.XAER_DUPID);
logger.severe("Duplicate xid: " + xid, xaException);
throw xaException;
}
try {
final Transaction transaction = getTransaction();
transactionManager.addManagedTransaction(xid, transaction);
transaction.begin();
} catch (IllegalStateException e) {
logger.severe(e);
throw new XAException(XAException.XAER_INVAL);
}
break;
case TMRESUME:
case TMJOIN:
break;
default:
throw new XAException(XAException.XAER_INVAL);
}
}
@Override
public synchronized void end(Xid xid, int flags) throws XAException {
nullCheck(xid);
final TransactionImpl transaction = (TransactionImpl) getTransaction();
final SerializableXID sXid = transaction.getXid();
if (sXid == null || !sXid.equals(xid)) {
logger.severe("started xid: " + sXid + " and given xid : " + xid + " not equal!!!");
}
validateTx(transaction, State.ACTIVE);
switch (flags) {
case XAResource.TMSUCCESS:
//successfully end.
break;
case XAResource.TMFAIL:
transaction.setRollbackOnly();
throw new XAException(XAException.XA_RBROLLBACK);
// break;
case XAResource.TMSUSPEND:
break;
default:
throw new XAException(XAException.XAER_INVAL);
}
}
@Override
public synchronized int prepare(Xid xid) throws XAException {
nullCheck(xid);
final TransactionImpl transaction = (TransactionImpl) getTransaction();
final SerializableXID sXid = transaction.getXid();
if (sXid == null || !sXid.equals(xid)) {
logger.severe("started xid: " + sXid + " and given xid : " + xid + " not equal!!!");
}
validateTx(transaction, State.ACTIVE);
try {
transaction.prepare();
} catch (TransactionException e) {
throw new XAException(XAException.XAER_RMERR);
}
return XAResource.XA_OK;
}
@Override
public synchronized void commit(Xid xid, boolean onePhase) throws XAException {
nullCheck(xid);
final Transaction transaction = getTransaction(xid);
if (onePhase) {
validateTx(transaction, State.ACTIVE);
transaction.prepare();
}
validateTx(transaction, State.PREPARED);
try {
transaction.commit();
transactionManager.removeManagedTransaction(xid);
} catch (TransactionException e) {
throw new XAException(XAException.XAER_RMERR);
}
}
@Override
public synchronized void rollback(Xid xid) throws XAException {
nullCheck(xid);
final Transaction transaction = getTransaction(xid);
//NO_TXN means do not validate state
validateTx(transaction, State.NO_TXN);
try {
transaction.rollback();
transactionManager.removeManagedTransaction(xid);
} catch (TransactionException e) {
throw new XAException(XAException.XAER_RMERR);
}
}
@Override
public synchronized void forget(Xid xid) throws XAException {
throw new XAException(XAException.XAER_PROTO);
}
@Override
public synchronized boolean isSameRM(XAResource xaResource) throws XAException {
if (xaResource instanceof XAResourceImpl) {
XAResourceImpl other = (XAResourceImpl) xaResource;
return transactionManager.equals(other.transactionManager);
}
return false;
}
@Override
public synchronized Xid[] recover(int flag) throws XAException {
return transactionManager.recover();
}
@Override
public synchronized int getTransactionTimeout() throws XAException {
return transactionTimeoutSeconds;
}
@Override
public synchronized boolean setTransactionTimeout(int seconds) throws XAException {
this.transactionTimeoutSeconds = seconds;
return false;
}
//XAResource --END
private void nullCheck(Xid xid) throws XAException {
if (xid == null) {
final XAException xaException = new XAException(XAException.XAER_INVAL);
logger.severe("Xid cannot be null!!!", xaException);
throw xaException;
}
}
private void validateTx(Transaction tx, State state) throws XAException {
if (tx == null) {
final XAException xaException = new XAException(XAException.XAER_NOTA);
logger.severe("Transaction is not available!!!", xaException);
throw xaException;
}
final State txState = tx.getState();
switch (state) {
case ACTIVE:
if (txState != State.ACTIVE) {
final XAException xaException = new XAException(XAException.XAER_NOTA);
logger.severe("Transaction is not active!!! state: " + txState, xaException);
throw xaException;
}
break;
case PREPARED:
if (txState != State.PREPARED) {
final XAException xaException = new XAException(XAException.XAER_INVAL);
logger.severe("Transaction is not prepared!!! state: " + txState, xaException);
throw xaException;
}
break;
default:
break;
}
}
private Transaction getTransaction(Xid xid) {
return transactionManager.getManagedTransaction(xid);
}
private Transaction getTransaction() {
return transactionContext.getTransaction();
}
@Override
public String toString() {
final String txnId = transactionContext.getTxnId();
final StringBuilder sb = new StringBuilder("XAResourceImpl{");
sb.append("txdId=").append(txnId);
sb.append(", transactionTimeoutSeconds=").append(transactionTimeoutSeconds);
sb.append('}');
return sb.toString();
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_transaction_impl_XAResourceImpl.java |
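The resource above implements the standard two-phase XA protocol, so a transaction manager drives it in a fixed order: start, end, prepare, then commit or rollback. A sketch of that call sequence using only the javax.transaction.xa interfaces; how the XAResource and Xid are obtained is environment-specific, and the driver class is illustrative:

import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;

final class TwoPhaseDriver {
    static void runTwoPhase(XAResource resource, Xid xid) throws XAException {
        resource.start(xid, XAResource.TMNOFLAGS); // associate the branch
        try {
            // ... transactional work against the resource goes here ...
            resource.end(xid, XAResource.TMSUCCESS); // disassociate, success
            if (resource.prepare(xid) == XAResource.XA_OK) {
                resource.commit(xid, false); // second phase, not one-phase
            }
        } catch (XAException e) {
            resource.rollback(xid); // give up on any XA failure
            throw e;
        }
    }
}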
1,453 | public class DiscoveryNodeFilters {
public static enum OpType {
AND,
OR
}
public static DiscoveryNodeFilters buildFromSettings(OpType opType, String prefix, Settings settings) {
return buildFromKeyValue(opType, settings.getByPrefix(prefix).getAsMap());
}
public static DiscoveryNodeFilters buildFromKeyValue(OpType opType, Map<String, String> filters) {
Map<String, String[]> bFilters = new HashMap<String, String[]>();
for (Map.Entry<String, String> entry : filters.entrySet()) {
String[] values = Strings.splitStringByCommaToArray(entry.getValue());
if (values.length > 0) {
bFilters.put(entry.getKey(), values);
}
}
if (bFilters.isEmpty()) {
return null;
}
return new DiscoveryNodeFilters(opType, bFilters);
}
private final Map<String, String[]> filters;
private final OpType opType;
DiscoveryNodeFilters(OpType opType, Map<String, String[]> filters) {
this.opType = opType;
this.filters = filters;
}
public boolean match(DiscoveryNode node) {
for (Map.Entry<String, String[]> entry : filters.entrySet()) {
String attr = entry.getKey();
String[] values = entry.getValue();
if ("_ip".equals(attr)) {
for (String value : values) {
if (Regex.simpleMatch(value, node.getHostAddress())) {
if (opType == OpType.OR) {
return true;
}
} else {
if (opType == OpType.AND) {
return false;
}
}
}
} else if ("_host".equals(attr)) {
for (String value : values) {
if (Regex.simpleMatch(value, node.getHostName())) {
if (opType == OpType.OR) {
return true;
}
} else {
if (opType == OpType.AND) {
return false;
}
}
if (Regex.simpleMatch(value, node.getHostAddress())) {
if (opType == OpType.OR) {
return true;
}
} else {
if (opType == OpType.AND) {
return false;
}
}
}
} else if ("_id".equals(attr)) {
for (String value : values) {
if (node.id().equals(value)) {
if (opType == OpType.OR) {
return true;
}
} else {
if (opType == OpType.AND) {
return false;
}
}
}
} else if ("_name".equals(attr) || "name".equals(attr)) {
for (String value : values) {
if (Regex.simpleMatch(value, node.name())) {
if (opType == OpType.OR) {
return true;
}
} else {
if (opType == OpType.AND) {
return false;
}
}
}
} else {
String nodeAttributeValue = node.attributes().get(attr);
if (nodeAttributeValue == null) {
if (opType == OpType.AND) {
return false;
} else {
continue;
}
}
for (String value : values) {
if (Regex.simpleMatch(value, nodeAttributeValue)) {
if (opType == OpType.OR) {
return true;
}
} else {
if (opType == OpType.AND) {
return false;
}
}
}
}
}
if (opType == OpType.OR) {
return false;
} else {
return true;
}
}
/**
* Generates a human-readable string for the DiscoverNodeFilters.
* Example: {@code _id:"id1 OR blah",name:"blah OR name2"}
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
int entryCount = filters.size();
for (Map.Entry<String, String[]> entry : filters.entrySet()) {
String attr = entry.getKey();
String[] values = entry.getValue();
sb.append(attr);
sb.append(":\"");
int valueCount = values.length;
for (String value : values) {
sb.append(value);
if (valueCount > 1) {
sb.append(" " + opType.toString() + " ");
}
valueCount--;
}
sb.append("\"");
if (entryCount > 1) {
sb.append(",");
}
entryCount--;
}
return sb.toString();
}
} | 0true
| src_main_java_org_elasticsearch_cluster_node_DiscoveryNodeFilters.java |
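Note that buildFromKeyValue above returns null when no non-empty filter values are supplied, so callers must null-check before calling match(...). A usage sketch built only from the public API shown above (the attribute names and patterns are illustrative):

import java.util.HashMap;
import java.util.Map;

final class FilterExample {
    static DiscoveryNodeFilters includeFilters() {
        Map<String, String> raw = new HashMap<String, String>();
        raw.put("_ip", "192.168.1.*,10.0.0.1"); // comma-separated, wildcard-matched
        raw.put("rack", "r1");                  // plain node attribute
        // May be null if every value above were empty.
        return DiscoveryNodeFilters.buildFromKeyValue(DiscoveryNodeFilters.OpType.AND, raw);
    }
}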
1,876 | boolean b = h1.executeTransaction(options, new TransactionalTask<Boolean>() {
public Boolean execute(TransactionalTaskContext context) throws TransactionException {
final TransactionalMap<Object, Object> txMap = context.getMap("default");
txMap.put("3", "3");
assertEquals(3, txMap.keySet().size());
map.put("4", "4");
assertEquals(4, txMap.keySet().size());
txMap.remove("1");
assertEquals(3, txMap.keySet().size());
map.remove("2");
assertEquals(2, txMap.keySet().size());
assertEquals(2, txMap.size());
return true;
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_MapTransactionTest.java |
265 | public class CassandraThriftKeyColumnValueStore implements KeyColumnValueStore {
private static final Logger logger =
LoggerFactory.getLogger(CassandraThriftKeyColumnValueStore.class);
private static final Pattern BROKEN_BYTE_TOKEN_PATTERN = Pattern.compile("^Token\\(bytes\\[(.+)\\]\\)$");
// Cassandra access
private final CassandraThriftStoreManager storeManager;
private final String keyspace;
private final String columnFamily;
private final CTConnectionPool pool;
private final ThriftGetter entryGetter;
public CassandraThriftKeyColumnValueStore(String keyspace, String columnFamily, CassandraThriftStoreManager storeManager,
CTConnectionPool pool) {
this.storeManager = storeManager;
this.keyspace = keyspace;
this.columnFamily = columnFamily;
this.pool = pool;
this.entryGetter = new ThriftGetter(storeManager.getMetaDataSchema(columnFamily));
}
/**
* Call Cassandra's Thrift get_slice() method.
* <p/>
* When columnEnd equals columnStart and either startInclusive
* or endInclusive is false (or both are false), then this
* method returns an empty list without making any Thrift calls.
* <p/>
* If columnEnd = columnStart + 1, and both startInclusive and
* endInclusive are false, then the arguments effectively form
* an empty interval. In this case, as in the one previous,
* an empty list is returned. However, it may not necessarily
* be handled efficiently; a Thrift call might still be made
* before returning the empty list.
*
* @throws com.thinkaurelius.titan.diskstorage.BackendException
* when columnEnd < columnStart
*/
@Override
public EntryList getSlice(KeySliceQuery query, StoreTransaction txh) throws BackendException {
Map<StaticBuffer, EntryList> result = getNamesSlice(query.getKey(), query, txh);
return Iterables.getOnlyElement(result.values(), EntryList.EMPTY_LIST);
}
@Override
public Map<StaticBuffer, EntryList> getSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
return getNamesSlice(keys, query, txh);
}
public Map<StaticBuffer, EntryList> getNamesSlice(StaticBuffer key,
SliceQuery query, StoreTransaction txh) throws BackendException {
return getNamesSlice(ImmutableList.of(key),query,txh);
}
public Map<StaticBuffer, EntryList> getNamesSlice(List<StaticBuffer> keys,
SliceQuery query,
StoreTransaction txh) throws BackendException {
ColumnParent parent = new ColumnParent(columnFamily);
/*
* Cassandra cannot handle columnStart = columnEnd.
* Cassandra's Thrift getSlice() throws InvalidRequestException
* if columnStart = columnEnd.
*/
if (query.getSliceStart().compareTo(query.getSliceEnd()) >= 0) {
// Check for invalid arguments where columnEnd < columnStart
if (query.getSliceEnd().compareTo(query.getSliceStart())<0) {
throw new PermanentBackendException("columnStart=" + query.getSliceStart() +
" is greater than columnEnd=" + query.getSliceEnd() + ". " +
"columnStart must be less than or equal to columnEnd");
}
if (0 != query.getSliceStart().length() && 0 != query.getSliceEnd().length()) {
logger.debug("Return empty list due to columnEnd==columnStart and neither empty");
return KCVSUtil.emptyResults(keys);
}
}
assert query.getSliceStart().compareTo(query.getSliceEnd()) < 0;
ConsistencyLevel consistency = getTx(txh).getReadConsistencyLevel().getThrift();
SlicePredicate predicate = new SlicePredicate();
SliceRange range = new SliceRange();
range.setCount(query.getLimit() + (query.hasLimit()?1:0)); //Add one for potentially removed last column
range.setStart(query.getSliceStart().asByteBuffer());
range.setFinish(query.getSliceEnd().asByteBuffer());
predicate.setSlice_range(range);
CTConnection conn = null;
try {
conn = pool.borrowObject(keyspace);
Cassandra.Client client = conn.getClient();
Map<ByteBuffer, List<ColumnOrSuperColumn>> rows = client.multiget_slice(CassandraHelper.convert(keys),
parent,
predicate,
consistency);
/*
* The final size of the "result" List may be at most rows.size().
* However, "result" could also be up to two elements smaller than
* rows.size(), depending on startInclusive and endInclusive
*/
Map<StaticBuffer, EntryList> results = new HashMap<StaticBuffer, EntryList>();
for (ByteBuffer key : rows.keySet()) {
results.put(StaticArrayBuffer.of(key),
CassandraHelper.makeEntryList(rows.get(key), entryGetter, query.getSliceEnd(), query.getLimit()));
}
return results;
} catch (Exception e) {
throw convertException(e);
} finally {
pool.returnObjectUnsafe(keyspace, conn);
}
}
private static class ThriftGetter implements StaticArrayEntry.GetColVal<ColumnOrSuperColumn,ByteBuffer> {
private final EntryMetaData[] schema;
private ThriftGetter(EntryMetaData[] schema) {
this.schema = schema;
}
@Override
public ByteBuffer getColumn(ColumnOrSuperColumn element) {
return element.getColumn().bufferForName();
}
@Override
public ByteBuffer getValue(ColumnOrSuperColumn element) {
return element.getColumn().bufferForValue();
}
@Override
public EntryMetaData[] getMetaSchema(ColumnOrSuperColumn element) {
return schema;
}
@Override
public Object getMetaData(ColumnOrSuperColumn element, EntryMetaData meta) {
switch(meta) {
case TIMESTAMP:
return element.getColumn().getTimestamp();
case TTL:
return element.getColumn().getTtl();
default:
throw new UnsupportedOperationException("Unsupported meta data: " + meta);
}
}
}
@Override
public void close() {
// Do nothing
}
@Override
public void acquireLock(StaticBuffer key, StaticBuffer column, StaticBuffer expectedValue,
StoreTransaction txh) throws BackendException {
throw new UnsupportedOperationException();
}
@Override
public KeyIterator getKeys(@Nullable SliceQuery sliceQuery, StoreTransaction txh) throws BackendException {
final IPartitioner<? extends Token<?>> partitioner = storeManager.getCassandraPartitioner();
if (!(partitioner instanceof RandomPartitioner) && !(partitioner instanceof Murmur3Partitioner))
throw new PermanentBackendException("This operation is only allowed when random partitioner (md5 or murmur3) is used.");
try {
return new AllTokensIterator<Token<?>>(partitioner, sliceQuery, storeManager.getPageSize());
} catch (Exception e) {
throw convertException(e);
}
}
@Override
public KeyIterator getKeys(KeyRangeQuery keyRangeQuery, StoreTransaction txh) throws BackendException {
final IPartitioner<? extends Token<?>> partitioner = storeManager.getCassandraPartitioner();
// see rant about the reason of this limitation in Astyanax implementation of this method.
if (!(partitioner instanceof AbstractByteOrderedPartitioner))
throw new PermanentBackendException("This operation is only allowed when byte-ordered partitioner is used.");
try {
return new KeyRangeIterator<Token<?>>(partitioner, keyRangeQuery, storeManager.getPageSize(),
keyRangeQuery.getKeyStart().asByteBuffer(),
keyRangeQuery.getKeyEnd().asByteBuffer());
} catch (Exception e) {
throw convertException(e);
}
}
@Override
public String getName() {
return columnFamily;
}
@Override
public void mutate(StaticBuffer key, List<Entry> additions, List<StaticBuffer> deletions, StoreTransaction txh) throws BackendException {
Map<StaticBuffer, KCVMutation> mutations = ImmutableMap.of(key, new KCVMutation(additions, deletions));
mutateMany(mutations, txh);
}
public void mutateMany(Map<StaticBuffer, KCVMutation> mutations, StoreTransaction txh) throws BackendException {
storeManager.mutateMany(ImmutableMap.of(columnFamily, mutations), txh);
}
static BackendException convertException(Throwable e) {
if (e instanceof TException) {
return new PermanentBackendException(e);
} else if (e instanceof TimedOutException) {
return new TemporaryBackendException(e);
} else if (e instanceof UnavailableException) {
return new TemporaryBackendException(e);
} else if (e instanceof InvalidRequestException) {
return new PermanentBackendException(e);
} else {
return new PermanentBackendException(e);
}
}
@Override
public String toString() {
return "CassandraThriftKeyColumnValueStore[ks="
+ keyspace + ", cf=" + columnFamily + "]";
}
private List<KeySlice> getKeySlice(ByteBuffer startKey,
ByteBuffer endKey,
SliceQuery columnSlice,
int count) throws BackendException {
return getRangeSlices(new org.apache.cassandra.thrift.KeyRange().setStart_key(startKey).setEnd_key(endKey).setCount(count), columnSlice);
}
private <T extends Token<?>> List<KeySlice> getTokenSlice(T startToken, T endToken,
SliceQuery sliceQuery, int count) throws BackendException {
String st = sanitizeBrokenByteToken(startToken);
String et = sanitizeBrokenByteToken(endToken);
org.apache.cassandra.thrift.KeyRange kr = new org.apache.cassandra.thrift.KeyRange().setStart_token(st).setEnd_token(et).setCount(count);
return getRangeSlices(kr, sliceQuery);
}
private String sanitizeBrokenByteToken(Token<?> tok) {
/*
* Background: https://issues.apache.org/jira/browse/CASSANDRA-5566
*
* This check is useful for compatibility with Cassandra server versions
* 1.2.4 and earlier.
*/
String st = tok.toString();
if (!(tok instanceof BytesToken))
return st;
// Do a cheap 1-character startsWith before unleashing the regex
if (st.startsWith("T")) {
Matcher m = BROKEN_BYTE_TOKEN_PATTERN.matcher(st);
if (!m.matches()) {
logger.warn("Unknown token string format: \"{}\"", st);
} else {
String old = st;
st = m.group(1);
logger.debug("Rewrote token string: \"{}\" -> \"{}\"", old, st);
}
}
return st;
}
private List<KeySlice> getRangeSlices(org.apache.cassandra.thrift.KeyRange keyRange, @Nullable SliceQuery sliceQuery) throws BackendException {
SliceRange sliceRange = new SliceRange();
if (sliceQuery == null) {
sliceRange.setStart(ArrayUtils.EMPTY_BYTE_ARRAY)
.setFinish(ArrayUtils.EMPTY_BYTE_ARRAY)
.setCount(5);
} else {
sliceRange.setStart(sliceQuery.getSliceStart().asByteBuffer())
.setFinish(sliceQuery.getSliceEnd().asByteBuffer())
.setCount((sliceQuery.hasLimit()) ? sliceQuery.getLimit() : Integer.MAX_VALUE);
}
CTConnection connection = null;
try {
connection = pool.borrowObject(keyspace);
List<KeySlice> slices =
connection.getClient().get_range_slices(new ColumnParent(columnFamily),
new SlicePredicate()
.setSlice_range(sliceRange),
keyRange,
ConsistencyLevel.QUORUM);
for (KeySlice s : slices) {
logger.debug("Key {}", ByteBufferUtil.toString(s.key, "-"));
}
/* Note: we need to fetch columns for each row as well to remove "range ghosts" */
List<KeySlice> result = new ArrayList<KeySlice>(slices.size());
KeyIterationPredicate pred = new KeyIterationPredicate();
for (KeySlice ks : slices)
if (pred.apply(ks))
result.add(ks);
return result;
} catch (Exception e) {
throw convertException(e);
} finally {
if (connection != null)
pool.returnObjectUnsafe(keyspace, connection);
}
}
private static class KeyIterationPredicate implements Predicate<KeySlice> {
@Override
public boolean apply(@Nullable KeySlice row) {
return (row != null) && row.getColumns().size() > 0;
}
}
/**
* Slices rows and columns using tokens. Recall that the partitioner turns
* keys into tokens. For instance, under RandomPartitioner, tokens are the
* MD5 hashes of keys.
*/
public class AbstractBufferedRowIter<T extends Token<?>> implements KeyIterator {
private final int pageSize;
private final SliceQuery columnSlice;
private boolean isClosed;
private boolean seenEnd;
protected Iterator<KeySlice> ksIter;
private KeySlice mostRecentRow;
private final IPartitioner<? extends T> partitioner;
private T nextStartToken;
private final T endToken;
private ByteBuffer nextStartKey;
private boolean omitEndToken;
public AbstractBufferedRowIter(IPartitioner<? extends T> partitioner,
SliceQuery columnSlice, int pageSize, T startToken, T endToken, boolean omitEndToken) {
this.pageSize = pageSize;
this.partitioner = partitioner;
this.nextStartToken = startToken;
this.endToken = endToken;
this.columnSlice = columnSlice;
this.seenEnd = false;
this.isClosed = false;
this.ksIter = Iterators.emptyIterator();
this.mostRecentRow = null;
this.omitEndToken = omitEndToken;
}
@Override
public boolean hasNext() {
ensureOpen();
if (!ksIter.hasNext() && !seenEnd) {
try {
ksIter = rebuffer().iterator();
} catch (BackendException e) {
throw new RuntimeException(e);
}
}
return ksIter.hasNext();
}
@Override
public StaticBuffer next() {
ensureOpen();
if (!hasNext())
throw new NoSuchElementException();
mostRecentRow = ksIter.next();
Preconditions.checkNotNull(mostRecentRow);
return StaticArrayBuffer.of(mostRecentRow.bufferForKey());
}
@Override
public void close() {
closeIterator();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public RecordIterator<Entry> getEntries() {
ensureOpen();
return new RecordIterator<Entry>() {
final Iterator<Entry> columns =
CassandraHelper.makeEntryIterator(mostRecentRow.getColumns(),
entryGetter, columnSlice.getSliceEnd(),
columnSlice.getLimit());
@Override
public boolean hasNext() {
ensureOpen();
return columns.hasNext();
}
@Override
public Entry next() {
ensureOpen();
return columns.next();
}
@Override
public void close() {
closeIterator();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
private void ensureOpen() {
if (isClosed)
throw new IllegalStateException("Iterator has been closed.");
}
private void closeIterator() {
if (!isClosed) {
isClosed = true;
}
}
private List<KeySlice> rebuffer() throws BackendException {
Preconditions.checkArgument(!seenEnd);
return checkFreshSlices(getNextKeySlices());
}
protected List<KeySlice> checkFreshSlices(List<KeySlice> ks) {
if (0 == ks.size()) {
seenEnd = true;
return Collections.emptyList();
}
nextStartKey = ks.get(ks.size() - 1).bufferForKey();
nextStartToken = partitioner.getToken(nextStartKey);
if (nextStartToken.equals(endToken)) {
seenEnd = true;
if (omitEndToken)
ks.remove(ks.size() - 1);
}
return ks;
}
protected final List<KeySlice> getNextKeySlices() throws BackendException {
return getTokenSlice(nextStartToken, endToken, columnSlice, pageSize);
}
}
private final class AllTokensIterator<T extends Token<?>> extends AbstractBufferedRowIter<T> {
public AllTokensIterator(IPartitioner<? extends T> partitioner, SliceQuery columnSlice, int pageSize) {
super(partitioner, columnSlice, pageSize, partitioner.getMinimumToken(), partitioner.getMinimumToken(), false);
}
}
private final class KeyRangeIterator<T extends Token<?>> extends AbstractBufferedRowIter<T> {
public KeyRangeIterator(IPartitioner<? extends T> partitioner, SliceQuery columnSlice,
int pageSize, ByteBuffer startKey, ByteBuffer endKey) throws BackendException {
super(partitioner, columnSlice, pageSize, partitioner.getToken(startKey), partitioner.getToken(endKey), true);
Preconditions.checkArgument(partitioner instanceof AbstractByteOrderedPartitioner);
// Get first slice with key range instead of token range. Token
// ranges are start-exclusive, key ranges are start-inclusive. Both
// are end-inclusive. If we don't make the call below, then we will
// erroneously miss startKey.
List<KeySlice> ks = getKeySlice(startKey, endKey, columnSlice, pageSize);
this.ksIter = checkFreshSlices(ks).iterator();
}
}
} | 0true
| titan-cassandra_src_main_java_com_thinkaurelius_titan_diskstorage_cassandra_thrift_CassandraThriftKeyColumnValueStore.java |
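sanitizeBrokenByteToken above works around CASSANDRA-5566 by stripping the decorated "Token(bytes[...])" form down to its hex payload. A self-contained check of that exact regex:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class TokenRewriteDemo {
    private static final Pattern P = Pattern.compile("^Token\\(bytes\\[(.+)\\]\\)$");

    public static void main(String[] args) {
        Matcher m = P.matcher("Token(bytes[00ff])");
        System.out.println(m.matches() ? m.group(1) : "no rewrite"); // prints 00ff
    }
}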
1,399 | @XmlRootElement(name = "paymentInfo")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class OfferWrapper extends BaseWrapper implements APIWrapper<Offer> {
@XmlElement
protected Long offerId;
@XmlElement
protected String marketingMessage;
@XmlElement
protected BroadleafEnumerationTypeWrapper offerType;
@XmlElement
protected BroadleafEnumerationTypeWrapper discountType;
@Override
public void wrapDetails(Offer model, HttpServletRequest request) {
this.marketingMessage = model.getMarketingMessage();
this.offerType = (BroadleafEnumerationTypeWrapper) context.getBean(BroadleafEnumerationTypeWrapper.class.getName());
this.offerType.wrapDetails(model.getType(), request);
this.discountType = (BroadleafEnumerationTypeWrapper) context.getBean(BroadleafEnumerationTypeWrapper.class.getName());
this.discountType.wrapDetails(model.getDiscountType(), request);
}
@Override
public void wrapSummary(Offer model, HttpServletRequest request) {
wrapDetails(model, request);
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_OfferWrapper.java |
240 | .setExceptionCallback(new ExceptionCallback() {
private int retries = 0;
@Override
public boolean onException(ConnectionException e) {
try {
return retries > 2; // make 3 re-tries
} finally {
retries++;
}
}
}).execute(); | 0true
| titan-cassandra_src_main_java_com_thinkaurelius_titan_diskstorage_cassandra_astyanax_AstyanaxKeyColumnValueStore.java |
806 | class TransportHandler extends BaseTransportRequestHandler<MultiPercolateRequest> {
@Override
public MultiPercolateRequest newInstance() {
return new MultiPercolateRequest();
}
@Override
public void messageReceived(final MultiPercolateRequest request, final TransportChannel channel) throws Exception {
// no need to use threaded listener, since we just send a response
request.listenerThreaded(false);
execute(request, new ActionListener<MultiPercolateResponse>() {
@Override
public void onResponse(MultiPercolateResponse response) {
try {
channel.sendResponse(response);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [mpercolate] and request [" + request + "]", e1);
}
}
});
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
} | 0true
| src_main_java_org_elasticsearch_action_percolate_TransportMultiPercolateAction.java |
673 | @Test
public class LocalHashTableIterationTest {
private static final int KEYS_COUNT = 1600000;
private ODatabaseDocumentTx databaseDocumentTx;
private OLocalHashTable<Integer, String> localHashTable;
@BeforeClass
public void beforeClass() {
String buildDirectory = System.getProperty("buildDirectory");
if (buildDirectory == null)
buildDirectory = ".";
databaseDocumentTx = new ODatabaseDocumentTx("local:" + buildDirectory + "/localHashTableIterationTest");
if (databaseDocumentTx.exists()) {
databaseDocumentTx.open("admin", "admin");
databaseDocumentTx.drop();
}
databaseDocumentTx.create();
OHashFunction<Integer> hashFunction = new OHashFunction<Integer>() {
@Override
public long hashCode(Integer value) {
return Long.MAX_VALUE / 2 + value;
}
};
localHashTable = new OLocalHashTable<Integer, String>(".imc", ".tsc", ".obf", hashFunction);
localHashTable.create("localHashTableIterationTest", OIntegerSerializer.INSTANCE, OStringSerializer.INSTANCE, null,
(OStorageLocal) databaseDocumentTx.getStorage());
}
@AfterClass
public void afterClass() throws Exception {
localHashTable.clear();
localHashTable.delete();
databaseDocumentTx.drop();
}
@BeforeMethod
public void beforeMethod() {
}
@AfterMethod
public void afterMethod() {
localHashTable.clear();
}
public void testNextHaveRightOrder() throws Exception {
SortedSet<Integer> keys = new TreeSet<Integer>();
keys.clear();
final MersenneTwisterFast random = new MersenneTwisterFast();
while (keys.size() < KEYS_COUNT) {
int key = random.nextInt();
if (localHashTable.get(key) == null) {
localHashTable.put(key, key + "");
keys.add(key);
Assert.assertEquals(localHashTable.get(key), "" + key);
}
}
OHashIndexBucket.Entry<Integer, String>[] entries = localHashTable.ceilingEntries(Integer.MIN_VALUE);
int curPos = 0;
for (int key : keys) {
int sKey = entries[curPos].key;
Assert.assertEquals(key, sKey, "" + key);
curPos++;
if (curPos >= entries.length) {
entries = localHashTable.higherEntries(entries[entries.length - 1].key);
curPos = 0;
}
}
}
public void testNextSkipsRecordValid() throws Exception {
List<Integer> keys = new ArrayList<Integer>();
keys.clear();
final MersenneTwisterFast random = new MersenneTwisterFast();
while (keys.size() < KEYS_COUNT) {
int key = random.nextInt();
if (localHashTable.get(key) == null) {
localHashTable.put(key, key + "");
keys.add(key);
Assert.assertEquals(localHashTable.get(key), "" + key);
}
}
Collections.sort(keys);
OHashIndexBucket.Entry<Integer, String>[] entries = localHashTable.ceilingEntries(keys.get(10));
int curPos = 0;
for (int key : keys) {
if (key < keys.get(10)) {
continue;
}
int sKey = entries[curPos].key;
Assert.assertEquals(key, sKey);
curPos++;
if (curPos >= entries.length) {
entries = localHashTable.higherEntries(entries[entries.length - 1].key);
curPos = 0;
}
}
}
@Test(enabled = false)
public void testNextHaveRightOrderUsingNextMethod() throws Exception {
List<Integer> keys = new ArrayList<Integer>();
keys.clear();
MersenneTwisterFast random = new MersenneTwisterFast();
while (keys.size() < KEYS_COUNT) {
int key = random.nextInt();
if (localHashTable.get(key) == null) {
localHashTable.put(key, key + "");
keys.add(key);
Assert.assertEquals(localHashTable.get(key), key + "");
}
}
Collections.sort(keys);
for (int key : keys) {
OHashIndexBucket.Entry<Integer, String>[] entries = localHashTable.ceilingEntries(key);
Assert.assertTrue(key == entries[0].key);
}
for (int j = 0, keysSize = keys.size() - 1; j < keysSize; j++) {
int key = keys.get(j);
int sKey = localHashTable.higherEntries(key)[0].key;
Assert.assertTrue(sKey == keys.get(j + 1));
}
}
} | 0true
| core_src_test_java_com_orientechnologies_orient_core_index_hashindex_local_LocalHashTableIterationTest.java |
581 | getValuesMinor(toKey, isInclusive, new IndexValuesResultListener() {
@Override
public boolean addResult(OIdentifiable value) {
result.add(value);
return true;
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_OIndexAbstract.java |
2,751 | public interface HttpChannel extends RestChannel {
} | 0true
| src_main_java_org_elasticsearch_http_HttpChannel.java |
3,108 | public interface IndexEngine extends IndexComponent {
void close();
} | 0true
| src_main_java_org_elasticsearch_index_engine_IndexEngine.java |
1,185 | @Service("blPaymentInfoTypeService")
public class BroadleafPaymentInfoTypeServiceImpl implements BroadleafPaymentInfoTypeService {
/* Services */
@Resource(name = "blSecurePaymentInfoService")
protected SecurePaymentInfoService securePaymentInfoService;
@Override
public Map<PaymentInfo, Referenced> getPaymentsMap(Order order) {
Map<PaymentInfo, Referenced> payments = new HashMap<PaymentInfo, Referenced>();
for(PaymentInfo paymentInfo : order.getPaymentInfos()){
if(PaymentInfoType.ACCOUNT.equals(paymentInfo.getType())){
Referenced referenceInfo = createAccountReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
if(PaymentInfoType.BANK_ACCOUNT.equals(paymentInfo.getType())){
Referenced referenceInfo = createBankAccountReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
if(PaymentInfoType.CHECK.equals(paymentInfo.getType())){
Referenced referenceInfo = createCheckReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
if(PaymentInfoType.CREDIT_CARD.equals(paymentInfo.getType())){
Referenced referenceInfo = createCreditCardReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
if(PaymentInfoType.CUSTOMER_CREDIT.equals(paymentInfo.getType())){
Referenced referenceInfo = createCustomerCreditReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
if(PaymentInfoType.ELECTRONIC_CHECK.equals(paymentInfo.getType())){
Referenced referenceInfo = createElectronicCheckReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
if(PaymentInfoType.GIFT_CARD.equals(paymentInfo.getType())){
Referenced referenceInfo = createGiftCardReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
if(PaymentInfoType.MONEY_ORDER.equals(paymentInfo.getType())){
Referenced referenceInfo = createMoneyOrderReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
if(PaymentInfoType.PAYPAL.equals(paymentInfo.getType())){
Referenced referenceInfo = createPayPalReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
if(PaymentInfoType.WIRE.equals(paymentInfo.getType())){
Referenced referenceInfo = createWireReferenceInfo(paymentInfo);
payments.put(paymentInfo, referenceInfo);
}
}
return payments;
}
public Referenced createAccountReferenceInfo(PaymentInfo paymentInfo){
Referenced blankReference = new EmptyReferenced();
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
public Referenced createBankAccountReferenceInfo(PaymentInfo paymentInfo){
BankAccountPaymentInfo blankReference = (BankAccountPaymentInfo) securePaymentInfoService.create(PaymentInfoType.BANK_ACCOUNT);
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
public Referenced createCheckReferenceInfo(PaymentInfo paymentInfo){
Referenced blankReference = new EmptyReferenced();
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
public Referenced createCreditCardReferenceInfo(PaymentInfo paymentInfo){
CreditCardPaymentInfo blankReference = (CreditCardPaymentInfo) securePaymentInfoService.create(PaymentInfoType.CREDIT_CARD);
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
public Referenced createCustomerCreditReferenceInfo(PaymentInfo paymentInfo){
Referenced blankReference = new EmptyReferenced();
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
public Referenced createElectronicCheckReferenceInfo(PaymentInfo paymentInfo){
Referenced blankReference = new EmptyReferenced();
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
public Referenced createGiftCardReferenceInfo(PaymentInfo paymentInfo){
GiftCardPaymentInfo blankReference = (GiftCardPaymentInfo) securePaymentInfoService.create(PaymentInfoType.GIFT_CARD);
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
public Referenced createMoneyOrderReferenceInfo(PaymentInfo paymentInfo){
Referenced blankReference = new EmptyReferenced();
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
public Referenced createPayPalReferenceInfo(PaymentInfo paymentInfo){
Referenced blankReference = new EmptyReferenced();
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
public Referenced createWireReferenceInfo(PaymentInfo paymentInfo){
Referenced blankReference = new EmptyReferenced();
blankReference.setReferenceNumber(paymentInfo.getReferenceNumber());
return blankReference;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_BroadleafPaymentInfoTypeServiceImpl.java |
1,251 | addOperation(operations, new Runnable() {
public void run() {
IMap map = hazelcast.getMap("myMap");
map.remove(random.nextInt(SIZE), new Customer(random.nextInt(100), String.valueOf(random.nextInt(10000))));
}
}, 10); | 0true
| hazelcast_src_main_java_com_hazelcast_examples_AllTest.java |
0 | {
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
System.out.println( "Entered cluster:" + clusterConfiguration );
}
@Override
public void joinedCluster( InstanceId instanceId, URI member )
{
System.out.println( "Joined cluster:" + instanceId + " (at URI " + member +")" );
}
@Override
public void leftCluster( InstanceId instanceId )
{
System.out.println( "Left cluster:" + instanceId );
}
@Override
public void leftCluster()
{
System.out.println( "Left cluster" );
}
@Override
public void elected( String role, InstanceId instanceId, URI electedMember )
{
System.out.println( instanceId + " at URI " + electedMember + " was elected as " + role );
}
@Override
public void unelected( String role, InstanceId instanceId, URI electedMember )
{
System.out.println( instanceId + " at URI " + electedMember + " was removed from " + role );
}
} ); | 1no label
| enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_MultiPaxosServer.java |
964 | public class ONothingCompression implements OCompression {
public static final String NAME = "nothing";
public static final ONothingCompression INSTANCE = new ONothingCompression();
@Override
public byte[] compress(byte[] content) {
byte[] result = new byte[content.length];
System.arraycopy(content, 0, result, 0, content.length);
return result;
}
@Override
public byte[] uncompress(byte[] content) {
byte[] result = new byte[content.length];
System.arraycopy(content, 0, result, 0, content.length);
return result;
}
@Override
public String name() {
return NAME;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_serialization_compression_impl_ONothingCompression.java |
490 | public class ClearIndicesCacheResponse extends BroadcastOperationResponse {
ClearIndicesCacheResponse() {
}
ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_cache_clear_ClearIndicesCacheResponse.java |
1,449 | public static class ShardSnapshotStatus {
private State state;
private String nodeId;
private String reason;
private ShardSnapshotStatus() {
}
public ShardSnapshotStatus(String nodeId) {
this(nodeId, State.INIT);
}
public ShardSnapshotStatus(String nodeId, State state) {
this(nodeId, state, null);
}
public ShardSnapshotStatus(String nodeId, State state, String reason) {
this.nodeId = nodeId;
this.state = state;
this.reason = reason;
}
public State state() {
return state;
}
public String nodeId() {
return nodeId;
}
public String reason() {
return reason;
}
public static ShardSnapshotStatus readShardSnapshotStatus(StreamInput in) throws IOException {
ShardSnapshotStatus shardSnapshotStatus = new ShardSnapshotStatus();
shardSnapshotStatus.readFrom(in);
return shardSnapshotStatus;
}
public void readFrom(StreamInput in) throws IOException {
nodeId = in.readOptionalString();
state = State.fromValue(in.readByte());
reason = in.readOptionalString();
}
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(nodeId);
out.writeByte(state.value);
out.writeOptionalString(reason);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ShardSnapshotStatus status = (ShardSnapshotStatus) o;
if (nodeId != null ? !nodeId.equals(status.nodeId) : status.nodeId != null) return false;
if (reason != null ? !reason.equals(status.reason) : status.reason != null) return false;
if (state != status.state) return false;
return true;
}
@Override
public int hashCode() {
int result = state != null ? state.hashCode() : 0;
result = 31 * result + (nodeId != null ? nodeId.hashCode() : 0);
result = 31 * result + (reason != null ? reason.hashCode() : 0);
return result;
}
} | 0true
| src_main_java_org_elasticsearch_cluster_metadata_SnapshotMetaData.java |
1,486 | public class IntervalFilterMap {
public static final String CLASS = Tokens.makeNamespace(IntervalFilterMap.class) + ".class";
public static final String KEY = Tokens.makeNamespace(IntervalFilterMap.class) + ".key";
public static final String START_VALUE = Tokens.makeNamespace(IntervalFilterMap.class) + ".startValue";
public static final String END_VALUE = Tokens.makeNamespace(IntervalFilterMap.class) + ".endValue";
public static final String VALUE_CLASS = Tokens.makeNamespace(IntervalFilterMap.class) + ".valueClass";
public enum Counters {
VERTICES_FILTERED,
EDGES_FILTERED
}
public static Configuration createConfiguration(final Class<? extends Element> klass, final String key, final Object startValue, final Object endValue) {
final Configuration configuration = new EmptyConfiguration();
configuration.setClass(CLASS, klass, Element.class);
configuration.set(KEY, key);
if (startValue instanceof String) {
configuration.set(VALUE_CLASS, String.class.getName());
configuration.set(START_VALUE, (String) startValue);
configuration.set(END_VALUE, (String) endValue);
} else if (startValue instanceof Number) {
configuration.set(VALUE_CLASS, Float.class.getName());
configuration.setFloat(START_VALUE, ((Number) startValue).floatValue());
configuration.setFloat(END_VALUE, ((Number) endValue).floatValue());
} else if (startValue instanceof Boolean) {
configuration.set(VALUE_CLASS, Boolean.class.getName());
configuration.setBoolean(START_VALUE, (Boolean) startValue);
configuration.setBoolean(END_VALUE, (Boolean) endValue);
} else {
throw new RuntimeException("Unknown value class: " + startValue.getClass().getName());
}
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private boolean isVertex;
private ElementChecker startChecker;
private ElementChecker endChecker;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
final String key = context.getConfiguration().get(KEY);
final Class valueClass = context.getConfiguration().getClass(VALUE_CLASS, String.class);
final Object startValue;
final Object endValue;
if (valueClass.equals(String.class)) {
startValue = context.getConfiguration().get(START_VALUE);
endValue = context.getConfiguration().get(END_VALUE);
} else if (Number.class.isAssignableFrom((valueClass))) {
startValue = context.getConfiguration().getFloat(START_VALUE, Float.MIN_VALUE);
endValue = context.getConfiguration().getFloat(END_VALUE, Float.MAX_VALUE);
} else {
throw new IOException("Class " + valueClass + " is an unsupported value class");
}
this.startChecker = new ElementChecker(key, Compare.GREATER_THAN_EQUAL, startValue);
this.endChecker = new ElementChecker(key, Compare.LESS_THAN, endValue);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
if (this.isVertex) {
if (value.hasPaths() && !(this.startChecker.isLegal(value) && this.endChecker.isLegal(value))) {
value.clearPaths();
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_FILTERED, 1L);
}
} else {
long counter = 0;
for (final Edge e : value.getEdges(Direction.BOTH)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths() && !(this.startChecker.isLegal(edge) && this.endChecker.isLegal(edge))) {
edge.clearPaths();
counter++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.EDGES_FILTERED, counter);
}
context.write(NullWritable.get(), value);
}
}
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_filter_IntervalFilterMap.java |
3,074 | public class DeleteByQueryFailedEngineException extends EngineException {
public DeleteByQueryFailedEngineException(ShardId shardId, Engine.DeleteByQuery deleteByQuery, Throwable cause) {
super(shardId, "Delete by query failed for [" + deleteByQuery.query() + "]", cause);
}
} | 0true
| src_main_java_org_elasticsearch_index_engine_DeleteByQueryFailedEngineException.java |
1,730 | public class Lifecycle {
public static enum State {
INITIALIZED,
STOPPED,
STARTED,
CLOSED
}
private volatile State state = State.INITIALIZED;
public State state() {
return this.state;
}
/**
* Returns <tt>true</tt> if the state is initialized.
*/
public boolean initialized() {
return state == State.INITIALIZED;
}
/**
* Returns <tt>true</tt> if the state is started.
*/
public boolean started() {
return state == State.STARTED;
}
/**
* Returns <tt>true</tt> if the state is stopped.
*/
public boolean stopped() {
return state == State.STOPPED;
}
/**
* Returns <tt>true</tt> if the state is closed.
*/
public boolean closed() {
return state == State.CLOSED;
}
public boolean stoppedOrClosed() {
Lifecycle.State state = this.state;
return state == State.STOPPED || state == State.CLOSED;
}
public boolean canMoveToStarted() throws ElasticsearchIllegalStateException {
State localState = this.state;
if (localState == State.INITIALIZED || localState == State.STOPPED) {
return true;
}
if (localState == State.STARTED) {
return false;
}
if (localState == State.CLOSED) {
throw new ElasticsearchIllegalStateException("Can't move to started state when closed");
}
throw new ElasticsearchIllegalStateException("Can't move to started with unknown state");
}
public boolean moveToStarted() throws ElasticsearchIllegalStateException {
State localState = this.state;
if (localState == State.INITIALIZED || localState == State.STOPPED) {
state = State.STARTED;
return true;
}
if (localState == State.STARTED) {
return false;
}
if (localState == State.CLOSED) {
throw new ElasticsearchIllegalStateException("Can't move to started state when closed");
}
throw new ElasticsearchIllegalStateException("Can't move to started with unknown state");
}
public boolean canMoveToStopped() throws ElasticsearchIllegalStateException {
State localState = state;
if (localState == State.STARTED) {
return true;
}
if (localState == State.INITIALIZED || localState == State.STOPPED) {
return false;
}
if (localState == State.CLOSED) {
throw new ElasticsearchIllegalStateException("Can't move to started state when closed");
}
throw new ElasticsearchIllegalStateException("Can't move to started with unknown state");
}
public boolean moveToStopped() throws ElasticsearchIllegalStateException {
State localState = state;
if (localState == State.STARTED) {
state = State.STOPPED;
return true;
}
if (localState == State.INITIALIZED || localState == State.STOPPED) {
return false;
}
if (localState == State.CLOSED) {
throw new ElasticsearchIllegalStateException("Can't move to started state when closed");
}
throw new ElasticsearchIllegalStateException("Can't move to started with unknown state");
}
public boolean canMoveToClosed() throws ElasticsearchIllegalStateException {
State localState = state;
if (localState == State.CLOSED) {
return false;
}
if (localState == State.STARTED) {
throw new ElasticsearchIllegalStateException("Can't move to closed before moving to stopped mode");
}
return true;
}
public boolean moveToClosed() throws ElasticsearchIllegalStateException {
State localState = state;
if (localState == State.CLOSED) {
return false;
}
if (localState == State.STARTED) {
throw new ElasticsearchIllegalStateException("Can't move to closed before moving to stopped mode");
}
state = State.CLOSED;
return true;
}
@Override
public String toString() {
return state.toString();
}
} | 0true
| src_main_java_org_elasticsearch_common_component_Lifecycle.java |
2,780 | public class IndexRequestBuilderTests extends ElasticsearchIntegrationTest {
@Test
public void testSetSource() throws InterruptedException, ExecutionException {
createIndex("test");
ensureYellow();
Map<String, Object> map = new HashMap<String, Object>();
map.put("test_field", "foobar");
IndexRequestBuilder[] builders = new IndexRequestBuilder[] {
client().prepareIndex("test", "test").setSource((Object)"test_field", (Object)"foobar"),
client().prepareIndex("test", "test").setSource("{\"test_field\" : \"foobar\"}"),
client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}")),
client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), randomBoolean()),
client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}").toBytes()),
client().prepareIndex("test", "test").setSource(map)
};
indexRandom(true, builders);
SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.termQuery("test_field", "foobar")).get();
ElasticsearchAssertions.assertHitCount(searchResponse, builders.length);
}
@Test(expected = IllegalArgumentException.class)
public void testOddNumberOfSourceObjetc() {
client().prepareIndex("test", "test").setSource((Object)"test_field", (Object)"foobar", new Object());
}
} | 0true
| src_test_java_org_elasticsearch_index_IndexRequestBuilderTests.java |
2,950 | @SuppressWarnings("deprecation")
public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet stopWords;
private final boolean ignoreCase;
private final boolean enablePositionIncrements;
private final boolean removeTrailing;
@Inject
public StopTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
this.ignoreCase = settings.getAsBoolean("ignore_case", false);
this.removeTrailing = settings.getAsBoolean("remove_trailing", true);
this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, version, ignoreCase);
this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", true);
if (!enablePositionIncrements && version.onOrAfter(Version.LUCENE_44)) {
throw new ElasticsearchIllegalArgumentException("[enable_position_increments: false] is not supported anymore as of Lucene 4.4 as it can create broken token streams."
+ " Please fix your analysis chain or use an older compatibility version (<=4.3) but beware that it might cause unexpected behavior.");
}
}
@Override
public TokenStream create(TokenStream tokenStream) {
if (removeTrailing) {
StopFilter filter = new StopFilter(version, tokenStream, stopWords);
filter.setEnablePositionIncrements(enablePositionIncrements);
return filter;
} else {
return new SuggestStopFilter(tokenStream, stopWords);
}
}
public Set<?> stopWords() {
return stopWords;
}
public boolean ignoreCase() {
return ignoreCase;
}
public boolean enablePositionIncrements() {
return this.enablePositionIncrements;
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_StopTokenFilterFactory.java |
1,329 | final Future waitingInQueue = executorService.submit(new Runnable() {
public void run() {
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java |
1,461 | @Component("blUSShippingInfoFormValidator")
public class USShippingInfoFormValidator extends ShippingInfoFormValidator {
@SuppressWarnings("rawtypes")
public boolean supports(Class clazz) {
return clazz.equals(USShippingInfoFormValidator.class);
}
public void validate(Object obj, Errors errors) {
super.validate(obj, errors);
ValidationUtils.rejectIfEmptyOrWhitespace(errors, "address.state", "state.required");
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_checkout_validator_USShippingInfoFormValidator.java |
132 | assertTrueEventually(new AssertTask() {
@Override
public void run() throws Exception {
assertEquals(1, service.getConnectedClients().size());
}
}, 5); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_ClientServiceTest.java |
767 | public class IndexAction extends Action<IndexRequest, IndexResponse, IndexRequestBuilder> {
public static final IndexAction INSTANCE = new IndexAction();
public static final String NAME = "index";
private IndexAction() {
super(NAME);
}
@Override
public IndexResponse newResponse() {
return new IndexResponse();
}
@Override
public IndexRequestBuilder newRequestBuilder(Client client) {
return new IndexRequestBuilder(client);
}
} | 0true
| src_main_java_org_elasticsearch_action_index_IndexAction.java |
939 | new Thread(new Runnable() {
public void run() {
final ILock lock = instance2.getLock(key);
lock.lock();
latch.countDown();
}
}).start(); | 0true
| hazelcast_src_test_java_com_hazelcast_concurrent_lock_LockTest.java |
1,415 | clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", Priority.URGENT, new TimeoutClusterStateUpdateTask() {
@Override
public TimeValue timeout() {
return request.masterTimeout;
}
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public ClusterState execute(ClusterState currentState) {
if (request.create && currentState.metaData().templates().containsKey(request.name)) {
throw new IndexTemplateAlreadyExistsException(request.name);
}
MetaData.Builder builder = MetaData.builder(currentState.metaData()).put(template);
return ClusterState.builder(currentState).metaData(builder).build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
listener.onResponse(new PutResponse(true, template));
}
}); | 0true
| src_main_java_org_elasticsearch_cluster_metadata_MetaDataIndexTemplateService.java |
273 | public class EmailServiceMDP implements MessageListener {
@Resource(name = "blMessageCreator")
private MessageCreator messageCreator;
/*
* (non-Javadoc)
* @see javax.jms.MessageListener#onMessage(javax.jms.Message)
*/
@SuppressWarnings("unchecked")
public void onMessage(Message message) {
try {
HashMap props = (HashMap) ((ObjectMessage) message).getObject();
messageCreator.sendMessage(props);
} catch (MailAuthenticationException e) {
throw new EmailException(e);
} catch (MailPreparationException e) {
throw new EmailException(e);
} catch (MailParseException e) {
throw new EmailException(e);
} catch (MailSendException e) {
/*
* TODO find the specific exception that results from the smtp
* server being down, and throw this as an EmailException.
* Otherwise, log and then swallow this exception, as it may have
* been possible that this email was actually sent.
*/
throw new EmailException(e);
} catch (JMSException e) {
throw new EmailException(e);
}
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_email_service_jms_EmailServiceMDP.java |