Unnamed: 0 (int64, 0–6.45k) | func (string, lengths 37–143k) | target (class label, 2 classes) | project (string, lengths 33–157)
---|---|---|---|
24 |
public class ErrorCommand extends AbstractTextCommand {
ByteBuffer response;
private final String message;
public ErrorCommand(TextCommandType type) {
this(type, null);
}
public ErrorCommand(TextCommandType type, String message) {
super(type);
byte[] error = ERROR;
if (type == TextCommandType.ERROR_CLIENT) {
error = CLIENT_ERROR;
} else if (type == TextCommandType.ERROR_SERVER) {
error = SERVER_ERROR;
}
this.message = message;
byte[] msg = (message == null) ? null : stringToBytes(message);
int total = error.length;
if (msg != null) {
total += msg.length;
}
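// two extra bytes for the trailing RETURN bytes (the protocol line terminator)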
total += 2;
response = ByteBuffer.allocate(total);
response.put(error);
if (msg != null) {
response.put(msg);
}
response.put(RETURN);
response.flip();
}
public boolean readFrom(ByteBuffer cb) {
return true;
}
public boolean writeTo(ByteBuffer bb) {
IOUtil.copyToHeapBuffer(response, bb);
return !response.hasRemaining();
}
@Override
public String toString() {
return "ErrorCommand{"
+ "type=" + type
+ ", msg=" + message
+ '}'
+ super.toString();
}
}
| 0 (true)
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_ErrorCommand.java
|
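ErrorCommand pre-renders the entire protocol reply into a ByteBuffer, flips it, and lets writeTo() drain it into the outgoing network buffer in as many passes as needed. A minimal stand-alone sketch of the same pattern, assuming nothing from Hazelcast (the class name and protocol strings here are hypothetical):

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Hypothetical sketch of the ErrorCommand buffer pattern: pre-render the
// whole response, flip() it, then drain it into a (possibly smaller)
// destination buffer until nothing remains.
public class PreRenderedResponse {
    private static final byte[] CRLF = {'\r', '\n'};
    private final ByteBuffer response;

    PreRenderedResponse(String prefix, String message) {
        byte[] head = prefix.getBytes(StandardCharsets.US_ASCII);
        byte[] body = message == null ? new byte[0] : message.getBytes(StandardCharsets.US_ASCII);
        response = ByteBuffer.allocate(head.length + body.length + CRLF.length);
        response.put(head).put(body).put(CRLF);
        response.flip(); // switch from writing into the buffer to reading from it
    }

    /** Copies as much as fits into dst; returns true once fully written. */
    boolean writeTo(ByteBuffer dst) {
        int n = Math.min(response.remaining(), dst.remaining());
        for (int i = 0; i < n; i++) {
            dst.put(response.get());
        }
        return !response.hasRemaining();
    }

    public static void main(String[] args) {
        PreRenderedResponse r = new PreRenderedResponse("CLIENT_ERROR ", "bad data chunk");
        ByteBuffer network = ByteBuffer.allocate(8); // deliberately small
        while (!r.writeTo(network)) {
            network.clear(); // pretend the bytes were flushed to the socket
        }
    }
}
```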
938 |
public class OfferType implements Serializable, BroadleafEnumerationType, Comparable<OfferType> {
private static final long serialVersionUID = 1L;
private static final Map<String, OfferType> TYPES = new LinkedHashMap<String, OfferType>();
public static final OfferType ORDER_ITEM = new OfferType("ORDER_ITEM", "Order Item", 1000);
public static final OfferType ORDER = new OfferType("ORDER", "Order", 2000);
public static final OfferType FULFILLMENT_GROUP = new OfferType("FULFILLMENT_GROUP", "Fulfillment Group", 3000);
public static OfferType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
private int order;
public OfferType() {
//do nothing
}
public OfferType(final String type, final String friendlyType, int order) {
this.friendlyType = friendlyType;
setType(type);
setOrder(order);
}
public void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
public int getOrder() {
return order;
}
public void setOrder(int order) {
this.order = order;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OfferType other = (OfferType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
@Override
public int compareTo(OfferType arg0) {
// Integer.compare avoids overflow on extreme order values
return Integer.compare(this.order, arg0.order);
}
}
| 1 (no label)
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_type_OfferType.java
|
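OfferType is an instance of the self-registering "extensible enum" pattern: every constructed value records itself in the static TYPES map, so getInstance() resolves both the built-in constants and any values added later by extension modules. A minimal sketch of the pattern under hypothetical names (PaymentType is not Broadleaf API):

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Minimal sketch of the self-registering "extensible enum" pattern used by
// OfferType; PaymentType and the constants here are hypothetical.
public final class PaymentType {
    private static final Map<String, PaymentType> TYPES = new LinkedHashMap<>();

    public static final PaymentType CARD = new PaymentType("CARD", "Credit Card");
    public static final PaymentType WIRE = new PaymentType("WIRE", "Wire Transfer");

    public static PaymentType getInstance(String type) {
        return TYPES.get(type);
    }

    private final String type;
    private final String friendlyType;

    public PaymentType(String type, String friendlyType) {
        this.type = type;
        this.friendlyType = friendlyType;
        // first registration wins, mirroring OfferType.setType()
        TYPES.putIfAbsent(type, this);
    }

    public String getType() { return type; }
    public String getFriendlyType() { return friendlyType; }

    public static void main(String[] args) {
        // a module can add new values at runtime without touching this class
        new PaymentType("CRYPTO", "Cryptocurrency");
        System.out.println(PaymentType.getInstance("CRYPTO").getFriendlyType());
    }
}
```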
324 |
public class NodesInfoAction extends ClusterAction<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
public static final NodesInfoAction INSTANCE = new NodesInfoAction();
public static final String NAME = "cluster/nodes/info";
private NodesInfoAction() {
super(NAME);
}
@Override
public NodesInfoResponse newResponse() {
return new NodesInfoResponse();
}
@Override
public NodesInfoRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new NodesInfoRequestBuilder(client);
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_cluster_node_info_NodesInfoAction.java
|
329 |
static final class Fields {
static final XContentBuilderString NAME = new XContentBuilderString("name");
static final XContentBuilderString DESCRIPTION = new XContentBuilderString("description");
static final XContentBuilderString URL = new XContentBuilderString("url");
static final XContentBuilderString JVM = new XContentBuilderString("jvm");
static final XContentBuilderString SITE = new XContentBuilderString("site");
static final XContentBuilderString VERSION = new XContentBuilderString("version");
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_cluster_node_info_PluginInfo.java
|
137 |
final class ReadWriteLockView implements ReadWriteLock {
public Lock readLock() { return asReadLock(); }
public Lock writeLock() { return asWriteLock(); }
}
| 0 (true)
|
src_main_java_jsr166e_StampedLock.java
|
1,079 |
public class UpdateHelper extends AbstractComponent {
private final IndicesService indicesService;
private final ScriptService scriptService;
@Inject
public UpdateHelper(Settings settings, IndicesService indicesService, ScriptService scriptService) {
super(settings);
this.indicesService = indicesService;
this.scriptService = scriptService;
}
/**
* Prepares an update request by converting it into an index or delete request or an update response (no action).
*/
public Result prepare(UpdateRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.shardSafe(request.shardId());
return prepare(request, indexShard);
}
public Result prepare(UpdateRequest request, IndexShard indexShard) {
long getDate = System.currentTimeMillis();
final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME},
true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE);
if (!getResult.isExists()) {
if (request.upsertRequest() == null && !request.docAsUpsert()) {
throw new DocumentMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id());
}
IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
indexRequest.index(request.index()).type(request.type()).id(request.id())
// it has to be a "create!"
.create(true)
.routing(request.routing())
.refresh(request.refresh())
.replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel());
indexRequest.operationThreaded(false);
if (request.versionType() == VersionType.EXTERNAL) {
// in external versioning mode, we want to create the new document using the given version.
indexRequest.version(request.version()).versionType(VersionType.EXTERNAL);
}
return new Result(indexRequest, Operation.UPSERT, null, null);
}
long updateVersion = getResult.getVersion();
if (request.versionType() == VersionType.EXTERNAL) {
updateVersion = request.version(); // remember, match_any is excluded by the conflict test
}
if (getResult.internalSourceRef() == null) {
// no source, we can't do anything, throw a failure...
throw new DocumentSourceMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id());
}
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
String operation = null;
String timestamp = null;
Long ttl = null;
Object fetchedTTL = null;
final Map<String, Object> updatedSourceAsMap;
final XContentType updateSourceContentType = sourceAndContent.v1();
String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null;
String parent = getResult.getFields().containsKey(ParentFieldMapper.NAME) ? getResult.field(ParentFieldMapper.NAME).getValue().toString() : null;
if (request.script() == null && request.doc() != null) {
IndexRequest indexRequest = request.doc();
updatedSourceAsMap = sourceAndContent.v2();
if (indexRequest.ttl() > 0) {
ttl = indexRequest.ttl();
}
timestamp = indexRequest.timestamp();
if (indexRequest.routing() != null) {
routing = indexRequest.routing();
}
if (indexRequest.parent() != null) {
parent = indexRequest.parent();
}
XContentHelper.update(updatedSourceAsMap, indexRequest.sourceAsMap());
} else {
Map<String, Object> ctx = new HashMap<String, Object>(2);
ctx.put("_source", sourceAndContent.v2());
try {
ExecutableScript script = scriptService.executable(request.scriptLang, request.script, request.scriptParams);
script.setNextVar("ctx", ctx);
script.run();
// we need to unwrap the ctx...
ctx = (Map<String, Object>) script.unwrap(ctx);
} catch (Exception e) {
throw new ElasticsearchIllegalArgumentException("failed to execute script", e);
}
operation = (String) ctx.get("op");
timestamp = (String) ctx.get("_timestamp");
fetchedTTL = ctx.get("_ttl");
if (fetchedTTL != null) {
if (fetchedTTL instanceof Number) {
ttl = ((Number) fetchedTTL).longValue();
} else {
ttl = TimeValue.parseTimeValue((String) fetchedTTL, null).millis();
}
}
updatedSourceAsMap = (Map<String, Object>) ctx.get("_source");
}
// apply script to update the source
// No TTL has been given in the update script so we keep previous TTL value if there is one
if (ttl == null) {
ttl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
if (ttl != null) {
ttl = ttl - (System.currentTimeMillis() - getDate); // It is an approximation of exact TTL value, could be improved
}
}
if (operation == null || "index".equals(operation)) {
final IndexRequest indexRequest = Requests.indexRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent)
.source(updatedSourceAsMap, updateSourceContentType)
.version(updateVersion).versionType(request.versionType())
.replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel())
.timestamp(timestamp).ttl(ttl)
.refresh(request.refresh());
indexRequest.operationThreaded(false);
return new Result(indexRequest, Operation.INDEX, updatedSourceAsMap, updateSourceContentType);
} else if ("delete".equals(operation)) {
DeleteRequest deleteRequest = Requests.deleteRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent)
.version(updateVersion).versionType(request.versionType())
.replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel());
deleteRequest.operationThreaded(false);
return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType);
} else if ("none".equals(operation)) {
UpdateResponse update = new UpdateResponse(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion(), false);
update.setGetResult(extractGetResult(request, getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, null));
return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
} else {
logger.warn("Used update operation [{}] for script [{}], doing nothing...", operation, request.script);
UpdateResponse update = new UpdateResponse(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion(), false);
return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
}
}
/**
* Extracts the fields from the updated document to be returned in an update response
*/
public GetResult extractGetResult(final UpdateRequest request, long version, final Map<String, Object> source, XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) {
if (request.fields() == null || request.fields().length == 0) {
return null;
}
boolean sourceRequested = false;
Map<String, GetField> fields = null;
if (request.fields() != null && request.fields().length > 0) {
SourceLookup sourceLookup = new SourceLookup();
sourceLookup.setNextSource(source);
for (String field : request.fields()) {
if (field.equals("_source")) {
sourceRequested = true;
continue;
}
Object value = sourceLookup.extractValue(field);
if (value != null) {
if (fields == null) {
fields = newHashMapWithExpectedSize(2);
}
GetField getField = fields.get(field);
if (getField == null) {
getField = new GetField(field, new ArrayList<Object>(2));
fields.put(field, getField);
}
getField.getValues().add(value);
}
}
}
// TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType)
return new GetResult(request.index(), request.type(), request.id(), version, true, sourceRequested ? sourceAsBytes : null, fields);
}
public static class Result {
private final Streamable action;
private final Operation operation;
private final Map<String, Object> updatedSourceAsMap;
private final XContentType updateSourceContentType;
public Result(Streamable action, Operation operation, Map<String, Object> updatedSourceAsMap, XContentType updateSourceContentType) {
this.action = action;
this.operation = operation;
this.updatedSourceAsMap = updatedSourceAsMap;
this.updateSourceContentType = updateSourceContentType;
}
@SuppressWarnings("unchecked")
public <T extends Streamable> T action() {
return (T) action;
}
public Operation operation() {
return operation;
}
public Map<String, Object> updatedSourceAsMap() {
return updatedSourceAsMap;
}
public XContentType updateSourceContentType() {
return updateSourceContentType;
}
}
public static enum Operation {
UPSERT,
INDEX,
DELETE,
NONE
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_action_update_UpdateHelper.java
|
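In the script branch of prepare(), the document is exposed to the script through a plain ctx map: the helper seeds "_source", the script may mutate it and set "op", "_timestamp", or "_ttl", and the helper then routes on ctx.get("op"). A small stand-alone simulation of that contract (no Elasticsearch scripting involved; the class is hypothetical):

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for the ctx map handed to an update script: the
// helper seeds ctx with "_source", the script mutates it, and the caller
// routes on ctx.get("op") ("index", "delete", or "none").
public class UpdateScriptCtxDemo {
    public static void main(String[] args) {
        Map<String, Object> source = new HashMap<>();
        source.put("counter", 41);

        Map<String, Object> ctx = new HashMap<>();
        ctx.put("_source", source);

        // what a script like "ctx._source.counter += 1; ctx.op = 'index'" does
        @SuppressWarnings("unchecked")
        Map<String, Object> src = (Map<String, Object>) ctx.get("_source");
        src.put("counter", (Integer) src.get("counter") + 1);
        ctx.put("op", "index");

        String operation = (String) ctx.get("op");
        if (operation == null || "index".equals(operation)) {
            System.out.println("re-index with source " + ctx.get("_source"));
        } else if ("delete".equals(operation)) {
            System.out.println("delete the document");
        } else {
            System.out.println("no-op");
        }
    }
}
```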
3,195 |
static class Key {
final Object readerKey;
@Nullable
Listener listener; // optional stats listener
long sizeInBytes = -1; // optional size in bytes (we keep it here in case the values are soft references)
Key(Object readerKey) {
this.readerKey = readerKey;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Key key = (Key) o;
if (!readerKey.equals(key.readerKey)) return false;
return true;
}
@Override
public int hashCode() {
return readerKey.hashCode();
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_index_fielddata_IndexFieldDataCache.java
|
15 |
public class TextCommandServiceImpl implements TextCommandService, TextCommandConstants {
private final Node node;
private final TextCommandProcessor[] textCommandProcessors = new TextCommandProcessor[100];
private final HazelcastInstance hazelcast;
private final AtomicLong sets = new AtomicLong();
private final AtomicLong touches = new AtomicLong();
private final AtomicLong getHits = new AtomicLong();
private final AtomicLong getMisses = new AtomicLong();
private final AtomicLong deleteMisses = new AtomicLong();
private final AtomicLong deleteHits = new AtomicLong();
private final AtomicLong incrementHits = new AtomicLong();
private final AtomicLong incrementMisses = new AtomicLong();
private final AtomicLong decrementHits = new AtomicLong();
private final AtomicLong decrementMisses = new AtomicLong();
private final long startTime = Clock.currentTimeMillis();
private final ILogger logger;
private volatile ResponseThreadRunnable responseThreadRunnable;
private volatile boolean running = true;
public TextCommandServiceImpl(Node node) {
this.node = node;
this.hazelcast = node.hazelcastInstance;
this.logger = node.getLogger(this.getClass().getName());
textCommandProcessors[GET.getValue()] = new GetCommandProcessor(this, true);
textCommandProcessors[PARTIAL_GET.getValue()] = new GetCommandProcessor(this, false);
textCommandProcessors[SET.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[APPEND.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[PREPEND.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[ADD.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[REPLACE.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[GET_END.getValue()] = new NoOpCommandProcessor(this);
textCommandProcessors[DELETE.getValue()] = new DeleteCommandProcessor(this);
textCommandProcessors[QUIT.getValue()] = new SimpleCommandProcessor(this);
textCommandProcessors[STATS.getValue()] = new StatsCommandProcessor(this);
textCommandProcessors[UNKNOWN.getValue()] = new ErrorCommandProcessor(this);
textCommandProcessors[VERSION.getValue()] = new VersionCommandProcessor(this);
textCommandProcessors[TOUCH.getValue()] = new TouchCommandProcessor(this);
textCommandProcessors[INCREMENT.getValue()] = new IncrementCommandProcessor(this);
textCommandProcessors[DECREMENT.getValue()] = new IncrementCommandProcessor(this);
textCommandProcessors[ERROR_CLIENT.getValue()] = new ErrorCommandProcessor(this);
textCommandProcessors[ERROR_SERVER.getValue()] = new ErrorCommandProcessor(this);
textCommandProcessors[HTTP_GET.getValue()] = new HttpGetCommandProcessor(this);
textCommandProcessors[HTTP_POST.getValue()] = new HttpPostCommandProcessor(this);
textCommandProcessors[HTTP_PUT.getValue()] = new HttpPostCommandProcessor(this);
textCommandProcessors[HTTP_DELETE.getValue()] = new HttpDeleteCommandProcessor(this);
textCommandProcessors[NO_OP.getValue()] = new NoOpCommandProcessor(this);
}
@Override
public Node getNode() {
return node;
}
@Override
public byte[] toByteArray(Object value) {
Data data = node.getSerializationService().toData(value);
return data.getBuffer();
}
@Override
public Stats getStats() {
Stats stats = new Stats();
stats.uptime = (int) ((Clock.currentTimeMillis() - startTime) / 1000);
stats.cmd_get = getMisses.get() + getHits.get();
stats.cmd_set = sets.get();
stats.cmd_touch = touches.get();
stats.get_hits = getHits.get();
stats.get_misses = getMisses.get();
stats.delete_hits = deleteHits.get();
stats.delete_misses = deleteMisses.get();
stats.incr_hits = incrementHits.get();
stats.incr_misses = incrementMisses.get();
stats.decr_hits = decrementHits.get();
stats.decr_misses = decrementMisses.get();
stats.curr_connections = node.connectionManager.getCurrentClientConnections();
stats.total_connections = node.connectionManager.getAllTextConnections();
return stats;
}
@Override
public long incrementDeleteHitCount(int inc) {
return deleteHits.addAndGet(inc);
}
@Override
public long incrementDeleteMissCount() {
return deleteMisses.incrementAndGet();
}
@Override
public long incrementGetHitCount() {
return getHits.incrementAndGet();
}
@Override
public long incrementGetMissCount() {
return getMisses.incrementAndGet();
}
@Override
public long incrementSetCount() {
return sets.incrementAndGet();
}
@Override
public long incrementIncHitCount() {
return incrementHits.incrementAndGet();
}
@Override
public long incrementIncMissCount() {
return incrementMisses.incrementAndGet();
}
@Override
public long incrementDecrHitCount() {
return decrementHits.incrementAndGet();
}
@Override
public long incrementDecrMissCount() {
return decrementMisses.incrementAndGet();
}
@Override
public long incrementTouchCount() {
return touches.incrementAndGet();
}
@Override
public void processRequest(TextCommand command) {
if (responseThreadRunnable == null) {
synchronized (this) {
if (responseThreadRunnable == null) {
responseThreadRunnable = new ResponseThreadRunnable();
String threadNamePrefix = node.getThreadNamePrefix("ascii.service.response");
Thread thread = new Thread(node.threadGroup, responseThreadRunnable, threadNamePrefix);
thread.start();
}
}
}
node.nodeEngine.getExecutionService().execute("hz:text", new CommandExecutor(command));
}
@Override
public Object get(String mapName, String key) {
return hazelcast.getMap(mapName).get(key);
}
@Override
public int getAdjustedTTLSeconds(int ttl) {
if (ttl <= MONTH_SECONDS) {
return ttl;
} else {
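// memcache protocol: expirations longer than 30 days are absolute unix timestamps, so convert to a relative TTL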
return ttl - (int) (Clock.currentTimeMillis() / 1000);
}
}
@Override
public byte[] getByteArray(String mapName, String key) {
Object value = hazelcast.getMap(mapName).get(key);
byte[] result = null;
if (value != null) {
if (value instanceof RestValue) {
RestValue restValue = (RestValue) value;
result = restValue.getValue();
} else if (value instanceof byte[]) {
result = (byte[]) value;
} else {
result = toByteArray(value);
}
}
return result;
}
@Override
public Object put(String mapName, String key, Object value) {
return hazelcast.getMap(mapName).put(key, value);
}
@Override
public Object put(String mapName, String key, Object value, int ttlSeconds) {
return hazelcast.getMap(mapName).put(key, value, ttlSeconds, TimeUnit.SECONDS);
}
@Override
public Object putIfAbsent(String mapName, String key, Object value, int ttlSeconds) {
return hazelcast.getMap(mapName).putIfAbsent(key, value, ttlSeconds, TimeUnit.SECONDS);
}
@Override
public Object replace(String mapName, String key, Object value) {
return hazelcast.getMap(mapName).replace(key, value);
}
@Override
public void lock(String mapName, String key) throws InterruptedException {
if (!hazelcast.getMap(mapName).tryLock(key, 1, TimeUnit.MINUTES)) {
throw new RuntimeException("Memcache client could not get the lock for map:"
+ mapName + " key:" + key + " in 1 minute");
}
}
@Override
public void unlock(String mapName, String key) {
hazelcast.getMap(mapName).unlock(key);
}
@Override
public void deleteAll(String mapName) {
final IMap<Object, Object> map = hazelcast.getMap(mapName);
map.clear();
}
@Override
public Object delete(String mapName, String key) {
return hazelcast.getMap(mapName).remove(key);
}
@Override
public boolean offer(String queueName, Object value) {
return hazelcast.getQueue(queueName).offer(value);
}
@Override
public Object poll(String queueName, int seconds) {
try {
return hazelcast.getQueue(queueName).poll(seconds, TimeUnit.SECONDS);
} catch (InterruptedException e) {
return null;
}
}
@Override
public Object poll(String queueName) {
return hazelcast.getQueue(queueName).poll();
}
@Override
public int size(String queueName) {
return hazelcast.getQueue(queueName).size();
}
@Override
public void sendResponse(TextCommand textCommand) {
if (!textCommand.shouldReply() || textCommand.getRequestId() == -1) {
throw new RuntimeException("Shouldn't reply " + textCommand);
}
responseThreadRunnable.sendResponse(textCommand);
}
public void stop() {
final ResponseThreadRunnable rtr = responseThreadRunnable;
if (rtr != null) {
rtr.stop();
}
}
class CommandExecutor implements Runnable {
final TextCommand command;
CommandExecutor(TextCommand command) {
this.command = command;
}
@Override
public void run() {
try {
TextCommandType type = command.getType();
TextCommandProcessor textCommandProcessor = textCommandProcessors[type.getValue()];
textCommandProcessor.handle(command);
} catch (Throwable e) {
logger.warning(e);
}
}
}
private class ResponseThreadRunnable implements Runnable {
private final BlockingQueue<TextCommand> blockingQueue = new ArrayBlockingQueue<TextCommand>(200);
private final Object stopObject = new Object();
@edu.umd.cs.findbugs.annotations.SuppressWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
public void sendResponse(TextCommand textCommand) {
blockingQueue.offer(textCommand);
}
@Override
public void run() {
while (running) {
try {
TextCommand textCommand = blockingQueue.take();
if (TextCommandConstants.TextCommandType.STOP == textCommand.getType()) {
synchronized (stopObject) {
stopObject.notify();
}
} else {
SocketTextWriter socketTextWriter = textCommand.getSocketTextWriter();
socketTextWriter.enqueue(textCommand);
}
} catch (InterruptedException e) {
return;
} catch (OutOfMemoryError e) {
OutOfMemoryErrorDispatcher.onOutOfMemory(e);
throw e;
}
}
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
void stop() {
running = false;
synchronized (stopObject) {
try {
blockingQueue.offer(new AbstractTextCommand(TextCommandConstants.TextCommandType.STOP) {
@Override
public boolean readFrom(ByteBuffer cb) {
return true;
}
@Override
public boolean writeTo(ByteBuffer bb) {
return true;
}
});
//noinspection WaitNotInLoop
stopObject.wait(1000);
} catch (Exception ignored) {
}
}
}
}
}
| 0 (true)
|
hazelcast_src_main_java_com_hazelcast_ascii_TextCommandServiceImpl.java
|
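processRequest() above starts the response thread lazily using double-checked locking on the volatile responseThreadRunnable field. A compressed sketch of just that idiom, with hypothetical names:

```java
// Minimal sketch of the lazy-start idiom in processRequest(): a volatile
// field plus double-checked locking so the worker thread is created at
// most once. Names here are hypothetical.
public class LazyWorker {
    private volatile Thread worker; // volatile is what makes the unsynchronized first check safe

    void submit(Runnable task) {
        if (worker == null) {
            synchronized (this) {
                if (worker == null) {          // re-check under the lock
                    worker = new Thread(() -> { /* drain a queue, etc. */ });
                    worker.start();
                }
            }
        }
        // hand the task off to the (now running) worker...
    }
}
```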
249 |
service.submitToMembers(runnable, collection, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
responseLatch.countDown();
}
public void onComplete(Map<Member, Object> values) {
completeLatch.countDown();
}
});
| 0 (true)
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
6,304 |
public class MockFSDirectoryService extends FsDirectoryService {
private final MockDirectoryHelper helper;
private FsDirectoryService delegateService;
@Inject
public MockFSDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore) {
super(shardId, indexSettings, indexStore);
helper = new MockDirectoryHelper(shardId, indexSettings, logger);
delegateService = helper.randomDirectorService(indexStore);
}
@Override
public Directory[] build() throws IOException {
return helper.wrapAllInplace(delegateService.build());
}
@Override
protected synchronized FSDirectory newFSDirectory(File location, LockFactory lockFactory) throws IOException {
throw new UnsupportedOperationException();
}
}
| 1 (no label)
|
src_test_java_org_elasticsearch_test_store_MockFSDirectoryService.java
|
1,689 |
public class URLBlobStore extends AbstractComponent implements BlobStore {
private final Executor executor;
private final URL path;
private final int bufferSizeInBytes;
/**
* Constructs a new read-only URL-based blob store.
* <p/>
* The following settings are supported:
* <dl>
* <dt>buffer_size</dt>
* <dd>- size of the read buffer, defaults to 100KB</dd>
* </dl>
*
* @param settings settings
* @param executor executor for read operations
* @param path base URL
*/
public URLBlobStore(Settings settings, Executor executor, URL path) {
super(settings);
this.path = path;
this.bufferSizeInBytes = (int) settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();
this.executor = executor;
}
/**
* {@inheritDoc}
*/
@Override
public String toString() {
return path.toString();
}
/**
* Returns base URL
*
* @return base URL
*/
public URL path() {
return path;
}
/**
* Returns read buffer size
*
* @return read buffer size
*/
public int bufferSizeInBytes() {
return this.bufferSizeInBytes;
}
/**
* Returns executor used for read operations
*
* @return executor
*/
public Executor executor() {
return executor;
}
/**
* {@inheritDoc}
*/
@Override
public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
try {
return new URLImmutableBlobContainer(this, path, buildPath(path));
} catch (MalformedURLException ex) {
throw new BlobStoreException("malformed URL " + path, ex);
}
}
/**
* This operation is not supported by the URL blob store
*
* @param path path of the blob container to delete (ignored; the store is read-only)
*/
@Override
public void delete(BlobPath path) {
throw new UnsupportedOperationException("URL repository is read only");
}
/**
* {@inheritDoc}
*/
@Override
public void close() {
// nothing to do here...
}
/**
* Builds URL using base URL and specified path
*
* @param path relative path
* @return Base URL + path
* @throws MalformedURLException
*/
private URL buildPath(BlobPath path) throws MalformedURLException {
String[] paths = path.toArray();
if (paths.length == 0) {
return path();
}
URL blobPath = new URL(this.path, paths[0] + "/");
if (paths.length > 1) {
for (int i = 1; i < paths.length; i++) {
blobPath = new URL(blobPath, paths[i] + "/");
}
}
return blobPath;
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_common_blobstore_url_URLBlobStore.java
|
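buildPath() leans on the java.net.URL two-argument constructor, which resolves a spec against a context URL; appending "/" to each segment keeps it treated as a directory so the next segment nests under it. A tiny demonstration with a hypothetical base URL:

```java
import java.net.MalformedURLException;
import java.net.URL;

// Small demonstration of the URL composition rule buildPath() relies on:
// new URL(context, spec) resolves spec against context, and the trailing
// slash keeps each segment treated as a directory rather than a file.
public class UrlJoinDemo {
    public static void main(String[] args) throws MalformedURLException {
        URL base = new URL("http://example.com/repo/"); // hypothetical base
        URL blobPath = base;
        for (String segment : new String[]{"indices", "idx", "0"}) {
            blobPath = new URL(blobPath, segment + "/");
        }
        // prints http://example.com/repo/indices/idx/0/
        System.out.println(blobPath);
    }
}
```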
114 |
static final class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
return new ForkJoinWorkerThread(pool);
}
}
| 0 (true)
|
src_main_java_jsr166e_ForkJoinPool.java
|
16 |
public static class TransactionTimeSpanPruneStrategy extends AbstractPruneStrategy
{
private final int timeToKeep;
private final TimeUnit unit;
public TransactionTimeSpanPruneStrategy( FileSystemAbstraction fileSystem, int timeToKeep, TimeUnit unit )
{
super( fileSystem );
this.timeToKeep = timeToKeep;
this.unit = unit;
}
@Override
protected Threshold newThreshold()
{
return new Threshold()
{
private long lowerLimit = System.currentTimeMillis() - unit.toMillis( timeToKeep );
@Override
public boolean reached( File file, long version, LogLoader source )
{
try
{
return source.getFirstStartRecordTimestamp( version ) < lowerLimit;
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
};
}
}
| 1 (no label)
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogPruneStrategies.java
|
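The threshold captures a single cutoff, lowerLimit = now - unit.toMillis(timeToKeep), and a log becomes prunable once its first transaction timestamp falls below that cutoff. A tiny illustration of the arithmetic (values are made up):

```java
import java.util.concurrent.TimeUnit;

// Tiny illustration of the cutoff computed in newThreshold(): keep a log
// only if its first transaction timestamp is at or after now - timeToKeep.
public class TimeSpanCutoffDemo {
    public static void main(String[] args) {
        int timeToKeep = 7;
        TimeUnit unit = TimeUnit.DAYS;
        long lowerLimit = System.currentTimeMillis() - unit.toMillis(timeToKeep);

        long firstRecordTimestamp = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(10);
        boolean prune = firstRecordTimestamp < lowerLimit; // true: older than 7 days
        System.out.println("prune=" + prune);
    }
}
```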
274 |
public interface JMSEmailServiceProducer extends EmailServiceProducer {
/**
* @return the emailServiceTemplate
*/
public JmsTemplate getEmailServiceTemplate();
/**
* @param emailServiceTemplate the emailServiceTemplate to set
*/
public void setEmailServiceTemplate(JmsTemplate emailServiceTemplate);
/**
* @return the emailServiceDestination
*/
public Destination getEmailServiceDestination();
/**
* @param emailServiceDestination the emailServiceDestination to set
*/
public void setEmailServiceDestination(Destination emailServiceDestination);
}
| 0 (true)
|
common_src_main_java_org_broadleafcommerce_common_email_service_jms_JMSEmailServiceProducer.java
|
2,367 |
public class ReducerTask<Key, Chunk>
implements Runnable {
private final AtomicBoolean cancelled = new AtomicBoolean();
private final JobSupervisor supervisor;
private final Queue<ReducerChunk<Key, Chunk>> reducerQueue;
private final String name;
private final String jobId;
private final AtomicBoolean active = new AtomicBoolean();
public ReducerTask(String name, String jobId, JobSupervisor supervisor) {
this.name = name;
this.jobId = jobId;
this.supervisor = supervisor;
this.reducerQueue = new ConcurrentLinkedQueue<ReducerChunk<Key, Chunk>>();
}
public String getName() {
return name;
}
public String getJobId() {
return jobId;
}
public void cancel() {
cancelled.set(true);
}
public void processChunk(Map<Key, Chunk> chunk) {
processChunk(-1, null, chunk);
}
public void processChunk(int partitionId, Address sender, Map<Key, Chunk> chunk) {
if (cancelled.get()) {
return;
}
reducerQueue.offer(new ReducerChunk<Key, Chunk>(chunk, partitionId, sender));
if (active.compareAndSet(false, true)) {
MapReduceService mapReduceService = supervisor.getMapReduceService();
ExecutorService es = mapReduceService.getExecutorService(name);
es.submit(this);
}
}
@Override
public void run() {
try {
ReducerChunk<Key, Chunk> reducerChunk;
while ((reducerChunk = reducerQueue.poll()) != null) {
if (cancelled.get()) {
return;
}
reduceChunk(reducerChunk.chunk);
processProcessedState(reducerChunk);
}
} catch (Throwable t) {
notifyRemoteException(supervisor, t);
if (t instanceof Error) {
ExceptionUtil.sneakyThrow(t);
}
} finally {
active.compareAndSet(true, false);
}
}
private void reduceChunk(Map<Key, Chunk> chunk) {
for (Map.Entry<Key, Chunk> entry : chunk.entrySet()) {
Reducer reducer = supervisor.getReducerByKey(entry.getKey());
if (reducer != null) {
Chunk chunkValue = entry.getValue();
if (chunkValue instanceof List) {
for (Object value : (List) chunkValue) {
reducer.reduce(value);
}
} else {
reducer.reduce(chunkValue);
}
}
}
}
private void processProcessedState(ReducerChunk<Key, Chunk> reducerChunk) {
// If partitionId is set this was the last chunk for this partition
if (reducerChunk.partitionId != -1) {
MapReduceService mapReduceService = supervisor.getMapReduceService();
ReducingFinishedNotification notification = new ReducingFinishedNotification(mapReduceService.getLocalAddress(), name,
jobId, reducerChunk.partitionId);
mapReduceService.sendNotification(reducerChunk.sender, notification);
}
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_ReducerTask.java
|
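processChunk() pairs a concurrent queue with an active flag toggled by compareAndSet, so at most one drain task per reducer is submitted at a time; run() drains the queue and resets the flag in a finally block. A generic sketch of that pattern (SingleDrainQueue is hypothetical):

```java
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the "offer, then CAS an active flag" pattern ReducerTask uses
// so at most one drain task per reducer is in flight at a time.
public class SingleDrainQueue<T> implements Runnable {
    private final Queue<T> queue = new ConcurrentLinkedQueue<>();
    private final AtomicBoolean active = new AtomicBoolean();
    private final ExecutorService executor = Executors.newCachedThreadPool();

    public void offer(T item) {
        queue.offer(item);
        if (active.compareAndSet(false, true)) { // only the winner submits
            executor.submit(this);
        }
    }

    @Override
    public void run() {
        try {
            T item;
            while ((item = queue.poll()) != null) {
                System.out.println("processing " + item);
            }
        } finally {
            active.compareAndSet(true, false); // allow the next offer to resubmit
        }
    }
}
```

As in ReducerTask, an item offered between the final poll() and the flag reset is not lost; it simply waits in the queue until the next offer() wins the CAS and resubmits the drain.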
532 |
@Deprecated
public class GatewaySnapshotRequest extends BroadcastOperationRequest<GatewaySnapshotRequest> {
GatewaySnapshotRequest() {
}
/**
* Constructs a new gateway snapshot against one or more indices. No indices means the gateway snapshot
* will be executed against all indices.
*/
public GatewaySnapshotRequest(String... indices) {
this.indices = indices;
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_indices_gateway_snapshot_GatewaySnapshotRequest.java
|
1,505 |
@SuppressWarnings("unchecked")
@PrivateApi
public final class HazelcastInstanceImpl
implements HazelcastInstance {
public final Node node;
final ILogger logger;
final String name;
final ManagementService managementService;
final LifecycleServiceImpl lifecycleService;
final ManagedContext managedContext;
final ThreadMonitoringService threadMonitoringService;
final ThreadGroup threadGroup;
final ConcurrentMap<String, Object> userContext = new ConcurrentHashMap<String, Object>();
HazelcastInstanceImpl(String name, Config config, NodeContext nodeContext)
throws Exception {
this.name = name;
this.threadGroup = new ThreadGroup(name);
threadMonitoringService = new ThreadMonitoringService(threadGroup);
lifecycleService = new LifecycleServiceImpl(this);
ManagedContext configuredManagedContext = config.getManagedContext();
managedContext = new HazelcastManagedContext(this, configuredManagedContext);
//we are going to copy the user-context map of the Config so that each HazelcastInstance will get its own
//user-context map instance instead of having a shared map instance. So changes made to the user-context map
//in one HazelcastInstance will not be reflected in the user-context of other HazelcastInstances.
userContext.putAll(config.getUserContext());
node = new Node(this, config, nodeContext);
logger = node.getLogger(getClass().getName());
lifecycleService.fireLifecycleEvent(STARTING);
node.start();
if (!node.isActive()) {
node.connectionManager.shutdown();
throw new IllegalStateException("Node failed to start!");
}
managementService = new ManagementService(this);
if (configuredManagedContext != null) {
if (configuredManagedContext instanceof HazelcastInstanceAware) {
((HazelcastInstanceAware) configuredManagedContext).setHazelcastInstance(this);
}
}
initHealthMonitor();
}
private void initHealthMonitor() {
String healthMonitorLevelString = node.getGroupProperties().HEALTH_MONITORING_LEVEL.getString();
HealthMonitorLevel healthLevel = HealthMonitorLevel.valueOf(healthMonitorLevelString);
if (healthLevel != HealthMonitorLevel.OFF) {
logger.finest("Starting health monitor");
int delaySeconds = node.getGroupProperties().HEALTH_MONITORING_DELAY_SECONDS.getInteger();
new HealthMonitor(this, healthLevel, delaySeconds).start();
}
}
public ManagementService getManagementService() {
return managementService;
}
@Override
public String getName() {
return name;
}
@Override
public <K, V> IMap<K, V> getMap(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a map instance with a null name is not allowed!");
}
return getDistributedObject(MapService.SERVICE_NAME, name);
}
@Override
public <E> IQueue<E> getQueue(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a queue instance with a null name is not allowed!");
}
return getDistributedObject(QueueService.SERVICE_NAME, name);
}
@Override
public <E> ITopic<E> getTopic(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a topic instance with a null name is not allowed!");
}
return getDistributedObject(TopicService.SERVICE_NAME, name);
}
@Override
public <E> ISet<E> getSet(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a set instance with a null name is not allowed!");
}
return getDistributedObject(SetService.SERVICE_NAME, name);
}
@Override
public <E> IList<E> getList(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a list instance with a null name is not allowed!");
}
return getDistributedObject(ListService.SERVICE_NAME, name);
}
@Override
public <K, V> MultiMap<K, V> getMultiMap(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a multi-map instance with a null name is not allowed!");
}
return getDistributedObject(MultiMapService.SERVICE_NAME, name);
}
@Override
public JobTracker getJobTracker(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a job tracker instance with a null name is not allowed!");
}
return getDistributedObject(MapReduceService.SERVICE_NAME, name);
}
@Deprecated
public ILock getLock(Object key) {
//this method will be deleted in the near future.
if (key == null) {
throw new NullPointerException("Retrieving a lock instance with a null key is not allowed!");
}
String name = LockProxy.convertToStringKey(key, node.getSerializationService());
return getLock(name);
}
@Override
public ILock getLock(String key) {
if (key == null) {
throw new NullPointerException("Retrieving a lock instance with a null key is not allowed!");
}
return getDistributedObject(LockService.SERVICE_NAME, key);
}
@Override
public <T> T executeTransaction(TransactionalTask<T> task)
throws TransactionException {
return executeTransaction(TransactionOptions.getDefault(), task);
}
@Override
public <T> T executeTransaction(TransactionOptions options, TransactionalTask<T> task)
throws TransactionException {
TransactionManagerService transactionManagerService = node.nodeEngine.getTransactionManagerService();
return transactionManagerService.executeTransaction(options, task);
}
@Override
public TransactionContext newTransactionContext() {
return newTransactionContext(TransactionOptions.getDefault());
}
@Override
public TransactionContext newTransactionContext(TransactionOptions options) {
TransactionManagerService transactionManagerService = node.nodeEngine.getTransactionManagerService();
return transactionManagerService.newTransactionContext(options);
}
@Override
public IExecutorService getExecutorService(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving an executor instance with a null name is not allowed!");
}
return getDistributedObject(DistributedExecutorService.SERVICE_NAME, name);
}
@Override
public IdGenerator getIdGenerator(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving an id-generator instance with a null name is not allowed!");
}
return getDistributedObject(IdGeneratorService.SERVICE_NAME, name);
}
@Override
public IAtomicLong getAtomicLong(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving an atomic-long instance with a null name is not allowed!");
}
return getDistributedObject(AtomicLongService.SERVICE_NAME, name);
}
@Override
public <E> IAtomicReference<E> getAtomicReference(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving an atomic-reference instance with a null name is not allowed!");
}
return getDistributedObject(AtomicReferenceService.SERVICE_NAME, name);
}
@Override
public ICountDownLatch getCountDownLatch(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving a countdown-latch instance with a null name is not allowed!");
}
return getDistributedObject(CountDownLatchService.SERVICE_NAME, name);
}
@Override
public ISemaphore getSemaphore(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving a semaphore instance with a null name is not allowed!");
}
return getDistributedObject(SemaphoreService.SERVICE_NAME, name);
}
@Override
public <K, V> ReplicatedMap<K, V> getReplicatedMap(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving a replicated map instance with a null name is not allowed!");
}
return getDistributedObject(ReplicatedMapService.SERVICE_NAME, name);
}
@Override
public Cluster getCluster() {
return node.clusterService.getClusterProxy();
}
@Override
public Member getLocalEndpoint() {
return node.clusterService.getLocalMember();
}
@Override
public Collection<DistributedObject> getDistributedObjects() {
ProxyService proxyService = node.nodeEngine.getProxyService();
return proxyService.getAllDistributedObjects();
}
@Override
public Config getConfig() {
return node.getConfig();
}
@Override
public ConcurrentMap<String, Object> getUserContext() {
return userContext;
}
@Override
public PartitionService getPartitionService() {
return node.partitionService.getPartitionServiceProxy();
}
@Override
public ClientService getClientService() {
return node.clientEngine.getClientService();
}
@Override
public LoggingService getLoggingService() {
return node.loggingService;
}
@Override
public LifecycleServiceImpl getLifecycleService() {
return lifecycleService;
}
@Override
public void shutdown() {
getLifecycleService().shutdown();
}
@Override
@Deprecated
public <T extends DistributedObject> T getDistributedObject(String serviceName, Object id) {
if (id instanceof String) {
return (T) node.nodeEngine.getProxyService().getDistributedObject(serviceName, (String) id);
}
throw new IllegalArgumentException("'id' must be type of String!");
}
@Override
public <T extends DistributedObject> T getDistributedObject(String serviceName, String name) {
ProxyService proxyService = node.nodeEngine.getProxyService();
return (T) proxyService.getDistributedObject(serviceName, name);
}
@Override
public String addDistributedObjectListener(DistributedObjectListener distributedObjectListener) {
final ProxyService proxyService = node.nodeEngine.getProxyService();
return proxyService.addProxyListener(distributedObjectListener);
}
@Override
public boolean removeDistributedObjectListener(String registrationId) {
final ProxyService proxyService = node.nodeEngine.getProxyService();
return proxyService.removeProxyListener(registrationId);
}
public ThreadGroup getThreadGroup() {
return threadGroup;
}
public SerializationService getSerializationService() {
return node.getSerializationService();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || !(o instanceof HazelcastInstance)) {
return false;
}
HazelcastInstance that = (HazelcastInstance) o;
return !(name != null ? !name.equals(that.getName()) : that.getName() != null);
}
@Override
public int hashCode() {
return name != null ? name.hashCode() : 0;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("HazelcastInstance");
sb.append("{name='").append(name).append('\'');
sb.append(", node=").append(node.getThisAddress());
sb.append('}');
return sb.toString();
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_instance_HazelcastInstanceImpl.java
|
5,368 |
public class InternalStats extends MetricsAggregation.MultiValue implements Stats {
public final static Type TYPE = new Type("stats");
public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public InternalStats readResult(StreamInput in) throws IOException {
InternalStats result = new InternalStats();
result.readFrom(in);
return result;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
enum Metrics {
count, sum, min, max, avg;
public static Metrics resolve(String name) {
return Metrics.valueOf(name);
}
}
protected long count;
protected double min;
protected double max;
protected double sum;
protected InternalStats() {} // for serialization
public InternalStats(String name, long count, double sum, double min, double max) {
super(name);
this.count = count;
this.sum = sum;
this.min = min;
this.max = max;
}
@Override
public long getCount() {
return count;
}
@Override
public double getMin() {
return min;
}
@Override
public double getMax() {
return max;
}
@Override
public double getAvg() {
return sum / count;
}
@Override
public double getSum() {
return sum;
}
@Override
public Type type() {
return TYPE;
}
@Override
public double value(String name) {
Metrics metrics = Metrics.valueOf(name);
switch (metrics) {
case min: return this.min;
case max: return this.max;
case avg: return this.getAvg();
case count: return this.count;
case sum: return this.sum;
default:
throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
}
}
@Override
public InternalStats reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
return (InternalStats) aggregations.get(0);
}
InternalStats reduced = null;
for (InternalAggregation aggregation : aggregations) {
if (reduced == null) {
if (((InternalStats) aggregation).count != 0) {
reduced = (InternalStats) aggregation;
}
} else {
if (((InternalStats) aggregation).count != 0) {
reduced.count += ((InternalStats) aggregation).count;
reduced.min = Math.min(reduced.min, ((InternalStats) aggregation).min);
reduced.max = Math.max(reduced.max, ((InternalStats) aggregation).max);
reduced.sum += ((InternalStats) aggregation).sum;
mergeOtherStats(reduced, aggregation);
}
}
}
if (reduced != null) {
return reduced;
}
return (InternalStats) aggregations.get(0);
}
protected void mergeOtherStats(InternalStats to, InternalAggregation from) {
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
valueFormatter = ValueFormatterStreams.readOptional(in);
count = in.readVLong();
min = in.readDouble();
max = in.readDouble();
sum = in.readDouble();
readOtherStatsFrom(in);
}
public void readOtherStatsFrom(StreamInput in) throws IOException {
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
ValueFormatterStreams.writeOptional(valueFormatter, out);
out.writeVLong(count);
out.writeDouble(min);
out.writeDouble(max);
out.writeDouble(sum);
writeOtherStatsTo(out);
}
protected void writeOtherStatsTo(StreamOutput out) throws IOException {
}
static class Fields {
public static final XContentBuilderString COUNT = new XContentBuilderString("count");
public static final XContentBuilderString MIN = new XContentBuilderString("min");
public static final XContentBuilderString MIN_AS_STRING = new XContentBuilderString("min_as_string");
public static final XContentBuilderString MAX = new XContentBuilderString("max");
public static final XContentBuilderString MAX_AS_STRING = new XContentBuilderString("max_as_string");
public static final XContentBuilderString AVG = new XContentBuilderString("avg");
public static final XContentBuilderString AVG_AS_STRING = new XContentBuilderString("avg_as_string");
public static final XContentBuilderString SUM = new XContentBuilderString("sum");
public static final XContentBuilderString SUM_AS_STRING = new XContentBuilderString("sum_as_string");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
builder.field(Fields.COUNT, count);
builder.field(Fields.MIN, count != 0 ? min : null);
builder.field(Fields.MAX, count != 0 ? max : null);
builder.field(Fields.AVG, count != 0 ? getAvg() : null);
builder.field(Fields.SUM, count != 0 ? sum : null);
if (count != 0 && valueFormatter != null) {
builder.field(Fields.MIN_AS_STRING, valueFormatter.format(min));
builder.field(Fields.MAX_AS_STRING, valueFormatter.format(max));
builder.field(Fields.AVG_AS_STRING, valueFormatter.format(getAvg()));
builder.field(Fields.SUM_AS_STRING, valueFormatter.format(sum));
}
otherStatsToXContent(builder, params);
builder.endObject();
return builder;
}
protected XContentBuilder otherStatsToXContent(XContentBuilder builder, Params params) throws IOException {
return builder;
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_search_aggregations_metrics_stats_InternalStats.java
|
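reduce() folds per-shard stats into a single result: counts and sums add, min/max fold, shards with count == 0 are skipped, and the average is always derived as sum / count at read time rather than stored. A minimal illustration of the merge, assuming Java 16+ records (the names here are hypothetical):

```java
// Minimal illustration of the shard-level merge performed in reduce():
// counts and sums add, min/max fold, and the average is derived at read
// time as sum / count.
public class StatsMergeDemo {
    record Stats(long count, double sum, double min, double max) {
        Stats merge(Stats other) {
            if (other.count == 0) return this;   // empty shards contribute nothing
            if (this.count == 0) return other;
            return new Stats(count + other.count,
                             sum + other.sum,
                             Math.min(min, other.min),
                             Math.max(max, other.max));
        }
        double avg() { return count == 0 ? Double.NaN : sum / count; }
    }

    public static void main(String[] args) {
        Stats shard1 = new Stats(3, 6.0, 1.0, 3.0);
        Stats shard2 = new Stats(2, 9.0, 4.0, 5.0);
        Stats total = shard1.merge(shard2);
        System.out.printf("count=%d avg=%.2f min=%.1f max=%.1f%n",
                total.count(), total.avg(), total.min(), total.max());
    }
}
```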
368 |
public class GetRepositoriesAction extends ClusterAction<GetRepositoriesRequest, GetRepositoriesResponse, GetRepositoriesRequestBuilder> {
public static final GetRepositoriesAction INSTANCE = new GetRepositoriesAction();
public static final String NAME = "cluster/repository/get";
private GetRepositoriesAction() {
super(NAME);
}
@Override
public GetRepositoriesResponse newResponse() {
return new GetRepositoriesResponse();
}
@Override
public GetRepositoriesRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new GetRepositoriesRequestBuilder(client);
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_get_GetRepositoriesAction.java
|
39 |
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
| 0 (true)
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
648 |
indexTemplateService.removeTemplates(new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()), new MetaDataIndexTemplateService.RemoveListener() {
@Override
public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) {
listener.onResponse(new DeleteIndexTemplateResponse(response.acknowledged()));
}
@Override
public void onFailure(Throwable t) {
logger.debug("failed to delete templates [{}]", t, request.name());
listener.onFailure(t);
}
});
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_indices_template_delete_TransportDeleteIndexTemplateAction.java
|
2,628 |
public class ZenPingService extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
private volatile ImmutableList<? extends ZenPing> zenPings = ImmutableList.of();
// kept for backward compatibility with discovery plugins
public ZenPingService(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, NetworkService networkService,
@Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
this(settings, threadPool, transportService, clusterName, networkService, Version.CURRENT, unicastHostsProviders);
}
@Inject
public ZenPingService(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, NetworkService networkService,
Version version, @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
super(settings);
ImmutableList.Builder<ZenPing> zenPingsBuilder = ImmutableList.builder();
if (componentSettings.getAsBoolean("multicast.enabled", true)) {
zenPingsBuilder.add(new MulticastZenPing(settings, threadPool, transportService, clusterName, networkService, version));
}
// always add the unicast hosts, so it will be able to receive unicast requests even when working in multicast
zenPingsBuilder.add(new UnicastZenPing(settings, threadPool, transportService, clusterName, version, unicastHostsProviders));
this.zenPings = zenPingsBuilder.build();
}
public ImmutableList<? extends ZenPing> zenPings() {
return this.zenPings;
}
public void zenPings(ImmutableList<? extends ZenPing> pings) {
this.zenPings = pings;
if (lifecycle.started()) {
for (ZenPing zenPing : zenPings) {
zenPing.start();
}
} else if (lifecycle.stopped()) {
for (ZenPing zenPing : zenPings) {
zenPing.stop();
}
}
}
@Override
public void setNodesProvider(DiscoveryNodesProvider nodesProvider) {
if (lifecycle.started()) {
throw new ElasticsearchIllegalStateException("Can't set nodes provider when started");
}
for (ZenPing zenPing : zenPings) {
zenPing.setNodesProvider(nodesProvider);
}
}
@Override
protected void doStart() throws ElasticsearchException {
for (ZenPing zenPing : zenPings) {
zenPing.start();
}
}
@Override
protected void doStop() throws ElasticsearchException {
for (ZenPing zenPing : zenPings) {
zenPing.stop();
}
}
@Override
protected void doClose() throws ElasticsearchException {
for (ZenPing zenPing : zenPings) {
zenPing.close();
}
}
public PingResponse[] pingAndWait(TimeValue timeout) {
final AtomicReference<PingResponse[]> response = new AtomicReference<PingResponse[]>();
final CountDownLatch latch = new CountDownLatch(1);
ping(new PingListener() {
@Override
public void onPing(PingResponse[] pings) {
response.set(pings);
latch.countDown();
}
}, timeout);
try {
latch.await();
return response.get();
} catch (InterruptedException e) {
logger.trace("pingAndWait interrupted");
return null;
}
}
@Override
public void ping(PingListener listener, TimeValue timeout) throws ElasticsearchException {
ImmutableList<? extends ZenPing> zenPings = this.zenPings;
CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings);
for (ZenPing zenPing : zenPings) {
try {
zenPing.ping(compoundPingListener, timeout);
} catch (EsRejectedExecutionException ex) {
logger.debug("Ping execution rejected", ex);
compoundPingListener.onPing(null);
}
}
}
private static class CompoundPingListener implements PingListener {
private final PingListener listener;
private final AtomicInteger counter;
private ConcurrentMap<DiscoveryNode, PingResponse> responses = ConcurrentCollections.newConcurrentMap();
private CompoundPingListener(PingListener listener, ImmutableList<? extends ZenPing> zenPings) {
this.listener = listener;
this.counter = new AtomicInteger(zenPings.size());
}
@Override
public void onPing(PingResponse[] pings) {
if (pings != null) {
for (PingResponse pingResponse : pings) {
responses.put(pingResponse.target(), pingResponse);
}
}
if (counter.decrementAndGet() == 0) {
listener.onPing(responses.values().toArray(new PingResponse[responses.size()]));
}
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_discovery_zen_ping_ZenPingService.java
|
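CompoundPingListener fans N sub-ping results back into one callback: each ZenPing reports once (possibly with null), responses are deduplicated per node in a concurrent map, and the wrapped listener fires exactly when the countdown reaches zero. A generic sketch of that fan-in shape (FanInListener is hypothetical; the real code keys by DiscoveryNode):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the fan-in pattern CompoundPingListener implements: N parts
// each report once (possibly with null), results are deduplicated by key,
// and the wrapped callback fires exactly when the counter hits zero.
public class FanInListener<K, V> {
    interface Callback<K, V> { void onAll(Map<K, V> merged); }

    private final Callback<K, V> delegate;
    private final AtomicInteger remaining;
    private final Map<K, V> merged = new ConcurrentHashMap<>();

    FanInListener(Callback<K, V> delegate, int parts) {
        this.delegate = delegate;
        this.remaining = new AtomicInteger(parts);
    }

    void onPart(Map<K, V> part) {
        if (part != null) {
            merged.putAll(part); // last write per key wins, as in the ping map
        }
        if (remaining.decrementAndGet() == 0) {
            delegate.onAll(merged);
        }
    }
}
```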
371 |
public class GetRepositoriesResponse extends ActionResponse implements Iterable<RepositoryMetaData> {
private ImmutableList<RepositoryMetaData> repositories = ImmutableList.of();
GetRepositoriesResponse() {
}
GetRepositoriesResponse(ImmutableList<RepositoryMetaData> repositories) {
this.repositories = repositories;
}
/**
* List of repositories to return
*
* @return list of repositories
*/
public ImmutableList<RepositoryMetaData> repositories() {
return repositories;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
ImmutableList.Builder<RepositoryMetaData> repositoryListBuilder = ImmutableList.builder();
for (int j = 0; j < size; j++) {
repositoryListBuilder.add(new RepositoryMetaData(
in.readString(),
in.readString(),
ImmutableSettings.readSettingsFromStream(in))
);
}
repositories = repositoryListBuilder.build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(repositories.size());
for (RepositoryMetaData repository : repositories) {
out.writeString(repository.name());
out.writeString(repository.type());
ImmutableSettings.writeSettingsToStream(repository.settings(), out);
}
}
/**
* Iterator over the repositories data
*
* @return iterator over the repositories data
*/
@Override
public Iterator<RepositoryMetaData> iterator() {
return repositories.iterator();
}
}
| 0 (true)
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_get_GetRepositoriesResponse.java
|
134 |
public class StampedLock implements java.io.Serializable {
/*
* Algorithmic notes:
*
* The design employs elements of Sequence locks
* (as used in linux kernels; see Lameter's
* http://www.lameter.com/gelato2005.pdf
* and elsewhere; see
* Boehm's http://www.hpl.hp.com/techreports/2012/HPL-2012-68.html)
* and Ordered RW locks (see Shirako et al
* http://dl.acm.org/citation.cfm?id=2312015)
*
* Conceptually, the primary state of the lock includes a sequence
* number that is odd when write-locked and even otherwise.
* However, this is offset by a reader count that is non-zero when
* read-locked. The read count is ignored when validating
* "optimistic" seqlock-reader-style stamps. Because we must use
* a small finite number of bits (currently 7) for readers, a
* supplementary reader overflow word is used when the number of
* readers exceeds the count field. We do this by treating the max
* reader count value (RBITS) as a spinlock protecting overflow
* updates.
*
* Waiters use a modified form of CLH lock used in
* AbstractQueuedSynchronizer (see its internal documentation for
* a fuller account), where each node is tagged (field mode) as
* either a reader or writer. Sets of waiting readers are grouped
* (linked) under a common node (field cowait) so act as a single
* node with respect to most CLH mechanics. By virtue of the
* queue structure, wait nodes need not actually carry sequence
* numbers; we know each is greater than its predecessor. This
* simplifies the scheduling policy to a mainly-FIFO scheme that
* incorporates elements of Phase-Fair locks (see Brandenburg &
* Anderson, especially http://www.cs.unc.edu/~bbb/diss/). In
* particular, we use the phase-fair anti-barging rule: If an
* incoming reader arrives while read lock is held but there is a
* queued writer, this incoming reader is queued. (This rule is
* responsible for some of the complexity of method acquireRead,
* but without it, the lock becomes highly unfair.) Method release
* does not (and sometimes cannot) itself wake up cowaiters. This
* is done by the primary thread, but helped by any other threads
* with nothing better to do in methods acquireRead and
* acquireWrite.
*
* These rules apply to threads actually queued. All tryLock forms
* opportunistically try to acquire locks regardless of preference
* rules, and so may "barge" their way in. Randomized spinning is
* used in the acquire methods to reduce (increasingly expensive)
* context switching while also avoiding sustained memory
* thrashing among many threads. We limit spins to the head of
* queue. A thread spin-waits up to SPINS times (where each
* iteration decreases spin count with 50% probability) before
* blocking. If, upon wakening it fails to obtain lock, and is
* still (or becomes) the first waiting thread (which indicates
* that some other thread barged and obtained lock), it escalates
* spins (up to MAX_HEAD_SPINS) to reduce the likelihood of
* continually losing to barging threads.
*
* Nearly all of these mechanics are carried out in methods
* acquireWrite and acquireRead, that, as typical of such code,
* sprawl out because actions and retries rely on consistent sets
* of locally cached reads.
*
* As noted in Boehm's paper (above), sequence validation (mainly
* method validate()) requires stricter ordering rules than apply
* to normal volatile reads (of "state"). In the absence of (but
* continual hope for) explicit JVM support of intrinsics with
* double-sided reordering prohibition, or corresponding fence
* intrinsics, we for now uncomfortably rely on the fact that the
* Unsafe.getXVolatile intrinsic must have this property
* (syntactic volatile reads do not) for internal purposes anyway,
* even though it is not documented.
*
* The memory layout keeps lock state and queue pointers together
* (normally on the same cache line). This usually works well for
* read-mostly loads. In most other cases, the natural tendency of
* adaptive-spin CLH locks to reduce memory contention lessens
* motivation to further spread out contended locations, but might
* be subject to future improvements.
*/
private static final long serialVersionUID = -6001602636862214147L;
/** Number of processors, for spin control */
private static final int NCPU = Runtime.getRuntime().availableProcessors();
/** Maximum number of retries before enqueuing on acquisition */
private static final int SPINS = (NCPU > 1) ? 1 << 6 : 0;
/** Maximum number of retries before blocking at head on acquisition */
private static final int HEAD_SPINS = (NCPU > 1) ? 1 << 10 : 0;
/** Maximum number of retries before re-blocking */
private static final int MAX_HEAD_SPINS = (NCPU > 1) ? 1 << 16 : 0;
/** The period for yielding when waiting for overflow spinlock */
    private static final int OVERFLOW_YIELD_RATE = 7; // must be (a power of 2) - 1
/** The number of bits to use for reader count before overflowing */
private static final int LG_READERS = 7;
// Values for lock state and stamp operations
private static final long RUNIT = 1L;
private static final long WBIT = 1L << LG_READERS;
private static final long RBITS = WBIT - 1L;
private static final long RFULL = RBITS - 1L;
private static final long ABITS = RBITS | WBIT;
private static final long SBITS = ~RBITS; // note overlap with ABITS
// Initial value for lock state; avoid failure value zero
private static final long ORIGIN = WBIT << 1;
// Special value from cancelled acquire methods so caller can throw IE
private static final long INTERRUPTED = 1L;
// Values for node status; order matters
private static final int WAITING = -1;
private static final int CANCELLED = 1;
// Modes for nodes (int not boolean to allow arithmetic)
private static final int RMODE = 0;
private static final int WMODE = 1;
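    // For concreteness, with LG_READERS = 7 the values above work out to:
    // RUNIT = 0x001, WBIT = 0x080, RBITS = 0x07F, RFULL = 0x07E,
    // ABITS = 0x0FF, SBITS = 0xFFFFFFFFFFFFFF80, ORIGIN = 0x100.
    // A state value is therefore a sequence number in the upper bits, one
    // write bit, and a 7-bit reader count that saturates at RFULL before
    // spilling into readerOverflow.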
/** Wait nodes */
static final class WNode {
volatile WNode prev;
volatile WNode next;
volatile WNode cowait; // list of linked readers
volatile Thread thread; // non-null while possibly parked
volatile int status; // 0, WAITING, or CANCELLED
final int mode; // RMODE or WMODE
WNode(int m, WNode p) { mode = m; prev = p; }
}
/** Head of CLH queue */
private transient volatile WNode whead;
/** Tail (last) of CLH queue */
private transient volatile WNode wtail;
// views
transient ReadLockView readLockView;
transient WriteLockView writeLockView;
transient ReadWriteLockView readWriteLockView;
/** Lock sequence/state */
private transient volatile long state;
/** extra reader count when state read count saturated */
private transient int readerOverflow;
/**
* Creates a new lock, initially in unlocked state.
*/
public StampedLock() {
state = ORIGIN;
}
/**
* Exclusively acquires the lock, blocking if necessary
* until available.
*
* @return a stamp that can be used to unlock or convert mode
*/
public long writeLock() {
long s, next; // bypass acquireWrite in fully unlocked case only
return ((((s = state) & ABITS) == 0L &&
U.compareAndSwapLong(this, STATE, s, next = s + WBIT)) ?
next : acquireWrite(false, 0L));
}
/**
* Exclusively acquires the lock if it is immediately available.
*
* @return a stamp that can be used to unlock or convert mode,
* or zero if the lock is not available
*/
public long tryWriteLock() {
long s, next;
return ((((s = state) & ABITS) == 0L &&
U.compareAndSwapLong(this, STATE, s, next = s + WBIT)) ?
next : 0L);
}
/**
* Exclusively acquires the lock if it is available within the
* given time and the current thread has not been interrupted.
* Behavior under timeout and interruption matches that specified
* for method {@link Lock#tryLock(long,TimeUnit)}.
*
* @param time the maximum time to wait for the lock
* @param unit the time unit of the {@code time} argument
* @return a stamp that can be used to unlock or convert mode,
* or zero if the lock is not available
* @throws InterruptedException if the current thread is interrupted
* before acquiring the lock
*/
public long tryWriteLock(long time, TimeUnit unit)
throws InterruptedException {
long nanos = unit.toNanos(time);
if (!Thread.interrupted()) {
long next, deadline;
if ((next = tryWriteLock()) != 0L)
return next;
if (nanos <= 0L)
return 0L;
if ((deadline = System.nanoTime() + nanos) == 0L)
deadline = 1L;
if ((next = acquireWrite(true, deadline)) != INTERRUPTED)
return next;
}
throw new InterruptedException();
}
/**
* Exclusively acquires the lock, blocking if necessary
* until available or the current thread is interrupted.
* Behavior under interruption matches that specified
* for method {@link Lock#lockInterruptibly()}.
*
* @return a stamp that can be used to unlock or convert mode
* @throws InterruptedException if the current thread is interrupted
* before acquiring the lock
*/
public long writeLockInterruptibly() throws InterruptedException {
long next;
if (!Thread.interrupted() &&
(next = acquireWrite(true, 0L)) != INTERRUPTED)
return next;
throw new InterruptedException();
}
/**
* Non-exclusively acquires the lock, blocking if necessary
* until available.
*
* @return a stamp that can be used to unlock or convert mode
*/
public long readLock() {
long s = state, next; // bypass acquireRead on common uncontended case
return ((whead == wtail && (s & ABITS) < RFULL &&
U.compareAndSwapLong(this, STATE, s, next = s + RUNIT)) ?
next : acquireRead(false, 0L));
}
/**
* Non-exclusively acquires the lock if it is immediately available.
*
* @return a stamp that can be used to unlock or convert mode,
* or zero if the lock is not available
*/
public long tryReadLock() {
for (;;) {
long s, m, next;
if ((m = (s = state) & ABITS) == WBIT)
return 0L;
else if (m < RFULL) {
if (U.compareAndSwapLong(this, STATE, s, next = s + RUNIT))
return next;
}
else if ((next = tryIncReaderOverflow(s)) != 0L)
return next;
}
}
/**
* Non-exclusively acquires the lock if it is available within the
* given time and the current thread has not been interrupted.
* Behavior under timeout and interruption matches that specified
* for method {@link Lock#tryLock(long,TimeUnit)}.
*
* @param time the maximum time to wait for the lock
* @param unit the time unit of the {@code time} argument
* @return a stamp that can be used to unlock or convert mode,
* or zero if the lock is not available
* @throws InterruptedException if the current thread is interrupted
* before acquiring the lock
*/
public long tryReadLock(long time, TimeUnit unit)
throws InterruptedException {
long s, m, next, deadline;
long nanos = unit.toNanos(time);
if (!Thread.interrupted()) {
if ((m = (s = state) & ABITS) != WBIT) {
if (m < RFULL) {
if (U.compareAndSwapLong(this, STATE, s, next = s + RUNIT))
return next;
}
else if ((next = tryIncReaderOverflow(s)) != 0L)
return next;
}
if (nanos <= 0L)
return 0L;
if ((deadline = System.nanoTime() + nanos) == 0L)
deadline = 1L;
if ((next = acquireRead(true, deadline)) != INTERRUPTED)
return next;
}
throw new InterruptedException();
}
/**
* Non-exclusively acquires the lock, blocking if necessary
* until available or the current thread is interrupted.
* Behavior under interruption matches that specified
* for method {@link Lock#lockInterruptibly()}.
*
* @return a stamp that can be used to unlock or convert mode
* @throws InterruptedException if the current thread is interrupted
* before acquiring the lock
*/
public long readLockInterruptibly() throws InterruptedException {
long next;
if (!Thread.interrupted() &&
(next = acquireRead(true, 0L)) != INTERRUPTED)
return next;
throw new InterruptedException();
}
/**
* Returns a stamp that can later be validated, or zero
* if exclusively locked.
*
* @return a stamp, or zero if exclusively locked
*/
public long tryOptimisticRead() {
long s;
return (((s = state) & WBIT) == 0L) ? (s & SBITS) : 0L;
}
/**
* Returns true if the lock has not been exclusively acquired
* since issuance of the given stamp. Always returns false if the
* stamp is zero. Always returns true if the stamp represents a
* currently held lock. Invoking this method with a value not
* obtained from {@link #tryOptimisticRead} or a locking method
* for this lock has no defined effect or result.
*
* @param stamp a stamp
* @return {@code true} if the lock has not been exclusively acquired
* since issuance of the given stamp; else false
*/
public boolean validate(long stamp) {
// See above about current use of getLongVolatile here
return (stamp & SBITS) == (U.getLongVolatile(this, STATE) & SBITS);
}
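    // Usage sketch for the optimistic idiom above (adapted from the class's
    // documented pattern; "sl", "x" and "y" are illustrative names, not part
    // of this file): read opportunistically, then fall back to a real read
    // lock only if a writer intervened.
    //
    //   double distanceFromOrigin() {
    //       long stamp = sl.tryOptimisticRead();
    //       double curX = x, curY = y;           // unguarded reads
    //       if (!sl.validate(stamp)) {           // a write occurred; retry under lock
    //           stamp = sl.readLock();
    //           try { curX = x; curY = y; }
    //           finally { sl.unlockRead(stamp); }
    //       }
    //       return Math.sqrt(curX * curX + curY * curY);
    //   }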
/**
* If the lock state matches the given stamp, releases the
* exclusive lock.
*
* @param stamp a stamp returned by a write-lock operation
* @throws IllegalMonitorStateException if the stamp does
* not match the current state of this lock
*/
public void unlockWrite(long stamp) {
WNode h;
if (state != stamp || (stamp & WBIT) == 0L)
throw new IllegalMonitorStateException();
state = (stamp += WBIT) == 0L ? ORIGIN : stamp;
if ((h = whead) != null && h.status != 0)
release(h);
}
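    // Typical exclusive usage (sketch; "sl" is an illustrative name):
    //
    //   long stamp = sl.writeLock();
    //   try { /* mutate shared state */ }
    //   finally { sl.unlockWrite(stamp); }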
/**
* If the lock state matches the given stamp, releases the
* non-exclusive lock.
*
* @param stamp a stamp returned by a read-lock operation
* @throws IllegalMonitorStateException if the stamp does
* not match the current state of this lock
*/
public void unlockRead(long stamp) {
long s, m; WNode h;
for (;;) {
if (((s = state) & SBITS) != (stamp & SBITS) ||
(stamp & ABITS) == 0L || (m = s & ABITS) == 0L || m == WBIT)
throw new IllegalMonitorStateException();
if (m < RFULL) {
if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) {
if (m == RUNIT && (h = whead) != null && h.status != 0)
release(h);
break;
}
}
else if (tryDecReaderOverflow(s) != 0L)
break;
}
}
/**
* If the lock state matches the given stamp, releases the
* corresponding mode of the lock.
*
* @param stamp a stamp returned by a lock operation
* @throws IllegalMonitorStateException if the stamp does
* not match the current state of this lock
*/
public void unlock(long stamp) {
long a = stamp & ABITS, m, s; WNode h;
while (((s = state) & SBITS) == (stamp & SBITS)) {
if ((m = s & ABITS) == 0L)
break;
else if (m == WBIT) {
if (a != m)
break;
state = (s += WBIT) == 0L ? ORIGIN : s;
if ((h = whead) != null && h.status != 0)
release(h);
return;
}
else if (a == 0L || a >= WBIT)
break;
else if (m < RFULL) {
if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) {
if (m == RUNIT && (h = whead) != null && h.status != 0)
release(h);
return;
}
}
else if (tryDecReaderOverflow(s) != 0L)
return;
}
throw new IllegalMonitorStateException();
}
/**
* If the lock state matches the given stamp, performs one of
* the following actions. If the stamp represents holding a write
* lock, returns it. Or, if a read lock, if the write lock is
* available, releases the read lock and returns a write stamp.
* Or, if an optimistic read, returns a write stamp only if
* immediately available. This method returns zero in all other
* cases.
*
* @param stamp a stamp
* @return a valid write stamp, or zero on failure
*/
public long tryConvertToWriteLock(long stamp) {
long a = stamp & ABITS, m, s, next;
while (((s = state) & SBITS) == (stamp & SBITS)) {
if ((m = s & ABITS) == 0L) {
if (a != 0L)
break;
if (U.compareAndSwapLong(this, STATE, s, next = s + WBIT))
return next;
}
else if (m == WBIT) {
if (a != m)
break;
return stamp;
}
else if (m == RUNIT && a != 0L) {
if (U.compareAndSwapLong(this, STATE, s,
next = s - RUNIT + WBIT))
return next;
}
else
break;
}
return 0L;
}
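    // Upgrade sketch (adapted from the class's documented pattern; method
    // names are illustrative): attempt an in-place conversion of a read
    // stamp, otherwise release and reacquire exclusively.
    //
    //   long stamp = sl.readLock();
    //   try {
    //       while (needsUpdate()) {
    //           long ws = sl.tryConvertToWriteLock(stamp);
    //           if (ws != 0L) { stamp = ws; doUpdate(); break; }
    //           sl.unlockRead(stamp);
    //           stamp = sl.writeLock();
    //       }
    //   } finally { sl.unlock(stamp); }   // unlock() handles either mode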
/**
* If the lock state matches the given stamp, performs one of
* the following actions. If the stamp represents holding a write
* lock, releases it and obtains a read lock. Or, if a read lock,
* returns it. Or, if an optimistic read, acquires a read lock and
* returns a read stamp only if immediately available. This method
* returns zero in all other cases.
*
* @param stamp a stamp
* @return a valid read stamp, or zero on failure
*/
public long tryConvertToReadLock(long stamp) {
long a = stamp & ABITS, m, s, next; WNode h;
while (((s = state) & SBITS) == (stamp & SBITS)) {
if ((m = s & ABITS) == 0L) {
if (a != 0L)
break;
else if (m < RFULL) {
if (U.compareAndSwapLong(this, STATE, s, next = s + RUNIT))
return next;
}
else if ((next = tryIncReaderOverflow(s)) != 0L)
return next;
}
else if (m == WBIT) {
if (a != m)
break;
state = next = s + (WBIT + RUNIT);
if ((h = whead) != null && h.status != 0)
release(h);
return next;
}
else if (a != 0L && a < WBIT)
return stamp;
else
break;
}
return 0L;
}
/**
* If the lock state matches the given stamp then, if the stamp
* represents holding a lock, releases it and returns an
* observation stamp. Or, if an optimistic read, returns it if
* validated. This method returns zero in all other cases, and so
* may be useful as a form of "tryUnlock".
*
* @param stamp a stamp
* @return a valid optimistic read stamp, or zero on failure
*/
public long tryConvertToOptimisticRead(long stamp) {
long a = stamp & ABITS, m, s, next; WNode h;
for (;;) {
            s = U.getLongVolatile(this, STATE); // see above
            if ((s & SBITS) != (stamp & SBITS))
break;
if ((m = s & ABITS) == 0L) {
if (a != 0L)
break;
return s;
}
else if (m == WBIT) {
if (a != m)
break;
state = next = (s += WBIT) == 0L ? ORIGIN : s;
if ((h = whead) != null && h.status != 0)
release(h);
return next;
}
else if (a == 0L || a >= WBIT)
break;
else if (m < RFULL) {
if (U.compareAndSwapLong(this, STATE, s, next = s - RUNIT)) {
if (m == RUNIT && (h = whead) != null && h.status != 0)
release(h);
return next & SBITS;
}
}
else if ((next = tryDecReaderOverflow(s)) != 0L)
return next & SBITS;
}
return 0L;
}
/**
* Releases the write lock if it is held, without requiring a
* stamp value. This method may be useful for recovery after
* errors.
*
* @return {@code true} if the lock was held, else false
*/
public boolean tryUnlockWrite() {
long s; WNode h;
if (((s = state) & WBIT) != 0L) {
state = (s += WBIT) == 0L ? ORIGIN : s;
if ((h = whead) != null && h.status != 0)
release(h);
return true;
}
return false;
}
/**
* Releases one hold of the read lock if it is held, without
* requiring a stamp value. This method may be useful for recovery
* after errors.
*
* @return {@code true} if the read lock was held, else false
*/
public boolean tryUnlockRead() {
long s, m; WNode h;
while ((m = (s = state) & ABITS) != 0L && m < WBIT) {
if (m < RFULL) {
if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) {
if (m == RUNIT && (h = whead) != null && h.status != 0)
release(h);
return true;
}
}
else if (tryDecReaderOverflow(s) != 0L)
return true;
}
return false;
}
// status monitoring methods
/**
* Returns combined state-held and overflow read count for given
* state s.
*/
private int getReadLockCount(long s) {
long readers;
if ((readers = s & RBITS) >= RFULL)
readers = RFULL + readerOverflow;
return (int) readers;
}
/**
* Returns {@code true} if the lock is currently held exclusively.
*
* @return {@code true} if the lock is currently held exclusively
*/
public boolean isWriteLocked() {
return (state & WBIT) != 0L;
}
/**
* Returns {@code true} if the lock is currently held non-exclusively.
*
* @return {@code true} if the lock is currently held non-exclusively
*/
public boolean isReadLocked() {
return (state & RBITS) != 0L;
}
/**
* Queries the number of read locks held for this lock. This
* method is designed for use in monitoring system state, not for
* synchronization control.
* @return the number of read locks held
*/
public int getReadLockCount() {
return getReadLockCount(state);
}
/**
* Returns a string identifying this lock, as well as its lock
* state. The state, in brackets, includes the String {@code
* "Unlocked"} or the String {@code "Write-locked"} or the String
* {@code "Read-locks:"} followed by the current number of
* read-locks held.
*
* @return a string identifying this lock, as well as its lock state
*/
public String toString() {
long s = state;
return super.toString() +
((s & ABITS) == 0L ? "[Unlocked]" :
(s & WBIT) != 0L ? "[Write-locked]" :
"[Read-locks:" + getReadLockCount(s) + "]");
}
// views
/**
* Returns a plain {@link Lock} view of this StampedLock in which
* the {@link Lock#lock} method is mapped to {@link #readLock},
* and similarly for other methods. The returned Lock does not
* support a {@link Condition}; method {@link
* Lock#newCondition()} throws {@code
* UnsupportedOperationException}.
*
* @return the lock
*/
public Lock asReadLock() {
ReadLockView v;
return ((v = readLockView) != null ? v :
(readLockView = new ReadLockView()));
}
/**
* Returns a plain {@link Lock} view of this StampedLock in which
* the {@link Lock#lock} method is mapped to {@link #writeLock},
* and similarly for other methods. The returned Lock does not
* support a {@link Condition}; method {@link
* Lock#newCondition()} throws {@code
* UnsupportedOperationException}.
*
* @return the lock
*/
public Lock asWriteLock() {
WriteLockView v;
return ((v = writeLockView) != null ? v :
(writeLockView = new WriteLockView()));
}
/**
* Returns a {@link ReadWriteLock} view of this StampedLock in
* which the {@link ReadWriteLock#readLock()} method is mapped to
* {@link #asReadLock()}, and {@link ReadWriteLock#writeLock()} to
* {@link #asWriteLock()}.
*
* @return the lock
*/
public ReadWriteLock asReadWriteLock() {
ReadWriteLockView v;
return ((v = readWriteLockView) != null ? v :
(readWriteLockView = new ReadWriteLockView()));
}
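    // The views let a StampedLock stand in where a plain Lock or
    // ReadWriteLock is expected, e.g. (illustrative):
    //
    //   ReadWriteLock rw = sl.asReadWriteLock();
    //   rw.readLock().lock();
    //   try { /* read */ } finally { rw.readLock().unlock(); }
    //
    // Since the view methods discard stamps, optimistic reads and mode
    // conversions are not available through them.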
// view classes
final class ReadLockView implements Lock {
public void lock() { readLock(); }
public void lockInterruptibly() throws InterruptedException {
readLockInterruptibly();
}
public boolean tryLock() { return tryReadLock() != 0L; }
public boolean tryLock(long time, TimeUnit unit)
throws InterruptedException {
return tryReadLock(time, unit) != 0L;
}
public void unlock() { unstampedUnlockRead(); }
public Condition newCondition() {
throw new UnsupportedOperationException();
}
}
final class WriteLockView implements Lock {
public void lock() { writeLock(); }
public void lockInterruptibly() throws InterruptedException {
writeLockInterruptibly();
}
public boolean tryLock() { return tryWriteLock() != 0L; }
public boolean tryLock(long time, TimeUnit unit)
throws InterruptedException {
return tryWriteLock(time, unit) != 0L;
}
public void unlock() { unstampedUnlockWrite(); }
public Condition newCondition() {
throw new UnsupportedOperationException();
}
}
final class ReadWriteLockView implements ReadWriteLock {
public Lock readLock() { return asReadLock(); }
public Lock writeLock() { return asWriteLock(); }
}
// Unlock methods without stamp argument checks for view classes.
// Needed because view-class lock methods throw away stamps.
final void unstampedUnlockWrite() {
WNode h; long s;
if (((s = state) & WBIT) == 0L)
throw new IllegalMonitorStateException();
state = (s += WBIT) == 0L ? ORIGIN : s;
if ((h = whead) != null && h.status != 0)
release(h);
}
final void unstampedUnlockRead() {
for (;;) {
long s, m; WNode h;
if ((m = (s = state) & ABITS) == 0L || m >= WBIT)
throw new IllegalMonitorStateException();
else if (m < RFULL) {
if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) {
if (m == RUNIT && (h = whead) != null && h.status != 0)
release(h);
break;
}
}
else if (tryDecReaderOverflow(s) != 0L)
break;
}
}
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
state = ORIGIN; // reset to unlocked state
}
// internals
/**
* Tries to increment readerOverflow by first setting state
* access bits value to RBITS, indicating hold of spinlock,
* then updating, then releasing.
*
* @param s a reader overflow stamp: (s & ABITS) >= RFULL
* @return new stamp on success, else zero
*/
private long tryIncReaderOverflow(long s) {
// assert (s & ABITS) >= RFULL;
if ((s & ABITS) == RFULL) {
if (U.compareAndSwapLong(this, STATE, s, s | RBITS)) {
++readerOverflow;
state = s;
return s;
}
}
else if ((ThreadLocalRandom.current().nextInt() &
OVERFLOW_YIELD_RATE) == 0)
Thread.yield();
return 0L;
}
/**
* Tries to decrement readerOverflow.
*
* @param s a reader overflow stamp: (s & ABITS) >= RFULL
* @return new stamp on success, else zero
*/
private long tryDecReaderOverflow(long s) {
// assert (s & ABITS) >= RFULL;
if ((s & ABITS) == RFULL) {
if (U.compareAndSwapLong(this, STATE, s, s | RBITS)) {
int r; long next;
if ((r = readerOverflow) > 0) {
readerOverflow = r - 1;
next = s;
}
else
next = s - RUNIT;
state = next;
return next;
}
}
else if ((ThreadLocalRandom.current().nextInt() &
OVERFLOW_YIELD_RATE) == 0)
Thread.yield();
return 0L;
}
/**
* Wakes up the successor of h (normally whead). This is normally
* just h.next, but may require traversal from wtail if next
* pointers are lagging. This may fail to wake up an acquiring
* thread when one or more have been cancelled, but the cancel
* methods themselves provide extra safeguards to ensure liveness.
*/
private void release(WNode h) {
if (h != null) {
WNode q; Thread w;
U.compareAndSwapInt(h, WSTATUS, WAITING, 0);
if ((q = h.next) == null || q.status == CANCELLED) {
for (WNode t = wtail; t != null && t != h; t = t.prev)
if (t.status <= 0)
q = t;
}
if (q != null && (w = q.thread) != null)
U.unpark(w);
}
}
/**
* See above for explanation.
*
* @param interruptible true if should check interrupts and if so
* return INTERRUPTED
* @param deadline if nonzero, the System.nanoTime value to timeout
* at (and return zero)
* @return next state, or INTERRUPTED
*/
private long acquireWrite(boolean interruptible, long deadline) {
WNode node = null, p;
for (int spins = -1;;) { // spin while enqueuing
long m, s, ns;
if ((m = (s = state) & ABITS) == 0L) {
if (U.compareAndSwapLong(this, STATE, s, ns = s + WBIT))
return ns;
}
else if (spins < 0)
spins = (m == WBIT && wtail == whead) ? SPINS : 0;
else if (spins > 0) {
if (ThreadLocalRandom.current().nextInt() >= 0)
--spins;
}
else if ((p = wtail) == null) { // initialize queue
WNode hd = new WNode(WMODE, null);
if (U.compareAndSwapObject(this, WHEAD, null, hd))
wtail = hd;
}
else if (node == null)
node = new WNode(WMODE, p);
else if (node.prev != p)
node.prev = p;
else if (U.compareAndSwapObject(this, WTAIL, p, node)) {
p.next = node;
break;
}
}
for (int spins = -1;;) {
WNode h, np, pp; int ps;
if ((h = whead) == p) {
if (spins < 0)
spins = HEAD_SPINS;
else if (spins < MAX_HEAD_SPINS)
spins <<= 1;
for (int k = spins;;) { // spin at head
long s, ns;
if (((s = state) & ABITS) == 0L) {
if (U.compareAndSwapLong(this, STATE, s,
ns = s + WBIT)) {
whead = node;
node.prev = null;
return ns;
}
}
else if (ThreadLocalRandom.current().nextInt() >= 0 &&
--k <= 0)
break;
}
}
else if (h != null) { // help release stale waiters
WNode c; Thread w;
while ((c = h.cowait) != null) {
if (U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) &&
(w = c.thread) != null)
U.unpark(w);
}
}
if (whead == h) {
if ((np = node.prev) != p) {
if (np != null)
(p = np).next = node; // stale
}
else if ((ps = p.status) == 0)
U.compareAndSwapInt(p, WSTATUS, 0, WAITING);
else if (ps == CANCELLED) {
if ((pp = p.prev) != null) {
node.prev = pp;
pp.next = node;
}
}
else {
long time; // 0 argument to park means no timeout
if (deadline == 0L)
time = 0L;
else if ((time = deadline - System.nanoTime()) <= 0L)
return cancelWaiter(node, node, false);
Thread wt = Thread.currentThread();
U.putObject(wt, PARKBLOCKER, this);
node.thread = wt;
if (p.status < 0 && (p != h || (state & ABITS) != 0L) &&
whead == h && node.prev == p)
U.park(false, time); // emulate LockSupport.park
node.thread = null;
U.putObject(wt, PARKBLOCKER, null);
if (interruptible && Thread.interrupted())
return cancelWaiter(node, node, true);
}
}
}
}
/**
* See above for explanation.
*
* @param interruptible true if should check interrupts and if so
* return INTERRUPTED
* @param deadline if nonzero, the System.nanoTime value to timeout
* at (and return zero)
* @return next state, or INTERRUPTED
*/
private long acquireRead(boolean interruptible, long deadline) {
WNode node = null, p;
for (int spins = -1;;) {
WNode h;
if ((h = whead) == (p = wtail)) {
for (long m, s, ns;;) {
if ((m = (s = state) & ABITS) < RFULL ?
U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT) :
(m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L))
return ns;
else if (m >= WBIT) {
if (spins > 0) {
if (ThreadLocalRandom.current().nextInt() >= 0)
--spins;
}
else {
if (spins == 0) {
WNode nh = whead, np = wtail;
if ((nh == h && np == p) || (h = nh) != (p = np))
break;
}
spins = SPINS;
}
}
}
}
if (p == null) { // initialize queue
WNode hd = new WNode(WMODE, null);
if (U.compareAndSwapObject(this, WHEAD, null, hd))
wtail = hd;
}
else if (node == null)
node = new WNode(RMODE, p);
else if (h == p || p.mode != RMODE) {
if (node.prev != p)
node.prev = p;
else if (U.compareAndSwapObject(this, WTAIL, p, node)) {
p.next = node;
break;
}
}
else if (!U.compareAndSwapObject(p, WCOWAIT,
node.cowait = p.cowait, node))
node.cowait = null;
else {
for (;;) {
WNode pp, c; Thread w;
if ((h = whead) != null && (c = h.cowait) != null &&
U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) &&
(w = c.thread) != null) // help release
U.unpark(w);
if (h == (pp = p.prev) || h == p || pp == null) {
long m, s, ns;
do {
if ((m = (s = state) & ABITS) < RFULL ?
U.compareAndSwapLong(this, STATE, s,
ns = s + RUNIT) :
(m < WBIT &&
(ns = tryIncReaderOverflow(s)) != 0L))
return ns;
} while (m < WBIT);
}
if (whead == h && p.prev == pp) {
long time;
if (pp == null || h == p || p.status > 0) {
node = null; // throw away
break;
}
if (deadline == 0L)
time = 0L;
else if ((time = deadline - System.nanoTime()) <= 0L)
return cancelWaiter(node, p, false);
Thread wt = Thread.currentThread();
U.putObject(wt, PARKBLOCKER, this);
node.thread = wt;
if ((h != pp || (state & ABITS) == WBIT) &&
whead == h && p.prev == pp)
U.park(false, time);
node.thread = null;
U.putObject(wt, PARKBLOCKER, null);
if (interruptible && Thread.interrupted())
return cancelWaiter(node, p, true);
}
}
}
}
for (int spins = -1;;) {
WNode h, np, pp; int ps;
if ((h = whead) == p) {
if (spins < 0)
spins = HEAD_SPINS;
else if (spins < MAX_HEAD_SPINS)
spins <<= 1;
for (int k = spins;;) { // spin at head
long m, s, ns;
if ((m = (s = state) & ABITS) < RFULL ?
U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT) :
(m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L)) {
WNode c; Thread w;
whead = node;
node.prev = null;
while ((c = node.cowait) != null) {
if (U.compareAndSwapObject(node, WCOWAIT,
c, c.cowait) &&
(w = c.thread) != null)
U.unpark(w);
}
return ns;
}
else if (m >= WBIT &&
ThreadLocalRandom.current().nextInt() >= 0 && --k <= 0)
break;
}
}
else if (h != null) {
WNode c; Thread w;
while ((c = h.cowait) != null) {
if (U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) &&
(w = c.thread) != null)
U.unpark(w);
}
}
if (whead == h) {
if ((np = node.prev) != p) {
if (np != null)
(p = np).next = node; // stale
}
else if ((ps = p.status) == 0)
U.compareAndSwapInt(p, WSTATUS, 0, WAITING);
else if (ps == CANCELLED) {
if ((pp = p.prev) != null) {
node.prev = pp;
pp.next = node;
}
}
else {
long time;
if (deadline == 0L)
time = 0L;
else if ((time = deadline - System.nanoTime()) <= 0L)
return cancelWaiter(node, node, false);
Thread wt = Thread.currentThread();
U.putObject(wt, PARKBLOCKER, this);
node.thread = wt;
if (p.status < 0 &&
(p != h || (state & ABITS) == WBIT) &&
whead == h && node.prev == p)
U.park(false, time);
node.thread = null;
U.putObject(wt, PARKBLOCKER, null);
if (interruptible && Thread.interrupted())
return cancelWaiter(node, node, true);
}
}
}
}
/**
* If node non-null, forces cancel status and unsplices it from
* queue if possible and wakes up any cowaiters (of the node, or
* group, as applicable), and in any case helps release current
* first waiter if lock is free. (Calling with null arguments
* serves as a conditional form of release, which is not currently
* needed but may be needed under possible future cancellation
* policies). This is a variant of cancellation methods in
* AbstractQueuedSynchronizer (see its detailed explanation in AQS
* internal documentation).
*
* @param node if nonnull, the waiter
* @param group either node or the group node is cowaiting with
* @param interrupted if already interrupted
* @return INTERRUPTED if interrupted or Thread.interrupted, else zero
*/
private long cancelWaiter(WNode node, WNode group, boolean interrupted) {
if (node != null && group != null) {
Thread w;
node.status = CANCELLED;
// unsplice cancelled nodes from group
for (WNode p = group, q; (q = p.cowait) != null;) {
if (q.status == CANCELLED) {
U.compareAndSwapObject(p, WCOWAIT, q, q.cowait);
p = group; // restart
}
else
p = q;
}
if (group == node) {
for (WNode r = group.cowait; r != null; r = r.cowait) {
if ((w = r.thread) != null)
U.unpark(w); // wake up uncancelled co-waiters
}
for (WNode pred = node.prev; pred != null; ) { // unsplice
WNode succ, pp; // find valid successor
while ((succ = node.next) == null ||
succ.status == CANCELLED) {
WNode q = null; // find successor the slow way
for (WNode t = wtail; t != null && t != node; t = t.prev)
if (t.status != CANCELLED)
q = t; // don't link if succ cancelled
if (succ == q || // ensure accurate successor
U.compareAndSwapObject(node, WNEXT,
succ, succ = q)) {
if (succ == null && node == wtail)
U.compareAndSwapObject(this, WTAIL, node, pred);
break;
}
}
if (pred.next == node) // unsplice pred link
U.compareAndSwapObject(pred, WNEXT, node, succ);
if (succ != null && (w = succ.thread) != null) {
succ.thread = null;
U.unpark(w); // wake up succ to observe new pred
}
if (pred.status != CANCELLED || (pp = pred.prev) == null)
break;
node.prev = pp; // repeat if new pred wrong/cancelled
U.compareAndSwapObject(pp, WNEXT, pred, succ);
pred = pp;
}
}
}
WNode h; // Possibly release first waiter
while ((h = whead) != null) {
long s; WNode q; // similar to release() but check eligibility
if ((q = h.next) == null || q.status == CANCELLED) {
for (WNode t = wtail; t != null && t != h; t = t.prev)
if (t.status <= 0)
q = t;
}
if (h == whead) {
if (q != null && h.status == 0 &&
((s = state) & ABITS) != WBIT && // waiter is eligible
(s == 0L || q.mode == RMODE))
release(h);
break;
}
}
return (interrupted || Thread.interrupted()) ? INTERRUPTED : 0L;
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long STATE;
private static final long WHEAD;
private static final long WTAIL;
private static final long WNEXT;
private static final long WSTATUS;
private static final long WCOWAIT;
private static final long PARKBLOCKER;
static {
try {
U = getUnsafe();
Class<?> k = StampedLock.class;
Class<?> wk = WNode.class;
STATE = U.objectFieldOffset
(k.getDeclaredField("state"));
WHEAD = U.objectFieldOffset
(k.getDeclaredField("whead"));
WTAIL = U.objectFieldOffset
(k.getDeclaredField("wtail"));
WSTATUS = U.objectFieldOffset
(wk.getDeclaredField("status"));
WNEXT = U.objectFieldOffset
(wk.getDeclaredField("next"));
WCOWAIT = U.objectFieldOffset
(wk.getDeclaredField("cowait"));
Class<?> tk = Thread.class;
PARKBLOCKER = U.objectFieldOffset
(tk.getDeclaredField("parkBlocker"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
| 0true
|
src_main_java_jsr166e_StampedLock.java
|
4,890 |
public class RestNodesAction extends AbstractCatAction {
@Inject
public RestNodesAction(Settings settings, Client client, RestController controller) {
super(settings, client);
controller.registerHandler(GET, "/_cat/nodes", this);
}
@Override
void documentation(StringBuilder sb) {
sb.append("/_cat/nodes\n");
}
@Override
public void doRequest(final RestRequest request, final RestChannel channel) {
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
clusterStateRequest.clear().nodes(true);
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
@Override
public void onResponse(final ClusterStateResponse clusterStateResponse) {
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
nodesInfoRequest.clear().jvm(true).os(true).process(true);
client.admin().cluster().nodesInfo(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
@Override
public void onResponse(final NodesInfoResponse nodesInfoResponse) {
NodesStatsRequest nodesStatsRequest = new NodesStatsRequest();
nodesStatsRequest.clear().jvm(true).os(true).fs(true).indices(true);
client.admin().cluster().nodesStats(nodesStatsRequest, new ActionListener<NodesStatsResponse>() {
@Override
public void onResponse(NodesStatsResponse nodesStatsResponse) {
try {
channel.sendResponse(RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse), request, channel));
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
Table getTableWithHeader(final RestRequest request) {
Table table = new Table();
table.startHeaders();
table.addCell("id", "default:false;alias:id,nodeId;desc:unique node id");
table.addCell("pid", "default:false;alias:p;desc:process id");
table.addCell("host", "alias:h;desc:host name");
table.addCell("ip", "alias:i;desc:ip address");
table.addCell("port", "default:false;alias:po;desc:bound transport port");
table.addCell("version", "default:false;alias:v;desc:es version");
table.addCell("build", "default:false;alias:b;desc:es build hash");
table.addCell("jdk", "default:false;alias:j;desc:jdk version");
table.addCell("disk.avail", "default:false;alias:d,disk,diskAvail;text-align:right;desc:available disk space");
table.addCell("heap.percent", "alias:hp,heapPercent;text-align:right;desc:used heap ratio");
table.addCell("heap.max", "default:false;alias:hm,heapMax;text-align:right;desc:max configured heap");
table.addCell("ram.percent", "alias:rp,ramPercent;text-align:right;desc:used machine memory ratio");
table.addCell("ram.max", "default:false;alias:rm,ramMax;text-align:right;desc:total machine memory");
table.addCell("load", "alias:l;text-align:right;desc:most recent load avg");
table.addCell("uptime", "default:false;alias:u;text-align:right;desc:node uptime");
table.addCell("node.role", "alias:r,role,dc,nodeRole;desc:d:data node, c:client node");
table.addCell("master", "alias:m;desc:m:master-eligible, *:current master");
table.addCell("name", "alias:n;desc:node name");
table.addCell("completion.size", "alias:cs,completionSize;default:false;text-align:right;desc:size of completion");
table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache");
table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions");
table.addCell("filter_cache.memory_size", "alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache");
table.addCell("filter_cache.evictions", "alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions");
table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
table.addCell("get.current", "alias:gc,getCurrent;default:false;text-align:right;desc:number of current get ops");
table.addCell("get.time", "alias:gti,getTime;default:false;text-align:right;desc:time spent in get");
table.addCell("get.total", "alias:gto,getTotal;default:false;text-align:right;desc:number of get ops");
table.addCell("get.exists_time", "alias:geti,getExistsTime;default:false;text-align:right;desc:time spent in successful gets");
table.addCell("get.exists_total", "alias:geto,getExistsTotal;default:false;text-align:right;desc:number of successful gets");
table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets");
table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
table.addCell("id_cache.memory_size", "alias:im,idCacheMemory;default:false;text-align:right;desc:used id cache");
table.addCell("indexing.delete_current", "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions");
table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops");
table.addCell("indexing.index_current", "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops");
table.addCell("indexing.index_time", "alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing");
table.addCell("indexing.index_total", "alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops");
table.addCell("merges.current", "alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges");
table.addCell("merges.current_docs", "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs");
table.addCell("merges.current_size", "alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges");
table.addCell("merges.total", "alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops");
table.addCell("merges.total_docs", "alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged");
table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged");
table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges");
table.addCell("percolate.current", "alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations");
table.addCell("percolate.memory_size", "alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations");
table.addCell("percolate.queries", "alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries");
table.addCell("percolate.time", "alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating");
table.addCell("percolate.total", "alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations");
table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops");
table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase");
table.addCell("search.fetch_total", "alias:sfto,searchFetchTotal;default:false;text-align:right;desc:total fetch ops");
table.addCell("search.open_contexts", "alias:so,searchOpenContexts;default:false;text-align:right;desc:open search contexts");
table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops");
table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase");
table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops");
table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
table.endHeaders();
return table;
}
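    // Example request against this endpoint (a sketch; assumes the standard
    // cat-API display parameters "v" and "h" that RestTable understands):
    //
    //   curl -s 'localhost:9200/_cat/nodes?v&h=host,ip,heap.percent,load,master,name'
    //
    // Column names and aliases come from the header cells declared above.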
private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) {
boolean fullId = req.paramAsBoolean("full_id", false);
DiscoveryNodes nodes = state.getState().nodes();
String masterId = nodes.masterNodeId();
Table table = getTableWithHeader(req);
for (DiscoveryNode node : nodes) {
NodeInfo info = nodesInfo.getNodesMap().get(node.id());
NodeStats stats = nodesStats.getNodesMap().get(node.id());
table.startRow();
table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
table.addCell(info == null ? null : info.getProcess().id());
table.addCell(node.getHostName());
table.addCell(node.getHostAddress());
if (node.address() instanceof InetSocketTransportAddress) {
table.addCell(((InetSocketTransportAddress) node.address()).address().getPort());
} else {
table.addCell("-");
}
table.addCell(info == null ? null : info.getVersion().number());
table.addCell(info == null ? null : info.getBuild().hashShort());
table.addCell(info == null ? null : info.getJvm().version());
table.addCell(stats == null ? null : stats.getFs() == null ? null : stats.getFs().total().getAvailable());
table.addCell(stats == null ? null : stats.getJvm().getMem().getHeapUsedPrecent());
table.addCell(info == null ? null : info.getJvm().getMem().getHeapMax());
table.addCell(stats == null ? null : stats.getOs().mem() == null ? null : stats.getOs().mem().usedPercent());
table.addCell(info == null ? null : info.getOs().mem() == null ? null : info.getOs().mem().total()); // sigar fails to load in IntelliJ
table.addCell(stats == null ? null : stats.getOs() == null ? null : stats.getOs().getLoadAverage().length < 1 ? null : String.format(Locale.ROOT, "%.2f", stats.getOs().getLoadAverage()[0]));
table.addCell(stats == null ? null : stats.getJvm().uptime());
table.addCell(node.clientNode() ? "c" : node.dataNode() ? "d" : "-");
table.addCell(masterId.equals(node.id()) ? "*" : node.masterNode() ? "m" : "-");
table.addCell(node.name());
table.addCell(stats == null ? null : stats.getIndices().getCompletion().getSize());
table.addCell(stats == null ? null : stats.getIndices().getFieldData().getMemorySize());
table.addCell(stats == null ? null : stats.getIndices().getFieldData().getEvictions());
table.addCell(stats == null ? null : stats.getIndices().getFilterCache().getMemorySize());
table.addCell(stats == null ? null : stats.getIndices().getFilterCache().getEvictions());
table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotal());
table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotalTime());
table.addCell(stats == null ? null : stats.getIndices().getGet().current());
table.addCell(stats == null ? null : stats.getIndices().getGet().getTime());
table.addCell(stats == null ? null : stats.getIndices().getGet().getCount());
table.addCell(stats == null ? null : stats.getIndices().getGet().getExistsTime());
table.addCell(stats == null ? null : stats.getIndices().getGet().getExistsCount());
table.addCell(stats == null ? null : stats.getIndices().getGet().getMissingTime());
table.addCell(stats == null ? null : stats.getIndices().getGet().getMissingCount());
table.addCell(stats == null ? null : stats.getIndices().getIdCache().getMemorySize());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteCurrent());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteTime());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteCount());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexCurrent());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexTime());
table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexCount());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrent());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrentNumDocs());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrentSize());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotal());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalNumDocs());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalSize());
table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalTime());
table.addCell(stats == null ? null : stats.getIndices().getPercolate().getCurrent());
table.addCell(stats == null ? null : stats.getIndices().getPercolate().getMemorySize());
table.addCell(stats == null ? null : stats.getIndices().getPercolate().getNumQueries());
table.addCell(stats == null ? null : stats.getIndices().getPercolate().getTime());
table.addCell(stats == null ? null : stats.getIndices().getPercolate().getCount());
table.addCell(stats == null ? null : stats.getIndices().getRefresh().getTotal());
table.addCell(stats == null ? null : stats.getIndices().getRefresh().getTotalTime());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchCurrent());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchTime());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchCount());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getOpenContexts());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryCurrent());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryTime());
table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryCount());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getCount());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getMemory());
table.endRow();
}
return table;
}
}
| 1no label
|
src_main_java_org_elasticsearch_rest_action_cat_RestNodesAction.java
|
399 |
class CacheRecord<K> {
final K key;
final Object value;
volatile long lastAccessTime;
final long creationTime;
final AtomicInteger hit;
CacheRecord(K key, Object value) {
this.key = key;
this.value = value;
long time = Clock.currentTimeMillis();
this.lastAccessTime = time;
this.creationTime = time;
this.hit = new AtomicInteger(0);
}
void access() {
hit.incrementAndGet();
clientNearCacheStats.incrementHits();
lastAccessTime = Clock.currentTimeMillis();
}
public long getCost() {
// todo find object size if not a Data instance.
if (!(value instanceof Data)) {
return 0;
}
if (!(key instanceof Data)) {
return 0;
}
// value is Data
return ((Data) key).getHeapCost()
+ ((Data) value).getHeapCost()
+ 2 * (Long.SIZE / Byte.SIZE)
// sizeof atomic integer
+ (Integer.SIZE / Byte.SIZE)
// object references (key, value, hit)
+ 3 * (Integer.SIZE / Byte.SIZE);
}
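    // Worked example: for a key with a 16-byte heap cost and a value with a
    // 64-byte heap cost, getCost() returns 16 + 64 + 2*8 (the two long
    // timestamps) + 4 (the AtomicInteger payload) + 3*4 (three references,
    // assuming 4-byte compressed oops) = 112 bytes.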
boolean expired() {
long time = Clock.currentTimeMillis();
return (maxIdleMillis > 0 && time > lastAccessTime + maxIdleMillis)
|| (timeToLiveMillis > 0 && time > creationTime + timeToLiveMillis);
}
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_nearcache_ClientNearCache.java
|
948 |
public abstract class TransportMasterNodeOperationAction<Request extends MasterNodeOperationRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
protected final TransportService transportService;
protected final ClusterService clusterService;
final String transportAction;
final String executor;
protected TransportMasterNodeOperationAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, threadPool);
this.transportService = transportService;
this.clusterService = clusterService;
this.transportAction = transportAction();
this.executor = executor();
transportService.registerHandler(transportAction, new TransportHandler());
}
protected abstract String transportAction();
protected abstract String executor();
protected abstract Request newRequest();
protected abstract Response newResponse();
protected abstract void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws ElasticsearchException;
protected boolean localExecute(Request request) {
return false;
}
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return null;
}
protected void processBeforeDelegationToMaster(Request request, ClusterState state) {
}
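    // Minimal concrete subclass sketch (all names below are hypothetical; a
    // real action would also define request/response serialization):
    //
    //   public class TransportMyAction extends TransportMasterNodeOperationAction<MyRequest, MyResponse> {
    //       @Override protected String transportAction() { return "cluster/my_action"; }
    //       @Override protected String executor() { return ThreadPool.Names.GENERIC; }
    //       @Override protected MyRequest newRequest() { return new MyRequest(); }
    //       @Override protected MyResponse newResponse() { return new MyResponse(); }
    //       @Override protected void masterOperation(MyRequest request, ClusterState state,
    //                                                ActionListener<MyResponse> listener) {
    //           listener.onResponse(new MyResponse()); // runs only once this node is master
    //       }
    //   }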
@Override
public void execute(Request request, ActionListener<Response> listener) {
// since the callback is async, we typically can get called from within an event in the cluster service
// or something similar, so make sure we are threaded so we won't block it.
request.listenerThreaded(true);
super.execute(request, listener);
}
@Override
protected void doExecute(final Request request, final ActionListener<Response> listener) {
innerExecute(request, listener, false);
}
private void innerExecute(final Request request, final ActionListener<Response> listener, final boolean retrying) {
final ClusterState clusterState = clusterService.state();
final DiscoveryNodes nodes = clusterState.nodes();
if (nodes.localNodeMaster() || localExecute(request)) {
// check for block, if blocked, retry, else, execute locally
final ClusterBlockException blockException = checkBlock(request, clusterState);
if (blockException != null) {
if (!blockException.retryable()) {
listener.onFailure(blockException);
return;
}
clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
@Override
public void postAdded() {
ClusterBlockException blockException = checkBlock(request, clusterService.state());
if (blockException == null || !blockException.retryable()) {
clusterService.remove(this);
innerExecute(request, listener, false);
}
}
@Override
public void onClose() {
clusterService.remove(this);
listener.onFailure(blockException);
}
@Override
public void onTimeout(TimeValue timeout) {
clusterService.remove(this);
listener.onFailure(blockException);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
ClusterBlockException blockException = checkBlock(request, event.state());
if (blockException == null || !blockException.retryable()) {
clusterService.remove(this);
innerExecute(request, listener, false);
}
}
});
} else {
try {
threadPool.executor(executor).execute(new Runnable() {
@Override
public void run() {
try {
masterOperation(request, clusterService.state(), listener);
} catch (Throwable e) {
listener.onFailure(e);
}
}
});
} catch (Throwable t) {
listener.onFailure(t);
}
}
} else {
if (nodes.masterNode() == null) {
if (retrying) {
listener.onFailure(new MasterNotDiscoveredException());
} else {
clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
@Override
public void postAdded() {
ClusterState clusterStateV2 = clusterService.state();
if (clusterStateV2.nodes().masterNodeId() != null) {
// now we have a master, try and execute it...
clusterService.remove(this);
innerExecute(request, listener, true);
}
}
@Override
public void onClose() {
clusterService.remove(this);
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
clusterService.remove(this);
listener.onFailure(new MasterNotDiscoveredException("waited for [" + timeout + "]"));
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.nodesDelta().masterNodeChanged()) {
clusterService.remove(this);
innerExecute(request, listener, true);
}
}
});
}
return;
}
processBeforeDelegationToMaster(request, clusterState);
transportService.sendRequest(nodes.masterNode(), transportAction, request, new BaseTransportResponseHandler<Response>() {
@Override
public Response newInstance() {
return newResponse();
}
@Override
public void handleResponse(Response response) {
listener.onResponse(response);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleException(final TransportException exp) {
if (exp.unwrapCause() instanceof ConnectTransportException) {
// we want to retry here a bit to see if a new master is elected
clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
@Override
public void postAdded() {
ClusterState clusterStateV2 = clusterService.state();
if (!clusterState.nodes().masterNodeId().equals(clusterStateV2.nodes().masterNodeId())) {
// master changes while adding the listener, try here
clusterService.remove(this);
innerExecute(request, listener, false);
}
}
@Override
public void onClose() {
clusterService.remove(this);
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
clusterService.remove(this);
listener.onFailure(new MasterNotDiscoveredException());
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.nodesDelta().masterNodeChanged()) {
clusterService.remove(this);
innerExecute(request, listener, false);
}
}
});
} else {
listener.onFailure(exp);
}
}
});
}
}
private class TransportHandler extends BaseTransportRequestHandler<Request> {
@Override
public Request newInstance() {
return newRequest();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
// we just send back a response, no need to fork a listener
request.listenerThreaded(false);
execute(request, new ActionListener<Response>() {
@Override
public void onResponse(Response response) {
try {
channel.sendResponse(response);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send response", e1);
}
}
});
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_support_master_TransportMasterNodeOperationAction.java
|
168 |
static final class EmptyTask extends ForkJoinTask<Void> {
private static final long serialVersionUID = -7721805057305804111L;
EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
public final Void getRawResult() { return null; }
public final void setRawResult(Void x) {}
public final boolean exec() { return true; }
}
| 0true
|
src_main_java_jsr166y_ForkJoinPool.java
|
3,983 |
public abstract class DecayFunctionParser implements ScoreFunctionParser {
/**
* Override this function if you want to produce your own scorer.
* */
public abstract DecayFunction getDecayFunction();
/**
* Parses bodies of the kind
*
* <pre>
     * {@code
     * {
     *     "fieldname1" : {
     *         "origin" : "someValue",
     *         "scale" : "someValue"
     *     }
     * }
     * }
* </pre>
*
* */
@Override
public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException {
String currentFieldName = null;
XContentParser.Token token;
ScoreFunction scoreFunction = null;
token = parser.nextToken();
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
// parse per field the origin and scale value
scoreFunction = parseVariable(currentFieldName, parser, parseContext);
} else {
throw new ElasticsearchParseException("Malformed score function score parameters.");
}
} else {
throw new ElasticsearchParseException("Malformed score function score parameters.");
}
parser.nextToken();
return scoreFunction;
}
// parses origin and scale parameter for field "fieldName"
private ScoreFunction parseVariable(String fieldName, XContentParser parser, QueryParseContext parseContext) throws IOException {
// now, the field must exist, else we cannot read the value for
// the doc later
MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
if (smartMappers == null || !smartMappers.hasMapper()) {
throw new QueryParsingException(parseContext.index(), "Unknown field [" + fieldName + "]");
}
FieldMapper<?> mapper = smartMappers.fieldMappers().mapper();
// dates and time need special handling
if (mapper instanceof DateFieldMapper) {
return parseDateVariable(fieldName, parser, parseContext, (DateFieldMapper) mapper);
} else if (mapper instanceof GeoPointFieldMapper) {
return parseGeoVariable(fieldName, parser, parseContext, (GeoPointFieldMapper) mapper);
} else if (mapper instanceof NumberFieldMapper<?>) {
return parseNumberVariable(fieldName, parser, parseContext, (NumberFieldMapper<?>) mapper);
} else {
throw new QueryParsingException(parseContext.index(), "Field " + fieldName + " is of type " + mapper.fieldType()
+ ", but only numeric types are supported.");
}
}
private ScoreFunction parseNumberVariable(String fieldName, XContentParser parser, QueryParseContext parseContext,
NumberFieldMapper<?> mapper) throws IOException {
XContentParser.Token token;
String parameterName = null;
double scale = 0;
double origin = 0;
double decay = 0.5;
double offset = 0.0d;
boolean scaleFound = false;
boolean refFound = false;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
parameterName = parser.currentName();
} else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {
scale = parser.doubleValue();
scaleFound = true;
} else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {
decay = parser.doubleValue();
} else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {
origin = parser.doubleValue();
refFound = true;
} else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {
offset = parser.doubleValue();
} else {
throw new ElasticsearchParseException("Parameter " + parameterName + " not supported!");
}
}
if (!scaleFound || !refFound) {
throw new ElasticsearchParseException("Both " + DecayFunctionBuilder.SCALE + "and " + DecayFunctionBuilder.ORIGIN
+ " must be set for numeric fields.");
}
IndexNumericFieldData<?> numericFieldData = parseContext.fieldData().getForField(mapper);
return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData);
}
private ScoreFunction parseGeoVariable(String fieldName, XContentParser parser, QueryParseContext parseContext,
GeoPointFieldMapper mapper) throws IOException {
XContentParser.Token token;
String parameterName = null;
GeoPoint origin = new GeoPoint();
String scaleString = null;
String offsetString = "0km";
double decay = 0.5;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
parameterName = parser.currentName();
} else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {
scaleString = parser.text();
} else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {
origin = GeoPoint.parse(parser);
} else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {
decay = parser.doubleValue();
} else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {
offsetString = parser.text();
} else {
throw new ElasticsearchParseException("Parameter " + parameterName + " not supported!");
}
}
if (origin == null || scaleString == null) {
throw new ElasticsearchParseException(DecayFunctionBuilder.ORIGIN + " and " + DecayFunctionBuilder.SCALE + " must be set for geo fields.");
}
double scale = DistanceUnit.DEFAULT.parse(scaleString, DistanceUnit.DEFAULT);
double offset = DistanceUnit.DEFAULT.parse(offsetString, DistanceUnit.DEFAULT);
IndexGeoPointFieldData<?> indexFieldData = parseContext.fieldData().getForField(mapper);
return new GeoFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), indexFieldData);
}
private ScoreFunction parseDateVariable(String fieldName, XContentParser parser, QueryParseContext parseContext,
DateFieldMapper dateFieldMapper) throws IOException {
XContentParser.Token token;
String parameterName = null;
String scaleString = null;
String originString = null;
String offsetString = "0d";
double decay = 0.5;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
parameterName = parser.currentName();
} else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {
scaleString = parser.text();
} else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {
originString = parser.text();
} else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {
decay = parser.doubleValue();
} else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {
offsetString = parser.text();
} else {
throw new ElasticsearchParseException("Parameter " + parameterName + " not supported!");
}
}
long origin = SearchContext.current().nowInMillis();
if (originString != null) {
origin = dateFieldMapper.parseToMilliseconds(originString, parseContext);
}
if (scaleString == null) {
throw new ElasticsearchParseException(DecayFunctionBuilder.SCALE + " must be set for date fields.");
}
TimeValue val = TimeValue.parseTimeValue(scaleString, TimeValue.timeValueHours(24));
double scale = val.getMillis();
val = TimeValue.parseTimeValue(offsetString, TimeValue.timeValueHours(24));
double offset = val.getMillis();
IndexNumericFieldData<?> numericFieldData = parseContext.fieldData().getForField(dateFieldMapper);
return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData);
}
static class GeoFieldDataScoreFunction extends AbstractDistanceScoreFunction {
private final GeoPoint origin;
private final IndexGeoPointFieldData<?> fieldData;
private GeoPointValues geoPointValues = null;
private static final GeoDistance distFunction = GeoDistance.DEFAULT;
public GeoFieldDataScoreFunction(GeoPoint origin, double scale, double decay, double offset, DecayFunction func,
IndexGeoPointFieldData<?> fieldData) {
super(scale, decay, offset, func);
this.origin = origin;
this.fieldData = fieldData;
}
@Override
public void setNextReader(AtomicReaderContext context) {
geoPointValues = fieldData.load(context).getGeoPointValues();
}
private final GeoPoint getValue(int doc, GeoPoint missing) {
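            // use the first of possibly many values; fall back to the supplied default when the doc has none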
final int num = geoPointValues.setDocument(doc);
for (int i = 0; i < num; i++) {
return geoPointValues.nextValue();
}
return missing;
}
@Override
protected double distance(int docId) {
GeoPoint other = getValue(docId, origin);
double distance = Math.abs(distFunction.calculate(origin.lat(), origin.lon(), other.lat(), other.lon(),
DistanceUnit.METERS)) - offset;
return Math.max(0.0d, distance);
}
@Override
protected String getDistanceString(int docId) {
final GeoPoint other = getValue(docId, origin);
return "arcDistance(" + other + "(=doc value), " + origin + "(=origin)) - " + offset
+ "(=offset) < 0.0 ? 0.0: arcDistance(" + other + "(=doc value), " + origin + "(=origin)) - " + offset
+ "(=offset)";
}
@Override
protected String getFieldName() {
return fieldData.getFieldNames().fullName();
}
}
static class NumericFieldDataScoreFunction extends AbstractDistanceScoreFunction {
private final IndexNumericFieldData<?> fieldData;
private final double origin;
private DoubleValues doubleValues;
public NumericFieldDataScoreFunction(double origin, double scale, double decay, double offset, DecayFunction func,
IndexNumericFieldData<?> fieldData) {
super(scale, decay, offset, func);
this.fieldData = fieldData;
this.origin = origin;
}
public void setNextReader(AtomicReaderContext context) {
this.doubleValues = this.fieldData.load(context).getDoubleValues();
}
private final double getValue(int doc, double missing) {
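            // use the first of possibly many values; fall back to the supplied default when the doc has none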
final int num = doubleValues.setDocument(doc);
for (int i = 0; i < num; i++) {
return doubleValues.nextValue();
}
return missing;
}
@Override
protected double distance(int docId) {
double distance = Math.abs(getValue(docId, origin) - origin) - offset;
return Math.max(0.0d, distance);
}
@Override
protected String getDistanceString(int docId) {
return "Math.abs(" + getValue(docId, origin) + "(=doc value) - " + origin + "(=origin)) - "
+ offset + "(=offset) < 0.0 ? 0.0: Math.abs(" + getValue(docId, origin) + "(=doc value) - "
+ origin + ") - " + offset + "(=offset)";
}
@Override
protected String getFieldName() {
return fieldData.getFieldNames().fullName();
}
}
/**
* This is the base class for scoring a single field.
*
* */
public static abstract class AbstractDistanceScoreFunction extends ScoreFunction {
private final double scale;
protected final double offset;
private final DecayFunction func;
        public AbstractDistanceScoreFunction(double userSuppliedScale, double decay, double offset, DecayFunction func) {
            super(CombineFunction.MULT);
            if (userSuppliedScale <= 0.0) {
                throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + " : scale must be > 0.0.");
            }
            if (decay <= 0.0 || decay >= 1.0) {
                throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME
                        + " : decay must be in the exclusive range (0..1).");
            }
            this.scale = func.processScale(userSuppliedScale, decay);
            this.func = func;
            if (offset < 0.0d) {
                throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + " : offset must be >= 0.0.");
}
this.offset = offset;
}
@Override
public double score(int docId, float subQueryScore) {
double value = distance(docId);
return func.evaluate(value, scale);
}
/**
* This function computes the distance from a defined origin. Since
* the value of the document is read from the index, it cannot be
* guaranteed that the value actually exists. If it does not, we assume
* the user handles this case in the query and return 0.
* */
protected abstract double distance(int docId);
protected abstract String getDistanceString(int docId);
protected abstract String getFieldName();
@Override
public Explanation explainScore(int docId, Explanation subQueryExpl) {
ComplexExplanation ce = new ComplexExplanation();
ce.setValue(CombineFunction.toFloat(score(docId, subQueryExpl.getValue())));
ce.setMatch(true);
ce.setDescription("Function for field " + getFieldName() + ":");
ce.addDetail(func.explainFunction(getDistanceString(docId), distance(docId), scale));
return ce;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_functionscore_DecayFunctionParser.java
|
3,389 |
public class PagedBytesEstimator implements PerValueEstimator {
private final AtomicReaderContext context;
private final MemoryCircuitBreaker breaker;
private long estimatedBytes;
PagedBytesEstimator(AtomicReaderContext context, MemoryCircuitBreaker breaker) {
this.breaker = breaker;
this.context = context;
}
/**
* @return the number of bytes for the term based on the length and ordinal overhead
*/
public long bytesPerValue(BytesRef term) {
long bytes = term.length;
// 64 bytes for miscellaneous overhead
bytes += 64;
// Seems to be about a 1.5x compression per term/ord, plus 1 for some wiggle room
bytes = (long) ((double) bytes / 1.5) + 1;
return bytes;
}
/**
* @return the estimate for loading the entire term set into field data, or 0 if unavailable
*/
public long estimateStringFieldData() {
try {
AtomicReader reader = context.reader();
Terms terms = reader.terms(getFieldNames().indexName());
Fields fields = reader.fields();
final Terms fieldTerms = fields.terms(getFieldNames().indexName());
if (fieldTerms instanceof BlockTreeTermsReader.FieldReader) {
final BlockTreeTermsReader.Stats stats = ((BlockTreeTermsReader.FieldReader) fieldTerms).computeStats();
long totalTermBytes = stats.totalTermBytes;
if (logger.isTraceEnabled()) {
logger.trace("totalTermBytes: {}, terms.size(): {}, terms.getSumDocFreq(): {}",
totalTermBytes, terms.size(), terms.getSumDocFreq());
}
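                // rough estimate: raw term bytes plus ~2 bytes per term of ordinal overhead and ~4 bytes per posting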
long totalBytes = totalTermBytes + (2 * terms.size()) + (4 * terms.getSumDocFreq());
return totalBytes;
}
} catch (Exception e) {
logger.warn("Unable to estimate memory overhead", e);
}
return 0;
}
/**
* Determine whether the BlockTreeTermsReader.FieldReader can be used
* for estimating the field data, adding the estimate to the circuit
* breaker if it can, otherwise wrapping the terms in a
* RamAccountingTermsEnum to be estimated on a per-term basis.
*
* @param terms terms to be estimated
* @return A possibly wrapped TermsEnum for the terms
* @throws IOException
*/
public TermsEnum beforeLoad(Terms terms) throws IOException {
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat(
FilterSettingFields.ACCEPTABLE_TRANSIENT_OVERHEAD_RATIO,
OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
AtomicReader reader = context.reader();
// Check if one of the following is present:
// - The OrdinalsBuilder overhead has been tweaked away from the default
// - A field data filter is present
// - A regex filter is present
if (acceptableTransientOverheadRatio != OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO ||
fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MIN, 0d) != 0d ||
fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MAX, 0d) != 0d ||
fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MIN_SEGMENT_SIZE, 0d) != 0d ||
fieldDataType.getSettings().get(FilterSettingFields.REGEX_PATTERN) != null) {
if (logger.isTraceEnabled()) {
logger.trace("Filter exists, can't circuit break normally, using RamAccountingTermsEnum");
}
return new RamAccountingTermsEnum(filter(terms, reader), breaker, this);
} else {
estimatedBytes = this.estimateStringFieldData();
// If we weren't able to estimate, wrap in the RamAccountingTermsEnum
if (estimatedBytes == 0) {
return new RamAccountingTermsEnum(filter(terms, reader), breaker, this);
}
breaker.addEstimateBytesAndMaybeBreak(estimatedBytes);
return filter(terms, reader);
}
}
/**
* Adjust the circuit breaker now that terms have been loaded, getting
* the actual used either from the parameter (if estimation worked for
* the entire set), or from the TermsEnum if it has been wrapped in a
* RamAccountingTermsEnum.
*
* @param termsEnum terms that were loaded
* @param actualUsed actual field data memory usage
*/
public void afterLoad(TermsEnum termsEnum, long actualUsed) {
if (termsEnum instanceof RamAccountingTermsEnum) {
estimatedBytes = ((RamAccountingTermsEnum) termsEnum).getTotalBytes();
}
breaker.addWithoutBreaking(-(estimatedBytes - actualUsed));
}
/**
* Adjust the breaker when no terms were actually loaded, but the field
* data takes up space regardless. For instance, when ordinals are
* used.
* @param actualUsed bytes actually used
*/
public void adjustForNoTerms(long actualUsed) {
breaker.addWithoutBreaking(actualUsed);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_plain_PagedBytesIndexFieldData.java
|
326 |
public enum Placement {
    PREPEND, APPEND, SPECIFIC
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_Placement.java
|
134 |
@RunWith(HazelcastSerialClassRunner.class)
public abstract class ClientTestSupport extends HazelcastTestSupport {
@Rule
public final ClientTestResource clientResource = new ClientTestResource(createConfig());
protected final HazelcastInstance getInstance() {
return clientResource.instance;
}
protected final SimpleClient getClient() {
return clientResource.client;
}
protected abstract Config createConfig();
public static final class ClientTestResource extends ExternalResource {
private final Config config;
private HazelcastInstance instance;
private SimpleClient client;
public ClientTestResource(Config config) {
this.config = config;
}
protected void before() throws Throwable {
instance = new TestHazelcastInstanceFactory(1).newHazelcastInstance(config);
client = newClient(TestUtil.getNode(instance));
client.auth();
}
protected void after() {
try {
client.close();
} catch (IOException e) {
e.printStackTrace();
}
instance.shutdown();
}
}
public static SimpleClient newClient(Node node) throws IOException {
if (node.isActive()) {
if (TestEnvironment.isMockNetwork()) {
ClientEngineImpl engine = node.clientEngine;
return new MockSimpleClient(engine);
} else {
return new SocketSimpleClient(node);
}
}
throw new IllegalArgumentException("Node is not active: " + node.getThisAddress());
}
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_client_ClientTestSupport.java
|
407 |
public class DeleteSnapshotRequest extends MasterNodeOperationRequest<DeleteSnapshotRequest> {
private String repository;
private String snapshot;
/**
     * Constructs a new delete snapshot request
*/
public DeleteSnapshotRequest() {
}
/**
     * Constructs a new delete snapshot request with repository and snapshot name
*
* @param repository repository name
* @param snapshot snapshot name
*/
public DeleteSnapshotRequest(String repository, String snapshot) {
this.repository = repository;
this.snapshot = snapshot;
}
/**
     * Constructs a new delete snapshot request with repository name
*
* @param repository repository name
*/
public DeleteSnapshotRequest(String repository) {
this.repository = repository;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (repository == null) {
validationException = addValidationError("repository is missing", validationException);
}
if (snapshot == null) {
validationException = addValidationError("snapshot is missing", validationException);
}
return validationException;
}
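    /**
     * Sets repository name
     *
     * @return this request
     */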
public DeleteSnapshotRequest repository(String repository) {
this.repository = repository;
return this;
}
/**
* Returns repository name
*
* @return repository name
*/
public String repository() {
return this.repository;
}
/**
     * Returns snapshot name
     *
     * @return snapshot name
*/
public String snapshot() {
return this.snapshot;
}
/**
* Sets snapshot name
*
* @return this request
*/
public DeleteSnapshotRequest snapshot(String snapshot) {
this.snapshot = snapshot;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
repository = in.readString();
snapshot = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(repository);
out.writeString(snapshot);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_delete_DeleteSnapshotRequest.java
|
85 |
static class Node<K,V> implements Map.Entry<K,V> {
final int hash;
final K key;
volatile V val;
volatile Node<K,V> next;
Node(int hash, K key, V val, Node<K,V> next) {
this.hash = hash;
this.key = key;
this.val = val;
this.next = next;
}
public final K getKey() { return key; }
public final V getValue() { return val; }
public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
public final String toString(){ return key + "=" + val; }
public final V setValue(V value) {
throw new UnsupportedOperationException();
}
public final boolean equals(Object o) {
Object k, v, u; Map.Entry<?,?> e;
return ((o instanceof Map.Entry) &&
(k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
(v = e.getValue()) != null &&
(k == key || k.equals(key)) &&
(v == (u = val) || v.equals(u)));
}
/**
* Virtualized support for map.get(); overridden in subclasses.
*/
Node<K,V> find(int h, Object k) {
Node<K,V> e = this;
if (k != null) {
do {
K ek;
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
} while ((e = e.next) != null);
}
return null;
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
515 |
public class IndicesExistsRequest extends MasterNodeReadOperationRequest<IndicesExistsRequest> {
private String[] indices = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
public IndicesExistsRequest(String... indices) {
this.indices = indices;
}
public String[] indices() {
return indices;
}
public IndicesExistsRequest indices(String[] indices) {
this.indices = indices;
return this;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public IndicesExistsRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (indices == null || indices.length == 0) {
validationException = addValidationError("index/indices is missing", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
readLocal(in, Version.V_1_0_0_RC2);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
indicesOptions.writeIndicesOptions(out);
writeLocal(out, Version.V_1_0_0_RC2);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_exists_indices_IndicesExistsRequest.java
|
1,132 |
public class OrderStatus implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
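    // registry of all constructed instances keyed by type string, so getInstance can resolve values persisted by name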
private static final Map<String, OrderStatus> TYPES = new LinkedHashMap<String, OrderStatus>();
public static final OrderStatus NAMED = new OrderStatus("NAMED", "Named");
public static final OrderStatus QUOTE = new OrderStatus("QUOTE", "Quote");
public static final OrderStatus IN_PROCESS = new OrderStatus("IN_PROCESS", "In Process");
public static final OrderStatus SUBMITTED = new OrderStatus("SUBMITTED", "Submitted");
public static final OrderStatus CANCELLED = new OrderStatus("CANCELLED", "Cancelled");
public static OrderStatus getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public OrderStatus() {
//do nothing
}
public OrderStatus(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OrderStatus other = (OrderStatus) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_type_OrderStatus.java
|
834 |
@SuppressWarnings("deprecation")
public class CreateOfferUtility {
private OfferDao offerDao;
private OfferCodeDao offerCodeDao;
private OfferService offerService;
public CreateOfferUtility(OfferDao offerDao, OfferCodeDao offerCodeDao, OfferService offerService) {
this.offerDao = offerDao;
this.offerCodeDao = offerCodeDao;
this.offerService = offerService;
}
public OfferCode createOfferCode(String offerName, OfferType offerType, OfferDiscountType discountType, double value, String customerRule, String orderRule, boolean stackable, boolean combinable, int priority) {
return createOfferCode("NONAME", offerName, offerType, discountType, value, customerRule, orderRule, stackable, combinable, priority);
}
public OfferCode createOfferCode(String offerCodeName, String offerName, OfferType offerType, OfferDiscountType discountType, double value, String customerRule, String orderRule, boolean stackable, boolean combinable, int priority) {
OfferCode offerCode = offerCodeDao.create();
Offer offer = createOffer(offerName, offerType, discountType, value, customerRule, orderRule, stackable, combinable, priority);
offerCode.setOffer(offer);
offerCode.setOfferCode(offerCodeName);
offerCode = offerService.saveOfferCode(offerCode);
return offerCode;
}
public Offer createOffer(String offerName, OfferType offerType, OfferDiscountType discountType, double value, String customerRule, String orderRule, boolean stackable, boolean combinable, int priority) {
Offer offer = offerDao.create();
offer.setName(offerName);
        // make the offer active from yesterday through tomorrow
        Calendar calendar = Calendar.getInstance();
        calendar.add(Calendar.DATE, -1);
        offer.setStartDate(calendar.getTime());
        calendar.add(Calendar.DATE, 2);
        offer.setEndDate(calendar.getTime());
offer.setType(offerType);
offer.setDiscountType(discountType);
offer.setValue(BigDecimal.valueOf(value));
offer.setDeliveryType(OfferDeliveryType.CODE);
offer.setStackable(stackable);
offer.setAppliesToOrderRules(orderRule);
offer.setAppliesToCustomerRules(customerRule);
offer.setCombinableWithOtherOffers(combinable);
offer.setPriority(priority);
offer = offerService.save(offer);
offer.setMaxUses(50);
return offer;
}
public Offer updateOfferCodeMaxCustomerUses(OfferCode code, Long maxUses) {
code.getOffer().setMaxUsesPerCustomer(maxUses);
return offerService.save(code.getOffer());
}
}
| 1no label
|
integration_src_test_java_org_broadleafcommerce_core_offer_service_CreateOfferUtility.java
|
5,403 |
public static abstract class Bytes extends FieldDataSource {
public static abstract class WithOrdinals extends Bytes {
public abstract BytesValues.WithOrdinals bytesValues();
public static class FieldData extends WithOrdinals implements ReaderContextAware {
protected boolean needsHashes;
protected final IndexFieldData.WithOrdinals<?> indexFieldData;
protected final MetaData metaData;
protected AtomicFieldData.WithOrdinals<?> atomicFieldData;
private BytesValues.WithOrdinals bytesValues;
public FieldData(IndexFieldData.WithOrdinals<?> indexFieldData, MetaData metaData) {
this.indexFieldData = indexFieldData;
this.metaData = metaData;
needsHashes = false;
}
@Override
public MetaData metaData() {
return metaData;
}
public final void setNeedsHashes(boolean needsHashes) {
this.needsHashes = needsHashes;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
atomicFieldData = indexFieldData.load(reader);
if (bytesValues != null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
}
@Override
public BytesValues.WithOrdinals bytesValues() {
if (bytesValues == null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
return bytesValues;
}
}
}
public static class FieldData extends Bytes implements ReaderContextAware {
protected boolean needsHashes;
protected final IndexFieldData<?> indexFieldData;
protected final MetaData metaData;
protected AtomicFieldData<?> atomicFieldData;
private BytesValues bytesValues;
public FieldData(IndexFieldData<?> indexFieldData, MetaData metaData) {
this.indexFieldData = indexFieldData;
this.metaData = metaData;
needsHashes = false;
}
@Override
public MetaData metaData() {
return metaData;
}
public final void setNeedsHashes(boolean needsHashes) {
this.needsHashes = needsHashes;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
atomicFieldData = indexFieldData.load(reader);
if (bytesValues != null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
}
@Override
public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
if (bytesValues == null) {
bytesValues = atomicFieldData.getBytesValues(needsHashes);
}
return bytesValues;
}
}
public static class Script extends Bytes {
private final ScriptBytesValues values;
public Script(SearchScript script) {
values = new ScriptBytesValues(script);
}
@Override
public MetaData metaData() {
return MetaData.UNKNOWN;
}
@Override
public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
return values;
}
}
public static class SortedAndUnique extends Bytes implements ReaderContextAware {
private final FieldDataSource delegate;
private final MetaData metaData;
private BytesValues bytesValues;
public SortedAndUnique(FieldDataSource delegate) {
this.delegate = delegate;
this.metaData = MetaData.builder(delegate.metaData()).uniqueness(MetaData.Uniqueness.UNIQUE).build();
}
@Override
public MetaData metaData() {
return metaData;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
bytesValues = null; // order may change per-segment -> reset
}
@Override
public org.elasticsearch.index.fielddata.BytesValues bytesValues() {
if (bytesValues == null) {
bytesValues = delegate.bytesValues();
if (bytesValues.isMultiValued() &&
(!delegate.metaData().uniqueness.unique() || bytesValues.getOrder() != Order.BYTES)) {
bytesValues = new SortedUniqueBytesValues(bytesValues);
}
}
return bytesValues;
}
static class SortedUniqueBytesValues extends FilterBytesValues {
final BytesRef spare;
int[] sortedIds;
final BytesRefHash bytes;
int numUniqueValues;
int pos = Integer.MAX_VALUE;
public SortedUniqueBytesValues(BytesValues delegate) {
super(delegate);
bytes = new BytesRefHash();
spare = new BytesRef();
}
@Override
public int setDocument(int docId) {
final int numValues = super.setDocument(docId);
if (numValues == 0) {
sortedIds = null;
return 0;
}
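                // dedupe the values via the hash, then sort the unique ids so iteration returns values in byte order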
bytes.clear();
bytes.reinit();
for (int i = 0; i < numValues; ++i) {
bytes.add(super.nextValue(), super.currentValueHash());
}
numUniqueValues = bytes.size();
sortedIds = bytes.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
pos = 0;
return numUniqueValues;
}
@Override
public BytesRef nextValue() {
bytes.get(sortedIds[pos++], spare);
return spare;
}
@Override
public int currentValueHash() {
return spare.hashCode();
}
@Override
public Order getOrder() {
return Order.BYTES;
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_support_FieldDataSource.java
|
1,596 |
public class MapStructure implements Serializable, PersistencePerspectiveItem {
private static final long serialVersionUID = 1L;
private String keyClassName;
private String mapKeyValueProperty;
private String keyPropertyName;
private String keyPropertyFriendlyName;
private String valueClassName;
private String mapProperty;
private Boolean deleteValueEntity = Boolean.FALSE;
private String manyToField;
private Boolean mutable = true;
public MapStructure() {
//do nothing - support serialization requirements
}
public MapStructure(String keyClassName, String keyPropertyName, String keyPropertyFriendlyName, String valueClassName,
String mapProperty, Boolean deleteValueEntity, String mapKeyValueProperty) {
if (!keyClassName.equals(String.class.getName())) {
throw new RuntimeException("keyClass of java.lang.String is currently the only type supported");
}
this.keyClassName = keyClassName;
this.valueClassName = valueClassName;
this.mapProperty = mapProperty;
this.keyPropertyName = keyPropertyName;
this.keyPropertyFriendlyName = keyPropertyFriendlyName;
this.deleteValueEntity = deleteValueEntity;
this.mapKeyValueProperty = mapKeyValueProperty;
}
public String getKeyClassName() {
return keyClassName;
}
public void setKeyClassName(String keyClassName) {
if (!keyClassName.equals(String.class.getName())) {
throw new RuntimeException("keyClass of java.lang.String is currently the only type supported");
}
this.keyClassName = keyClassName;
}
public String getValueClassName() {
return valueClassName;
}
public void setValueClassName(String valueClassName) {
this.valueClassName = valueClassName;
}
public String getMapProperty() {
return mapProperty;
}
public void setMapProperty(String mapProperty) {
this.mapProperty = mapProperty;
}
public String getKeyPropertyName() {
return keyPropertyName;
}
public void setKeyPropertyName(String keyPropertyName) {
this.keyPropertyName = keyPropertyName;
}
public String getKeyPropertyFriendlyName() {
return keyPropertyFriendlyName;
}
public void setKeyPropertyFriendlyName(String keyPropertyFriendlyName) {
this.keyPropertyFriendlyName = keyPropertyFriendlyName;
}
public Boolean getDeleteValueEntity() {
return deleteValueEntity;
}
public void setDeleteValueEntity(Boolean deleteValueEntity) {
this.deleteValueEntity = deleteValueEntity;
}
public String getManyToField() {
return manyToField;
}
public void setManyToField(String manyToField) {
this.manyToField = manyToField;
}
public Boolean getMutable() {
return mutable;
}
public void setMutable(Boolean mutable) {
this.mutable = mutable;
}
public String getMapKeyValueProperty() {
return mapKeyValueProperty;
}
public void setMapKeyValueProperty(String mapKeyValueProperty) {
this.mapKeyValueProperty = mapKeyValueProperty;
}
public void accept(PersistencePerspectiveItemVisitor visitor) {
visitor.visit(this);
}
@Override
public PersistencePerspectiveItem clonePersistencePerspectiveItem() {
MapStructure mapStructure = new MapStructure();
mapStructure.keyClassName = keyClassName;
mapStructure.keyPropertyName = keyPropertyName;
mapStructure.keyPropertyFriendlyName = keyPropertyFriendlyName;
mapStructure.valueClassName = valueClassName;
mapStructure.mapProperty = mapProperty;
mapStructure.deleteValueEntity = deleteValueEntity;
mapStructure.manyToField = manyToField;
mapStructure.mutable = mutable;
mapStructure.mapKeyValueProperty = mapKeyValueProperty;
return mapStructure;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof MapStructure)) return false;
MapStructure that = (MapStructure) o;
if (deleteValueEntity != null ? !deleteValueEntity.equals(that.deleteValueEntity) : that.deleteValueEntity != null)
return false;
if (mapKeyValueProperty != null ? !mapKeyValueProperty.equals(that.mapKeyValueProperty) : that.mapKeyValueProperty != null)
return false;
if (keyClassName != null ? !keyClassName.equals(that.keyClassName) : that.keyClassName != null) return false;
if (keyPropertyFriendlyName != null ? !keyPropertyFriendlyName.equals(that.keyPropertyFriendlyName) : that.keyPropertyFriendlyName != null)
return false;
if (keyPropertyName != null ? !keyPropertyName.equals(that.keyPropertyName) : that.keyPropertyName != null)
return false;
if (mapProperty != null ? !mapProperty.equals(that.mapProperty) : that.mapProperty != null) return false;
if (valueClassName != null ? !valueClassName.equals(that.valueClassName) : that.valueClassName != null)
return false;
if (manyToField != null ? !manyToField.equals(that.manyToField) : that.manyToField != null) return false;
if (mutable != null ? !mutable.equals(that.mutable) : that.mutable != null) return false;
return true;
}
@Override
public int hashCode() {
int result = keyClassName != null ? keyClassName.hashCode() : 0;
result = 31 * result + (keyPropertyName != null ? keyPropertyName.hashCode() : 0);
result = 31 * result + (keyPropertyFriendlyName != null ? keyPropertyFriendlyName.hashCode() : 0);
result = 31 * result + (mapKeyValueProperty != null ? mapKeyValueProperty.hashCode() : 0);
result = 31 * result + (valueClassName != null ? valueClassName.hashCode() : 0);
result = 31 * result + (mapProperty != null ? mapProperty.hashCode() : 0);
result = 31 * result + (deleteValueEntity != null ? deleteValueEntity.hashCode() : 0);
result = 31 * result + (manyToField != null ? manyToField.hashCode() : 0);
result = 31 * result + (mutable != null ? mutable.hashCode() : 0);
return result;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_MapStructure.java
|
2,007 |
@Service("blCustomerAddressService")
public class CustomerAddressServiceImpl implements CustomerAddressService {
@Resource(name="blCustomerAddressDao")
protected CustomerAddressDao customerAddressDao;
public CustomerAddress saveCustomerAddress(CustomerAddress customerAddress) {
// if parameter address is set as default, unset all other default addresses
List<CustomerAddress> activeCustomerAddresses = readActiveCustomerAddressesByCustomerId(customerAddress.getCustomer().getId());
if (activeCustomerAddresses != null && activeCustomerAddresses.isEmpty()) {
customerAddress.getAddress().setDefault(true);
} else {
if (customerAddress.getAddress().isDefault()) {
for (CustomerAddress activeCustomerAddress : activeCustomerAddresses) {
                    // compare ids by value; Long identity comparison (!=) is only reliable for small cached values
                    if (!activeCustomerAddress.getId().equals(customerAddress.getId()) && activeCustomerAddress.getAddress().isDefault()) {
activeCustomerAddress.getAddress().setDefault(false);
customerAddressDao.save(activeCustomerAddress);
}
}
}
}
return customerAddressDao.save(customerAddress);
}
public List<CustomerAddress> readActiveCustomerAddressesByCustomerId(Long customerId) {
return customerAddressDao.readActiveCustomerAddressesByCustomerId(customerId);
}
public CustomerAddress readCustomerAddressById(Long customerAddressId) {
return customerAddressDao.readCustomerAddressById(customerAddressId);
}
public void makeCustomerAddressDefault(Long customerAddressId, Long customerId) {
customerAddressDao.makeCustomerAddressDefault(customerAddressId, customerId);
}
public void deleteCustomerAddressById(Long customerAddressId){
customerAddressDao.deleteCustomerAddressById(customerAddressId);
}
public CustomerAddress findDefaultCustomerAddress(Long customerId) {
return customerAddressDao.findDefaultCustomerAddress(customerId);
}
public CustomerAddress create() {
return customerAddressDao.create();
}
}
| 1no label
|
core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_service_CustomerAddressServiceImpl.java
|
394 |
public class ClusterSearchShardsRequest extends MasterNodeReadOperationRequest<ClusterSearchShardsRequest> {
private String[] indices;
@Nullable
private String routing;
@Nullable
private String preference;
private String[] types = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.lenient();
public ClusterSearchShardsRequest() {
}
public ClusterSearchShardsRequest(String... indices) {
indices(indices);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
/**
* Sets the indices the search will be executed on.
*/
public ClusterSearchShardsRequest indices(String... indices) {
if (indices == null) {
throw new ElasticsearchIllegalArgumentException("indices must not be null");
} else {
for (int i = 0; i < indices.length; i++) {
if (indices[i] == null) {
throw new ElasticsearchIllegalArgumentException("indices[" + i + "] must not be null");
}
}
}
this.indices = indices;
return this;
}
/**
* The indices
*/
public String[] indices() {
return indices;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public ClusterSearchShardsRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
/**
* The document types to execute the search against. Defaults to be executed against
* all types.
*/
public String[] types() {
return types;
}
/**
* The document types to execute the search against. Defaults to be executed against
* all types.
*/
public ClusterSearchShardsRequest types(String... types) {
this.types = types;
return this;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public String routing() {
return this.routing;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public ClusterSearchShardsRequest routing(String routing) {
this.routing = routing;
return this;
}
/**
* The routing values to control the shards that the search will be executed on.
*/
public ClusterSearchShardsRequest routing(String... routings) {
this.routing = Strings.arrayToCommaDelimitedString(routings);
return this;
}
/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public ClusterSearchShardsRequest preference(String preference) {
this.preference = preference;
return this;
}
public String preference() {
return this.preference;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = new String[in.readVInt()];
for (int i = 0; i < indices.length; i++) {
indices[i] = in.readString();
}
routing = in.readOptionalString();
preference = in.readOptionalString();
types = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
readLocal(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(indices.length);
for (String index : indices) {
out.writeString(index);
}
out.writeOptionalString(routing);
out.writeOptionalString(preference);
out.writeStringArray(types);
indicesOptions.writeIndicesOptions(out);
writeLocal(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_shards_ClusterSearchShardsRequest.java
|
5,440 |
public class ScriptDoubleValues extends DoubleValues implements ScriptValues {
final SearchScript script;
private Object value;
private double[] values = new double[4];
private int valueCount;
private int valueOffset;
public ScriptDoubleValues(SearchScript script) {
super(true); // assume multi-valued
this.script = script;
}
@Override
public SearchScript script() {
return script;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
script.setNextDocId(docId);
value = script.run();
if (value == null) {
valueCount = 0;
}
else if (value instanceof Number) {
valueCount = 1;
values[0] = ((Number) value).doubleValue();
}
else if (value.getClass().isArray()) {
valueCount = Array.getLength(value);
values = ArrayUtil.grow(values, valueCount);
for (int i = 0; i < valueCount; ++i) {
values[i] = ((Number) Array.get(value, i)).doubleValue();
}
}
        else if (value instanceof Collection) {
            valueCount = ((Collection<?>) value).size();
            // grow the buffer as the array branch does; collections larger than the initial 4 slots would overflow otherwise
            values = ArrayUtil.grow(values, valueCount);
            int i = 0;
for (Iterator<?> it = ((Collection<?>) value).iterator(); it.hasNext(); ++i) {
values[i] = ((Number) it.next()).doubleValue();
}
assert i == valueCount;
}
else {
throw new AggregationExecutionException("Unsupported script value [" + value + "]");
}
valueOffset = 0;
return valueCount;
}
@Override
public double nextValue() {
assert valueOffset < valueCount;
return values[valueOffset++];
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_support_numeric_ScriptDoubleValues.java
|
28 |
final class SubMapKeyIterator extends SubMapIterator<K> {
SubMapKeyIterator(final OMVRBTreeEntryPosition<K, V> first, final OMVRBTreeEntryPosition<K, V> fence) {
super(first, fence);
}
public K next() {
return nextEntry().getKey();
}
public void remove() {
removeAscending();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java
|
372 |
public class TransportGetRepositoriesAction extends TransportMasterNodeReadOperationAction<GetRepositoriesRequest, GetRepositoriesResponse> {
@Inject
public TransportGetRepositoriesAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
}
@Override
protected String executor() {
return ThreadPool.Names.MANAGEMENT;
}
@Override
protected String transportAction() {
return GetRepositoriesAction.NAME;
}
@Override
protected GetRepositoriesRequest newRequest() {
return new GetRepositoriesRequest();
}
@Override
protected GetRepositoriesResponse newResponse() {
return new GetRepositoriesResponse();
}
@Override
protected ClusterBlockException checkBlock(GetRepositoriesRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
}
@Override
protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener<GetRepositoriesResponse> listener) throws ElasticsearchException {
MetaData metaData = state.metaData();
RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
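        // an empty repositories list or the special name "_all" means list every registered repository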
if (request.repositories().length == 0 || (request.repositories().length == 1 && "_all".equals(request.repositories()[0]))) {
if (repositories != null) {
listener.onResponse(new GetRepositoriesResponse(repositories.repositories()));
} else {
listener.onResponse(new GetRepositoriesResponse(ImmutableList.<RepositoryMetaData>of()));
}
} else {
if (repositories != null) {
ImmutableList.Builder<RepositoryMetaData> repositoryListBuilder = ImmutableList.builder();
for (String repository : request.repositories()) {
RepositoryMetaData repositoryMetaData = repositories.repository(repository);
if (repositoryMetaData == null) {
listener.onFailure(new RepositoryMissingException(repository));
return;
}
repositoryListBuilder.add(repositoryMetaData);
}
listener.onResponse(new GetRepositoriesResponse(repositoryListBuilder.build()));
} else {
listener.onFailure(new RepositoryMissingException(request.repositories()[0]));
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_get_TransportGetRepositoriesAction.java
|
4,103 |
public class IncludeNestedDocsQuery extends Query {
private final Filter parentFilter;
private final Query parentQuery;
// If we are rewritten, this is the original childQuery we
// were passed; we use this for .equals() and
// .hashCode(). This makes rewritten query equal the
// original, so that user does not have to .rewrite() their
// query before searching:
private final Query origParentQuery;
public IncludeNestedDocsQuery(Query parentQuery, Filter parentFilter) {
this.origParentQuery = parentQuery;
this.parentQuery = parentQuery;
this.parentFilter = parentFilter;
}
    // For rewriting
IncludeNestedDocsQuery(Query rewrite, Query originalQuery, IncludeNestedDocsQuery previousInstance) {
this.origParentQuery = originalQuery;
this.parentQuery = rewrite;
this.parentFilter = previousInstance.parentFilter;
setBoost(previousInstance.getBoost());
}
// For cloning
IncludeNestedDocsQuery(Query originalQuery, IncludeNestedDocsQuery previousInstance) {
this.origParentQuery = originalQuery;
this.parentQuery = originalQuery;
this.parentFilter = previousInstance.parentFilter;
}
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new IncludeNestedDocsWeight(parentQuery, parentQuery.createWeight(searcher), parentFilter);
}
static class IncludeNestedDocsWeight extends Weight {
private final Query parentQuery;
private final Weight parentWeight;
private final Filter parentsFilter;
IncludeNestedDocsWeight(Query parentQuery, Weight parentWeight, Filter parentsFilter) {
this.parentQuery = parentQuery;
this.parentWeight = parentWeight;
this.parentsFilter = parentsFilter;
}
@Override
public Query getQuery() {
return parentQuery;
}
@Override
public void normalize(float norm, float topLevelBoost) {
parentWeight.normalize(norm, topLevelBoost);
}
@Override
public float getValueForNormalization() throws IOException {
return parentWeight.getValueForNormalization(); // this query is never boosted so just delegate...
}
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
final Scorer parentScorer = parentWeight.scorer(context, true, false, acceptDocs);
// no matches
if (parentScorer == null) {
return null;
}
DocIdSet parents = parentsFilter.getDocIdSet(context, acceptDocs);
if (parents == null) {
// No matches
return null;
}
if (!(parents instanceof FixedBitSet)) {
throw new IllegalStateException("parentFilter must return FixedBitSet; got " + parents);
}
int firstParentDoc = parentScorer.nextDoc();
if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
// No matches
return null;
}
return new IncludeNestedDocsScorer(this, parentScorer, (FixedBitSet) parents, firstParentDoc);
}
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
return null; //Query is used internally and not by users, so explain can be empty
}
@Override
public boolean scoresDocsOutOfOrder() {
return false;
}
}
static class IncludeNestedDocsScorer extends Scorer {
final Scorer parentScorer;
final FixedBitSet parentBits;
int currentChildPointer = -1;
int currentParentPointer = -1;
int currentDoc = -1;
IncludeNestedDocsScorer(Weight weight, Scorer parentScorer, FixedBitSet parentBits, int currentParentPointer) {
super(weight);
this.parentScorer = parentScorer;
this.parentBits = parentBits;
this.currentParentPointer = currentParentPointer;
if (currentParentPointer == 0) {
currentChildPointer = 0;
} else {
this.currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
if (currentChildPointer == -1) {
// no previous set parent, we delete from doc 0
currentChildPointer = 0;
} else {
currentChildPointer++; // we only care about children
}
}
currentDoc = currentChildPointer;
}
@Override
public Collection<ChildScorer> getChildren() {
return parentScorer.getChildren();
}
public int nextDoc() throws IOException {
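            // children precede their parent in docid order; emit each child in turn, then the parent itself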
if (currentParentPointer == NO_MORE_DOCS) {
return (currentDoc = NO_MORE_DOCS);
}
if (currentChildPointer == currentParentPointer) {
// we need to return the current parent as well, but prepare to return
// the next set of children
currentDoc = currentParentPointer;
currentParentPointer = parentScorer.nextDoc();
if (currentParentPointer != NO_MORE_DOCS) {
currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
if (currentChildPointer == -1) {
// no previous set parent, just set the child to the current parent
currentChildPointer = currentParentPointer;
} else {
currentChildPointer++; // we only care about children
}
}
} else {
currentDoc = currentChildPointer++;
}
assert currentDoc != -1;
return currentDoc;
}
public int advance(int target) throws IOException {
if (target == NO_MORE_DOCS) {
return (currentDoc = NO_MORE_DOCS);
}
if (target == 0) {
return nextDoc();
}
if (target < currentParentPointer) {
currentDoc = currentParentPointer = parentScorer.advance(target);
if (currentParentPointer == NO_MORE_DOCS) {
return (currentDoc = NO_MORE_DOCS);
}
if (currentParentPointer == 0) {
currentChildPointer = 0;
} else {
currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
if (currentChildPointer == -1) {
// no previous set parent, just set the child to 0 to delete all up to the parent
currentChildPointer = 0;
} else {
currentChildPointer++; // we only care about children
}
}
} else {
currentDoc = currentChildPointer++;
}
return currentDoc;
}
public float score() throws IOException {
return parentScorer.score();
}
public int freq() throws IOException {
return parentScorer.freq();
}
public int docID() {
return currentDoc;
}
@Override
public long cost() {
return parentScorer.cost();
}
}
@Override
public void extractTerms(Set<Term> terms) {
parentQuery.extractTerms(terms);
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
final Query parentRewrite = parentQuery.rewrite(reader);
if (parentRewrite != parentQuery) {
return new IncludeNestedDocsQuery(parentRewrite, parentQuery, this);
} else {
return this;
}
}
@Override
public String toString(String field) {
return "IncludeNestedDocsQuery (" + parentQuery.toString() + ")";
}
@Override
public boolean equals(Object _other) {
if (_other instanceof IncludeNestedDocsQuery) {
final IncludeNestedDocsQuery other = (IncludeNestedDocsQuery) _other;
return origParentQuery.equals(other.origParentQuery) && parentFilter.equals(other.parentFilter);
} else {
return false;
}
}
@Override
public int hashCode() {
final int prime = 31;
int hash = 1;
hash = prime * hash + origParentQuery.hashCode();
hash = prime * hash + parentFilter.hashCode();
return hash;
}
@Override
public Query clone() {
Query clonedQuery = origParentQuery.clone();
return new IncludeNestedDocsQuery(clonedQuery, this);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_search_nested_IncludeNestedDocsQuery.java
|
4 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(SlowTest.class)
public class MemcacheTest {
final static Config config = new XmlConfigBuilder().build();
@BeforeClass
@AfterClass
public static void killAllHazelcastInstances() throws IOException {
Hazelcast.shutdownAll();
}
public MemcachedClient getMemcacheClient(HazelcastInstance instance) throws IOException {
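        // build a spymemcached client pointed at the member's memcache endpoint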
final LinkedList<InetSocketAddress> addresses = new LinkedList<InetSocketAddress>();
addresses.add(instance.getCluster().getLocalMember().getInetSocketAddress());
final ConnectionFactory factory = new ConnectionFactoryBuilder().setOpTimeout(60 * 60 * 60).setDaemon(true).setFailureMode(FailureMode.Retry).build();
return new MemcachedClient(factory, addresses);
}
@Test
public void testMemcacheSimple() throws IOException, ExecutionException, InterruptedException {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
MemcachedClient client = getMemcacheClient(instance);
try {
for (int i = 0; i < 100; i++) {
final OperationFuture<Boolean> future = client.set(String.valueOf(i), 0, i);
assertEquals(Boolean.TRUE, future.get());
}
for (int i = 0; i < 100; i++) {
assertEquals(i, client.get(String.valueOf(i)));
}
for (int i = 0; i < 100; i++) {
final OperationFuture<Boolean> future = client.add(String.valueOf(i), 0, i * 100);
assertEquals(Boolean.FALSE, future.get());
}
for (int i = 0; i < 100; i++) {
assertEquals(i, client.get(String.valueOf(i)));
}
for (int i = 100; i < 200; i++) {
final OperationFuture<Boolean> future = client.add(String.valueOf(i), 0, i);
assertEquals(Boolean.TRUE, future.get());
}
for (int i = 0; i < 200; i++) {
assertEquals(i, client.get(String.valueOf(i)));
}
for (int i = 0; i < 200; i++) {
final OperationFuture<Boolean> future = client.replace(String.valueOf(i), 0, i * 10);
assertEquals(Boolean.TRUE, future.get());
}
for (int i = 0; i < 200; i++) {
assertEquals(i * 10, client.get(String.valueOf(i)));
}
for (int i = 200; i < 400; i++) {
final OperationFuture<Boolean> future = client.replace(String.valueOf(i), 0, i);
assertEquals(Boolean.FALSE, future.get());
}
for (int i = 200; i < 400; i++) {
assertEquals(null, client.get(String.valueOf(i)));
}
for (int i = 100; i < 200; i++) {
final OperationFuture<Boolean> future = client.delete(String.valueOf(i));
assertEquals(Boolean.TRUE, future.get());
}
for (int i = 100; i < 200; i++) {
                assertEquals(null, client.get(String.valueOf(i)));
}
for (int i = 100; i < 200; i++) {
final OperationFuture<Boolean> future = client.delete(String.valueOf(i));
assertEquals(Boolean.FALSE, future.get());
}
final LinkedList<String> keys = new LinkedList<String>();
for (int i = 0; i < 100; i++) {
keys.add(String.valueOf(i));
}
final Map<String, Object> bulk = client.getBulk(keys);
for (int i = 0; i < 100; i++) {
assertEquals(i * 10, bulk.get(String.valueOf(i)));
}
// STATS
final Map<String, String> stats = client.getStats().get(instance.getCluster().getLocalMember().getInetSocketAddress());
assertEquals("700", stats.get("cmd_set"));
assertEquals("1000", stats.get("cmd_get"));
assertEquals("700", stats.get("get_hits"));
assertEquals("300", stats.get("get_misses"));
assertEquals("100", stats.get("delete_hits"));
assertEquals("100", stats.get("delete_misses"));
} finally {
client.shutdown();
}
}
@Test
public void testMemcacheWithIMap() throws IOException, InterruptedException, ExecutionException {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
MemcachedClient client = getMemcacheClient(instance);
final String prefix = "testMemcacheWithIMap:";
try {
final IMap<String, Object> map = instance.getMap("hz_memcache_testMemcacheWithIMap");
for (int i = 0; i < 100; i++) {
map.put(String.valueOf(i), String.valueOf(i));
}
for (int i = 0; i < 100; i++) {
assertEquals(String.valueOf(i), client.get(prefix + String.valueOf(i)));
final OperationFuture<Boolean> future = client.set(prefix + String.valueOf(i), 0, String.valueOf(i * 10));
future.get();
}
for (int i = 0; i < 100; i++) {
final MemcacheEntry memcacheEntry = (MemcacheEntry) map.get(String.valueOf(i));
final MemcacheEntry expected = new MemcacheEntry(prefix + String.valueOf(i), String.valueOf(i * 10).getBytes(), 0);
assertEquals(expected, memcacheEntry);
}
final OperationFuture<Boolean> future = client.delete(prefix);
future.get();
for (int i = 0; i < 100; i++) {
assertEquals(null, client.get(prefix + String.valueOf(i)));
}
} finally {
client.shutdown();
}
}
@Test
public void testIncrementAndDecrement() throws IOException, ExecutionException, InterruptedException {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
MemcachedClient client = getMemcacheClient(instance);
try {
for (int i = 0; i < 100; i++) {
final OperationFuture<Boolean> future = client.set(String.valueOf(i), 0, i);
future.get();
}
for (int i = 0; i < 100; i++) {
assertEquals(i * 2, client.incr(String.valueOf(i), i));
}
for (int i = 100; i < 120; i++) {
assertEquals(-1, client.incr(String.valueOf(i), i));
}
for (int i = 0; i < 100; i++) {
assertEquals(i, client.decr(String.valueOf(i), i));
}
for (int i = 100; i < 130; i++) {
assertEquals(-1, client.decr(String.valueOf(i), i));
}
for (int i = 0; i < 100; i++) {
assertEquals(i, client.get(String.valueOf(i)));
}
final Map<String, String> stats = client.getStats().get(instance.getCluster().getLocalMember().getInetSocketAddress());
assertEquals("100", stats.get("cmd_set"));
assertEquals("100", stats.get("cmd_get"));
assertEquals("100", stats.get("incr_hits"));
assertEquals("20", stats.get("incr_misses"));
assertEquals("100", stats.get("decr_hits"));
assertEquals("30", stats.get("decr_misses"));
} finally {
client.shutdown();
}
}
@Test
public void testMemcacheAppendPrepend() throws IOException, ExecutionException, InterruptedException {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
MemcachedClient client = getMemcacheClient(instance);
try {
for (int i = 0; i < 100; i++) {
final OperationFuture<Boolean> future = client.set(String.valueOf(i), 0, String.valueOf(i));
future.get();
}
for (int i = 0; i < 100; i++) {
final OperationFuture<Boolean> future = client.append(0, String.valueOf(i), "append");
assertEquals(Boolean.TRUE, future.get());
}
for (int i = 0; i < 100; i++) {
final OperationFuture<Boolean> future = client.prepend(0, String.valueOf(i), "prepend");
assertEquals(Boolean.TRUE, future.get());
}
for (int i = 1; i < 100; i++) {
assertEquals("prepend" + String.valueOf(i) + "append", client.get(String.valueOf(i)));
}
} finally {
client.shutdown();
}
}
@Test
public void testQuit() throws IOException {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
MemcachedClient client = getMemcacheClient(instance);
client.shutdown();
}
@Test
public void testMemcacheTTL() throws IOException, ExecutionException, InterruptedException {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
MemcachedClient client = getMemcacheClient(instance);
try {
OperationFuture<Boolean> future = client.set(String.valueOf(0), 3, 10);
future.get();
assertEquals(10, client.get(String.valueOf(0)));
Thread.sleep(6000);
assertEquals(null, client.get(String.valueOf(0)));
} finally {
client.shutdown();
}
}
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_ascii_MemcacheTest.java
|
3,371 |
final class OperationThread extends Thread {
private final int threadId;
private final boolean isPartitionSpecific;
private final BlockingQueue workQueue;
private final Queue priorityWorkQueue;
public OperationThread(String name, boolean isPartitionSpecific,
int threadId, BlockingQueue workQueue, Queue priorityWorkQueue) {
super(node.threadGroup, name);
setContextClassLoader(node.getConfigClassLoader());
this.isPartitionSpecific = isPartitionSpecific;
this.workQueue = workQueue;
this.priorityWorkQueue = priorityWorkQueue;
this.threadId = threadId;
}
@Override
public void run() {
try {
doRun();
} catch (OutOfMemoryError e) {
onOutOfMemory(e);
} catch (Throwable t) {
logger.severe(t);
}
}
private void doRun() {
for (; ; ) {
Object task;
try {
task = workQueue.take();
} catch (InterruptedException e) {
if (shutdown) {
return;
}
continue;
}
if (shutdown) {
return;
}
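// drain all pending priority tasks before processing the normal task just taken from the work queue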
processPriorityMessages();
process(task);
}
}
private void process(Object task) {
try {
processor.process(task);
} catch (Exception e) {
logger.severe("Failed to process task: " + task + " on partitionThread:" + getName());
}
}
private void processPriorityMessages() {
for (; ; ) {
Object task = priorityWorkQueue.poll();
if (task == null) {
return;
}
process(task);
}
}
public void awaitTermination(int timeout, TimeUnit unit) throws InterruptedException {
join(unit.toMillis(timeout));
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_BasicOperationScheduler.java
|
81 |
LESS_THAN {
@Override
public boolean isValidValueType(Class<?> clazz) {
Preconditions.checkNotNull(clazz);
return Comparable.class.isAssignableFrom(clazz);
}
@Override
public boolean isValidCondition(Object condition) {
return condition!=null && condition instanceof Comparable;
}
@Override
public boolean evaluate(Object value, Object condition) {
Integer cmp = AttributeUtil.compare(value,condition);
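// a null comparison result means the values are not mutually comparable; such pairs never match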
return cmp!=null?cmp<0:false;
}
@Override
public String toString() {
return "<";
}
@Override
public TitanPredicate negate() {
return GREATER_THAN_EQUAL;
}
},
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Cmp.java
|
1,566 |
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private boolean processEdges;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.processEdges = context.getConfiguration().getBoolean(PROCESS_EDGES, true);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
value.startPath();
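// mark the vertex as the start of a new traversal path before optionally clearing stale edge paths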
long edgesProcessed = 0;
if (this.processEdges) {
for (final Edge edge : value.getEdges(Direction.BOTH)) {
((StandardFaunusEdge) edge).clearPaths();
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.EDGES_PROCESSED, edgesProcessed);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
context.write(NullWritable.get(), value);
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_VerticesMap.java
|
321 |
public class TransportNodesHotThreadsAction extends TransportNodesOperationAction<NodesHotThreadsRequest, NodesHotThreadsResponse, TransportNodesHotThreadsAction.NodeRequest, NodeHotThreads> {
@Inject
public TransportNodesHotThreadsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService) {
super(settings, clusterName, threadPool, clusterService, transportService);
}
@Override
protected String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
protected String transportAction() {
return NodesHotThreadsAction.NAME;
}
@Override
protected NodesHotThreadsResponse newResponse(NodesHotThreadsRequest request, AtomicReferenceArray responses) {
final List<NodeHotThreads> nodes = Lists.newArrayList();
for (int i = 0; i < responses.length(); i++) {
Object resp = responses.get(i);
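// failed node responses are not NodeHotThreads instances and are skipped; accumulateExceptions() is false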
if (resp instanceof NodeHotThreads) {
nodes.add((NodeHotThreads) resp);
}
}
return new NodesHotThreadsResponse(clusterName, nodes.toArray(new NodeHotThreads[nodes.size()]));
}
@Override
protected NodesHotThreadsRequest newRequest() {
return new NodesHotThreadsRequest();
}
@Override
protected NodeRequest newNodeRequest() {
return new NodeRequest();
}
@Override
protected NodeRequest newNodeRequest(String nodeId, NodesHotThreadsRequest request) {
return new NodeRequest(nodeId, request);
}
@Override
protected NodeHotThreads newNodeResponse() {
return new NodeHotThreads();
}
@Override
protected NodeHotThreads nodeOperation(NodeRequest request) throws ElasticsearchException {
HotThreads hotThreads = new HotThreads()
.busiestThreads(request.request.threads)
.type(request.request.type)
.interval(request.request.interval)
.threadElementsSnapshotCount(request.request.snapshots);
try {
return new NodeHotThreads(clusterService.localNode(), hotThreads.detect());
} catch (Exception e) {
throw new ElasticsearchException("failed to detect hot threads", e);
}
}
@Override
protected boolean accumulateExceptions() {
return false;
}
static class NodeRequest extends NodeOperationRequest {
NodesHotThreadsRequest request;
NodeRequest() {
}
NodeRequest(String nodeId, NodesHotThreadsRequest request) {
super(request, nodeId);
this.request = request;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
request = new NodesHotThreadsRequest();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
request.writeTo(out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_hotthreads_TransportNodesHotThreadsAction.java
|
257 |
public class ODefaultCollate extends ODefaultComparator implements OCollate {
public static final String NAME = "default";
public String getName() {
return NAME;
}
public Object transform(final Object obj) {
return obj;
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_collate_ODefaultCollate.java
|
1,447 |
public static class Entry {
private final State state;
private final SnapshotId snapshotId;
private final boolean includeGlobalState;
private final ImmutableMap<ShardId, ShardSnapshotStatus> shards;
private final ImmutableList<String> indices;
public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, ImmutableList<String> indices, ImmutableMap<ShardId, ShardSnapshotStatus> shards) {
this.state = state;
this.snapshotId = snapshotId;
this.includeGlobalState = includeGlobalState;
this.indices = indices;
if (shards == null) {
this.shards = ImmutableMap.of();
} else {
this.shards = shards;
}
}
public SnapshotId snapshotId() {
return this.snapshotId;
}
public ImmutableMap<ShardId, ShardSnapshotStatus> shards() {
return this.shards;
}
public State state() {
return state;
}
public ImmutableList<String> indices() {
return indices;
}
public boolean includeGlobalState() {
return includeGlobalState;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Entry entry = (Entry) o;
if (includeGlobalState != entry.includeGlobalState) return false;
if (!indices.equals(entry.indices)) return false;
if (!shards.equals(entry.shards)) return false;
if (!snapshotId.equals(entry.snapshotId)) return false;
if (state != entry.state) return false;
return true;
}
@Override
public int hashCode() {
int result = state.hashCode();
result = 31 * result + snapshotId.hashCode();
result = 31 * result + (includeGlobalState ? 1 : 0);
result = 31 * result + shards.hashCode();
result = 31 * result + indices.hashCode();
return result;
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_metadata_SnapshotMetaData.java
|
4,077 |
public class TopChildrenQuery extends Query {
private static final ParentDocComparator PARENT_DOC_COMP = new ParentDocComparator();
private final CacheRecycler cacheRecycler;
private final String parentType;
private final String childType;
private final ScoreType scoreType;
private final int factor;
private final int incrementalFactor;
private final Query originalChildQuery;
// This field will hold the rewritten form of originalChildQuery, so that we can reuse it
private Query rewrittenChildQuery;
private IndexReader rewriteIndexReader;
// Note, the query is expected to already be filtered to only child type docs
public TopChildrenQuery(Query childQuery, String childType, String parentType, ScoreType scoreType, int factor, int incrementalFactor, CacheRecycler cacheRecycler) {
this.originalChildQuery = childQuery;
this.childType = childType;
this.parentType = parentType;
this.scoreType = scoreType;
this.factor = factor;
this.incrementalFactor = incrementalFactor;
this.cacheRecycler = cacheRecycler;
}
// Rewrite invocation logic:
// 1) query_then_fetch (default): rewrite is executed as part of the createWeight invocation, when searching child docs.
// 2) dfs_query_then_fetch: first rewrite and then createWeight is executed. During the query phase, rewrite isn't
// executed any more because searchContext#queryRewritten() returns true.
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (rewrittenChildQuery == null) {
rewrittenChildQuery = originalChildQuery.rewrite(reader);
rewriteIndexReader = reader;
}
// We can always return the current instance, and we can do this b/c the child query is executed separately
// before the main query (other scope) in a different IS#search() invocation than the main query.
// In fact we only need to override the rewrite method for the dfs phase, in order to also get global document
// frequency for the child query.
return this;
}
@Override
public void extractTerms(Set<Term> terms) {
rewrittenChildQuery.extractTerms(terms);
}
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs = cacheRecycler.hashMap(-1);
SearchContext searchContext = SearchContext.current();
searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves());
int parentHitsResolved;
int requestedDocs = (searchContext.from() + searchContext.size());
if (requestedDocs <= 0) {
requestedDocs = 1;
}
int numChildDocs = requestedDocs * factor;
Query childQuery;
if (rewrittenChildQuery == null) {
childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery);
} else {
assert rewriteIndexReader == searcher.getIndexReader();
childQuery = rewrittenChildQuery;
}
IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
indexSearcher.setSimilarity(searcher.getSimilarity());
while (true) {
parentDocs.v().clear();
TopDocs topChildDocs = indexSearcher.search(childQuery, numChildDocs);
parentHitsResolved = resolveParentDocuments(topChildDocs, searchContext, parentDocs);
// check if we found enough docs, if so, break
if (parentHitsResolved >= requestedDocs) {
break;
}
// if we did not find enough docs, check if it makes sense to search further
if (topChildDocs.totalHits <= numChildDocs) {
break;
}
// if not, update numDocs, and search again
numChildDocs *= incrementalFactor;
if (numChildDocs > topChildDocs.totalHits) {
numChildDocs = topChildDocs.totalHits;
}
}
ParentWeight parentWeight = new ParentWeight(rewrittenChildQuery.createWeight(searcher), parentDocs);
searchContext.addReleasable(parentWeight);
return parentWeight;
}
int resolveParentDocuments(TopDocs topDocs, SearchContext context, Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs) {
int parentHitsResolved = 0;
Recycler.V<ObjectObjectOpenHashMap<Object, Recycler.V<IntObjectOpenHashMap<ParentDoc>>>> parentDocsPerReader = cacheRecycler.hashMap(context.searcher().getIndexReader().leaves().size());
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
int readerIndex = ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves());
AtomicReaderContext subContext = context.searcher().getIndexReader().leaves().get(readerIndex);
int subDoc = scoreDoc.doc - subContext.docBase;
// find the parent id
HashedBytesArray parentId = context.idCache().reader(subContext.reader()).parentIdByDoc(parentType, subDoc);
if (parentId == null) {
// no parent found
continue;
}
// now go over and find the parent doc Id and reader tuple
for (AtomicReaderContext atomicReaderContext : context.searcher().getIndexReader().leaves()) {
AtomicReader indexReader = atomicReaderContext.reader();
int parentDocId = context.idCache().reader(indexReader).docById(parentType, parentId);
Bits liveDocs = indexReader.getLiveDocs();
if (parentDocId != -1 && (liveDocs == null || liveDocs.get(parentDocId))) {
// we found a match, add it and break
Recycler.V<IntObjectOpenHashMap<ParentDoc>> readerParentDocs = parentDocsPerReader.v().get(indexReader.getCoreCacheKey());
if (readerParentDocs == null) {
readerParentDocs = cacheRecycler.intObjectMap(indexReader.maxDoc());
parentDocsPerReader.v().put(indexReader.getCoreCacheKey(), readerParentDocs);
}
ParentDoc parentDoc = readerParentDocs.v().get(parentDocId);
if (parentDoc == null) {
parentHitsResolved++; // we have a hit on a parent
parentDoc = new ParentDoc();
parentDoc.docId = parentDocId;
parentDoc.count = 1;
parentDoc.maxScore = scoreDoc.score;
parentDoc.sumScores = scoreDoc.score;
readerParentDocs.v().put(parentDocId, parentDoc);
} else {
parentDoc.count++;
parentDoc.sumScores += scoreDoc.score;
if (scoreDoc.score > parentDoc.maxScore) {
parentDoc.maxScore = scoreDoc.score;
}
}
}
}
}
boolean[] states = parentDocsPerReader.v().allocated;
Object[] keys = parentDocsPerReader.v().keys;
Object[] values = parentDocsPerReader.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
Recycler.V<IntObjectOpenHashMap<ParentDoc>> value = (Recycler.V<IntObjectOpenHashMap<ParentDoc>>) values[i];
ParentDoc[] _parentDocs = value.v().values().toArray(ParentDoc.class);
Arrays.sort(_parentDocs, PARENT_DOC_COMP);
parentDocs.v().put(keys[i], _parentDocs);
Releasables.release(value);
}
}
Releasables.release(parentDocsPerReader);
return parentHitsResolved;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != this.getClass()) {
return false;
}
TopChildrenQuery that = (TopChildrenQuery) obj;
if (!originalChildQuery.equals(that.originalChildQuery)) {
return false;
}
if (!childType.equals(that.childType)) {
return false;
}
if (incrementalFactor != that.incrementalFactor) {
return false;
}
if (getBoost() != that.getBoost()) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = originalChildQuery.hashCode();
result = 31 * result + parentType.hashCode();
result = 31 * result + incrementalFactor;
result = 31 * result + Float.floatToIntBits(getBoost());
return result;
}
public String toString(String field) {
StringBuilder sb = new StringBuilder();
sb.append("score_child[").append(childType).append("/").append(parentType).append("](").append(originalChildQuery.toString(field)).append(')');
sb.append(ToStringUtils.boost(getBoost()));
return sb.toString();
}
private class ParentWeight extends Weight implements Releasable {
private final Weight queryWeight;
private final Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs;
public ParentWeight(Weight queryWeight, Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs) throws IOException {
this.queryWeight = queryWeight;
this.parentDocs = parentDocs;
}
public Query getQuery() {
return TopChildrenQuery.this;
}
@Override
public float getValueForNormalization() throws IOException {
float sum = queryWeight.getValueForNormalization();
sum *= getBoost() * getBoost();
return sum;
}
@Override
public void normalize(float norm, float topLevelBoost) {
// Nothing to normalize
}
@Override
public boolean release() throws ElasticsearchException {
Releasables.release(parentDocs);
return true;
}
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
ParentDoc[] readerParentDocs = parentDocs.v().get(context.reader().getCoreCacheKey());
if (readerParentDocs != null) {
if (scoreType == ScoreType.MAX) {
return new ParentScorer(this, readerParentDocs) {
@Override
public float score() throws IOException {
assert doc.docId >= 0 || doc.docId < NO_MORE_DOCS;
return doc.maxScore;
}
};
} else if (scoreType == ScoreType.AVG) {
return new ParentScorer(this, readerParentDocs) {
@Override
public float score() throws IOException {
assert doc.docId >= 0 || doc.docId < NO_MORE_DOCS;
return doc.sumScores / doc.count;
}
};
} else if (scoreType == ScoreType.SUM) {
return new ParentScorer(this, readerParentDocs) {
@Override
public float score() throws IOException {
assert doc.docId >= 0 || doc.docId < NO_MORE_DOCS;
return doc.sumScores;
}
};
}
throw new ElasticsearchIllegalStateException("No support for score type [" + scoreType + "]");
}
return new EmptyScorer(this);
}
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
return new Explanation(getBoost(), "not implemented yet...");
}
}
private static abstract class ParentScorer extends Scorer {
private final ParentDoc spare = new ParentDoc();
protected final ParentDoc[] docs;
protected ParentDoc doc = spare;
private int index = -1;
ParentScorer(ParentWeight weight, ParentDoc[] docs) throws IOException {
super(weight);
this.docs = docs;
spare.docId = -1;
spare.count = -1;
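// the spare acts as a sentinel: docID() is -1 before iteration and NO_MORE_DOCS once the docs array is exhausted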
}
@Override
public final int docID() {
return doc.docId;
}
@Override
public final int advance(int target) throws IOException {
return slowAdvance(target);
}
@Override
public final int nextDoc() throws IOException {
if (++index >= docs.length) {
doc = spare;
doc.count = 0;
return (doc.docId = NO_MORE_DOCS);
}
return (doc = docs[index]).docId;
}
@Override
public final int freq() throws IOException {
return doc.count; // the number of matching child docs for this parent, propagated as the frequency
}
@Override
public final long cost() {
return docs.length;
}
}
private static class ParentDocComparator implements Comparator<ParentDoc> {
@Override
public int compare(ParentDoc o1, ParentDoc o2) {
return o1.docId - o2.docId;
}
}
private static class ParentDoc {
public int docId;
public int count;
public float maxScore = Float.NaN;
public float sumScores = 0;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_search_child_TopChildrenQuery.java
|
2,613 |
private static class DefaultStringCreator implements UTFEncoderDecoder.StringCreator {
@Override
public String buildString(char[] chars) {
return new String(chars);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_UTFEncoderDecoder.java
|
4,198 |
public static class FileInfo {
private final String name;
private final String physicalName;
private final long length;
private final String checksum;
private final ByteSizeValue partSize;
private final long partBytes;
private final long numberOfParts;
/**
* Constructs a new instance of file info
*
* @param name file name as stored in the blob store
* @param physicalName original file name
* @param length total length of the file
* @param partSize size of the single chunk
* @param checksum checksum for the file
*/
public FileInfo(String name, String physicalName, long length, ByteSizeValue partSize, String checksum) {
this.name = name;
this.physicalName = physicalName;
this.length = length;
this.checksum = checksum;
long partBytes = Long.MAX_VALUE;
if (partSize != null) {
partBytes = partSize.bytes();
}
long totalLength = length;
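// number of parts is length / partBytes rounded up, with a minimum of one part even for empty files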
long numberOfParts = totalLength / partBytes;
if (totalLength % partBytes > 0) {
numberOfParts++;
}
if (numberOfParts == 0) {
numberOfParts++;
}
this.numberOfParts = numberOfParts;
this.partSize = partSize;
this.partBytes = partBytes;
}
/**
* Returns the base file name
*
* @return file name
*/
public String name() {
return name;
}
/**
* Returns part name if file is stored as multiple parts
*
* @param part part number
* @return part name
*/
public String partName(long part) {
if (numberOfParts > 1) {
return name + ".part" + part;
} else {
return name;
}
}
/**
* Returns base file name from part name
*
* @param blobName part name
* @return base file name
*/
public static String canonicalName(String blobName) {
if (blobName.contains(".part")) {
return blobName.substring(0, blobName.indexOf(".part"));
}
return blobName;
}
/**
* Returns original file name
*
* @return original file name
*/
public String physicalName() {
return this.physicalName;
}
/**
* File length
*
* @return file length
*/
public long length() {
return length;
}
/**
* Returns part size
*
* @return part size
*/
public ByteSizeValue partSize() {
return partSize;
}
/**
* Return maximum number of bytes in a part
*
* @return maximum number of bytes in a part
*/
public long partBytes() {
return partBytes;
}
/**
* Returns number of parts
*
* @return number of parts
*/
public long numberOfParts() {
return numberOfParts;
}
/**
* Returns file md5 checksum provided by {@link org.elasticsearch.index.store.Store}
*
* @return file checksum
*/
@Nullable
public String checksum() {
return checksum;
}
/**
* Checks if a file in a store is the same file
*
* @param md file in a store
* @return true if the file in the store and this file have the same checksum and length
*/
public boolean isSame(StoreFileMetaData md) {
if (checksum == null || md.checksum() == null) {
return false;
}
return length == md.length() && checksum.equals(md.checksum());
}
static final class Fields {
static final XContentBuilderString NAME = new XContentBuilderString("name");
static final XContentBuilderString PHYSICAL_NAME = new XContentBuilderString("physical_name");
static final XContentBuilderString LENGTH = new XContentBuilderString("length");
static final XContentBuilderString CHECKSUM = new XContentBuilderString("checksum");
static final XContentBuilderString PART_SIZE = new XContentBuilderString("part_size");
}
/**
* Serializes file info into JSON
*
* @param file file info
* @param builder XContent builder
* @param params parameters
* @throws IOException
*/
public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject();
builder.field(Fields.NAME, file.name);
builder.field(Fields.PHYSICAL_NAME, file.physicalName);
builder.field(Fields.LENGTH, file.length);
if (file.checksum != null) {
builder.field(Fields.CHECKSUM, file.checksum);
}
if (file.partSize != null) {
builder.field(Fields.PART_SIZE, file.partSize.bytes());
}
builder.endObject();
}
/**
* Parses JSON that represents file info
*
* @param parser parser
* @return file info
* @throws IOException
*/
public static FileInfo fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
String name = null;
String physicalName = null;
long length = -1;
String checksum = null;
ByteSizeValue partSize = null;
if (token == XContentParser.Token.START_OBJECT) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();
token = parser.nextToken();
if (token.isValue()) {
if ("name".equals(currentFieldName)) {
name = parser.text();
} else if ("physical_name".equals(currentFieldName)) {
physicalName = parser.text();
} else if ("length".equals(currentFieldName)) {
length = parser.longValue();
} else if ("checksum".equals(currentFieldName)) {
checksum = parser.text();
} else if ("part_size".equals(currentFieldName)) {
partSize = new ByteSizeValue(parser.longValue());
} else {
throw new ElasticsearchParseException("unknown parameter [" + currentFieldName + "]");
}
} else {
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}
} else {
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}
}
}
// TODO: Verify???
return new FileInfo(name, physicalName, length, partSize, checksum);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_snapshots_blobstore_BlobStoreIndexShardSnapshot.java
|
18 |
public static class TransactionCountPruneStrategy extends AbstractPruneStrategy
{
private final int maxTransactionCount;
public TransactionCountPruneStrategy( FileSystemAbstraction fileSystem, int maxTransactionCount )
{
super( fileSystem );
this.maxTransactionCount = maxTransactionCount;
}
@Override
protected Threshold newThreshold()
{
return new Threshold()
{
private Long highest;
@Override
public boolean reached( File file, long version, LogLoader source )
{
// Here we know that the log version exists (checked in AbstractPruneStrategy#prune)
long tx = source.getFirstCommittedTxId( version );
if ( highest == null )
{
highest = source.getLastCommittedTxId();
return false;
}
return highest-tx >= maxTransactionCount;
}
};
}
}
| 1no label
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogPruneStrategies.java
|
33 |
static final class ParameterInfo
extends InvocationCompletionProposal {
private ParameterInfo(int offset, Declaration dec,
ProducedReference producedReference,
Scope scope, CeylonParseController cpc,
boolean namedInvocation) {
super(offset, "", "show parameters", "", dec,
producedReference, scope, cpc, true,
true, namedInvocation, false, null);
}
@Override
boolean isParameterInfo() {
return true;
}
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public void apply(IDocument document) {}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_InvocationCompletionProposal.java
|
1,571 |
@Embeddable
public class AdminAuditable implements Serializable {
private static final long serialVersionUID = 1L;
@Column(name = "DATE_CREATED", updatable = false)
@Temporal(TemporalType.TIMESTAMP)
@AdminPresentation(friendlyName = "AdminAuditable_Date_Created", group = "AdminAuditable_Audit", readOnly = true)
protected Date dateCreated;
@Column(name = "CREATED_BY", updatable = false)
protected Long createdBy;
@Column(name = "DATE_UPDATED")
@Temporal(TemporalType.TIMESTAMP)
@AdminPresentation(friendlyName = "AdminAuditable_Date_Updated", group = "AdminAuditable_Audit", readOnly = true)
protected Date dateUpdated;
@Column(name = "UPDATED_BY")
protected Long updatedBy;
public Date getDateCreated() {
return dateCreated;
}
public Date getDateUpdated() {
return dateUpdated;
}
public void setDateCreated(Date dateCreated) {
this.dateCreated = dateCreated;
}
public void setDateUpdated(Date dateUpdated) {
this.dateUpdated = dateUpdated;
}
public Long getCreatedBy() {
return createdBy;
}
public void setCreatedBy(Long createdBy) {
this.createdBy = createdBy;
}
public Long getUpdatedBy() {
return updatedBy;
}
public void setUpdatedBy(Long updatedBy) {
this.updatedBy = updatedBy;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_audit_AdminAuditable.java
|
2,111 |
abstract class MapProxySupport extends AbstractDistributedObject<MapService> implements InitializingObject {
protected static final String NULL_KEY_IS_NOT_ALLOWED = "Null key is not allowed!";
protected static final String NULL_VALUE_IS_NOT_ALLOWED = "Null value is not allowed!";
protected final String name;
protected final MapConfig mapConfig;
protected final LocalMapStatsImpl localMapStats;
protected final LockProxySupport lockSupport;
protected final PartitioningStrategy partitionStrategy;
protected MapProxySupport(final String name, final MapService service, NodeEngine nodeEngine) {
super(nodeEngine, service);
this.name = name;
mapConfig = service.getMapContainer(name).getMapConfig();
partitionStrategy = service.getMapContainer(name).getPartitioningStrategy();
localMapStats = service.getLocalMapStatsImpl(name);
lockSupport = new LockProxySupport(new DefaultObjectNamespace(MapService.SERVICE_NAME, name));
}
@Override
public void initialize() {
initializeListeners();
initializeIndexes();
initializeMapStoreLoad();
}
private void initializeMapStoreLoad() {
MapStoreConfig mapStoreConfig = mapConfig.getMapStoreConfig();
if (mapStoreConfig != null && mapStoreConfig.isEnabled()) {
MapStoreConfig.InitialLoadMode initialLoadMode = mapStoreConfig.getInitialLoadMode();
if (initialLoadMode.equals(MapStoreConfig.InitialLoadMode.EAGER)) {
waitUntilLoaded();
}
}
}
private void initializeIndexes() {
for (MapIndexConfig index : mapConfig.getMapIndexConfigs()) {
if (index.getAttribute() != null) {
addIndex(index.getAttribute(), index.isOrdered());
}
}
}
private void initializeListeners() {
final NodeEngine nodeEngine = getNodeEngine();
List<EntryListenerConfig> listenerConfigs = mapConfig.getEntryListenerConfigs();
for (EntryListenerConfig listenerConfig : listenerConfigs) {
EntryListener listener = null;
if (listenerConfig.getImplementation() != null) {
listener = listenerConfig.getImplementation();
} else if (listenerConfig.getClassName() != null) {
try {
listener = ClassLoaderUtil
.newInstance(nodeEngine.getConfigClassLoader(), listenerConfig.getClassName());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
if (listener != null) {
if (listener instanceof HazelcastInstanceAware) {
((HazelcastInstanceAware) listener).setHazelcastInstance(nodeEngine.getHazelcastInstance());
}
if (listenerConfig.isLocal()) {
addLocalEntryListener(listener);
} else {
addEntryListenerInternal(listener, null, listenerConfig.isIncludeValue());
}
}
}
}
// this operation returns the value in Data format, except when it comes from the near-cache and the near-cache in-memory format is OBJECT.
protected Object getInternal(Data key) {
final MapService mapService = getService();
final boolean nearCacheEnabled = mapConfig.isNearCacheEnabled();
if (nearCacheEnabled) {
Object cached = mapService.getFromNearCache(name, key);
if (cached != null) {
if (NearCache.NULL_OBJECT.equals(cached)) {
cached = null;
}
mapService.interceptAfterGet(name, cached);
return cached;
}
}
NodeEngine nodeEngine = getNodeEngine();
// todo action for read-backup true is not well tested.
if (mapConfig.isReadBackupData()) {
int backupCount = mapConfig.getTotalBackupCount();
InternalPartitionService partitionService = mapService.getNodeEngine().getPartitionService();
for (int i = 0; i <= backupCount; i++) {
int partitionId = partitionService.getPartitionId(key);
InternalPartition partition = partitionService.getPartition(partitionId);
if (nodeEngine.getThisAddress().equals(partition.getReplicaAddress(i))) {
Object val = mapService.getPartitionContainer(partitionId).getRecordStore(name).get(key);
if (val != null) {
mapService.interceptAfterGet(name, val);
// this serialization step is needed so that the internal object is not exposed, see issue 1292
return mapService.toData(val);
}
}
}
}
GetOperation operation = new GetOperation(name, key);
Data result = (Data) invokeOperation(key, operation);
if (nearCacheEnabled) {
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
if (!nodeEngine.getPartitionService().getPartitionOwner(partitionId)
.equals(nodeEngine.getClusterService().getThisAddress()) || mapConfig.getNearCacheConfig().isCacheLocalEntries()) {
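// populate the near-cache only for remotely-owned keys, unless caching of local entries is explicitly enabled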
return mapService.putNearCache(name, key, result);
}
}
return result;
}
protected ICompletableFuture<Data> getAsyncInternal(final Data key) {
final NodeEngine nodeEngine = getNodeEngine();
final MapService mapService = getService();
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
final boolean nearCacheEnabled = mapConfig.isNearCacheEnabled();
if (nearCacheEnabled) {
Object cached = mapService.getFromNearCache(name, key);
if (cached != null) {
if (NearCache.NULL_OBJECT.equals(cached)) {
cached = null;
}
return new CompletedFuture<Data>(
nodeEngine.getSerializationService(),
cached,
nodeEngine.getExecutionService().getExecutor(ExecutionService.ASYNC_EXECUTOR));
}
}
GetOperation operation = new GetOperation(name, key);
try {
final OperationService operationService = nodeEngine.getOperationService();
final InvocationBuilder invocationBuilder = operationService.createInvocationBuilder(SERVICE_NAME, operation, partitionId).setResultDeserialized(false);
final InternalCompletableFuture<Data> future = invocationBuilder.invoke();
future.andThen(new ExecutionCallback<Data>() {
@Override
public void onResponse(Data response) {
if (nearCacheEnabled) {
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
if (!nodeEngine.getPartitionService().getPartitionOwner(partitionId)
.equals(nodeEngine.getClusterService().getThisAddress()) || mapConfig.getNearCacheConfig().isCacheLocalEntries()) {
mapService.putNearCache(name, key, response);
}
}
}
@Override
public void onFailure(Throwable t) {
}
});
return future;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
protected Data putInternal(final Data key, final Data value, final long ttl, final TimeUnit timeunit) {
PutOperation operation = new PutOperation(name, key, value, getTimeInMillis(ttl, timeunit));
Data previousValue = (Data) invokeOperation(key, operation);
invalidateNearCache(key);
return previousValue;
}
protected boolean tryPutInternal(final Data key, final Data value, final long timeout, final TimeUnit timeunit) {
TryPutOperation operation = new TryPutOperation(name, key, value, getTimeInMillis(timeout, timeunit));
boolean putSuccessful = (Boolean) invokeOperation(key, operation);
invalidateNearCache(key);
return putSuccessful;
}
protected Data putIfAbsentInternal(final Data key, final Data value, final long ttl, final TimeUnit timeunit) {
PutIfAbsentOperation operation = new PutIfAbsentOperation(name, key, value, getTimeInMillis(ttl, timeunit));
Data previousValue = (Data) invokeOperation(key, operation);
invalidateNearCache(key);
return previousValue;
}
protected void putTransientInternal(final Data key, final Data value, final long ttl, final TimeUnit timeunit) {
PutTransientOperation operation = new PutTransientOperation(name, key, value, getTimeInMillis(ttl, timeunit));
invokeOperation(key, operation);
invalidateNearCache(key);
}
private Object invokeOperation(Data key, KeyBasedMapOperation operation) {
final NodeEngine nodeEngine = getNodeEngine();
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
operation.setThreadId(ThreadUtil.getThreadId());
try {
Future f;
Object o;
OperationService operationService = nodeEngine.getOperationService();
if (mapConfig.isStatisticsEnabled()) {
long time = System.currentTimeMillis();
f = operationService
.createInvocationBuilder(SERVICE_NAME, operation, partitionId)
.setResultDeserialized(false)
.invoke();
o = f.get();
if (operation instanceof BasePutOperation)
localMapStats.incrementPuts(System.currentTimeMillis() - time);
else if (operation instanceof BaseRemoveOperation)
localMapStats.incrementRemoves(System.currentTimeMillis() - time);
else if (operation instanceof GetOperation)
localMapStats.incrementGets(System.currentTimeMillis() - time);
} else {
f = operationService.createInvocationBuilder(SERVICE_NAME, operation, partitionId)
.setResultDeserialized(false).invoke();
o = f.get();
}
return o;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
protected ICompletableFuture<Data> putAsyncInternal(final Data key, final Data value, final long ttl, final TimeUnit timeunit) {
final NodeEngine nodeEngine = getNodeEngine();
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
PutOperation operation = new PutOperation(name, key, value, getTimeInMillis(ttl, timeunit));
operation.setThreadId(ThreadUtil.getThreadId());
try {
ICompletableFuture<Data> future = nodeEngine.getOperationService().invokeOnPartition(SERVICE_NAME, operation, partitionId);
invalidateNearCache(key);
return future;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
protected boolean replaceInternal(final Data key, final Data oldValue, final Data newValue) {
ReplaceIfSameOperation operation = new ReplaceIfSameOperation(name, key, oldValue, newValue);
boolean replaceSuccessful = (Boolean) invokeOperation(key, operation);
invalidateNearCache(key);
return replaceSuccessful;
}
protected Data replaceInternal(final Data key, final Data value) {
ReplaceOperation operation = new ReplaceOperation(name, key, value);
final Data result = (Data) invokeOperation(key, operation);
invalidateNearCache(key);
return result;
}
protected void setInternal(final Data key, final Data value, final long ttl, final TimeUnit timeunit) {
SetOperation operation = new SetOperation(name, key, value, timeunit.toMillis(ttl));
invokeOperation(key, operation);
invalidateNearCache(key);
}
protected boolean evictInternal(final Data key) {
EvictOperation operation = new EvictOperation(name, key, false);
final boolean evictSuccess = (Boolean) invokeOperation(key, operation);
invalidateNearCache(key);
return evictSuccess;
}
protected Data removeInternal(Data key) {
RemoveOperation operation = new RemoveOperation(name, key);
Data previousValue = (Data) invokeOperation(key, operation);
invalidateNearCache(key);
return previousValue;
}
protected void deleteInternal(Data key) {
RemoveOperation operation = new RemoveOperation(name, key);
invokeOperation(key, operation);
invalidateNearCache(key);
}
protected boolean removeInternal(final Data key, final Data value) {
RemoveIfSameOperation operation = new RemoveIfSameOperation(name, key, value);
boolean removed = (Boolean) invokeOperation(key, operation);
invalidateNearCache(key);
return removed;
}
protected boolean tryRemoveInternal(final Data key, final long timeout, final TimeUnit timeunit) {
TryRemoveOperation operation = new TryRemoveOperation(name, key, getTimeInMillis(timeout, timeunit));
boolean removed = (Boolean) invokeOperation(key, operation);
invalidateNearCache(key);
return removed;
}
protected ICompletableFuture<Data> removeAsyncInternal(final Data key) {
final NodeEngine nodeEngine = getNodeEngine();
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
RemoveOperation operation = new RemoveOperation(name, key);
operation.setThreadId(ThreadUtil.getThreadId());
try {
ICompletableFuture<Data> future = nodeEngine.getOperationService().invokeOnPartition(SERVICE_NAME, operation, partitionId);
invalidateNearCache(key);
return future;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
protected boolean containsKeyInternal(Data key) {
if (isKeyInNearCache(key)) {
return true;
}
final NodeEngine nodeEngine = getNodeEngine();
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
ContainsKeyOperation containsKeyOperation = new ContainsKeyOperation(name, key);
containsKeyOperation.setServiceName(SERVICE_NAME);
try {
Future f = nodeEngine.getOperationService().invokeOnPartition(SERVICE_NAME, containsKeyOperation,
partitionId);
return (Boolean) getService().toObject(f.get());
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
public void waitUntilLoaded() {
final NodeEngine nodeEngine = getNodeEngine();
try {
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME, new PartitionCheckIfLoadedOperationFactory(name));
Iterator<Entry<Integer, Object>> iterator = results.entrySet().iterator();
boolean isFinished = false;
final Set<Integer> retrySet = new HashSet<Integer>();
while (!isFinished) {
while (iterator.hasNext()) {
final Entry<Integer, Object> entry = iterator.next();
if (Boolean.TRUE.equals(entry.getValue())) {
iterator.remove();
} else {
retrySet.add(entry.getKey());
}
}
if (retrySet.size() > 0) {
results = retryPartitions(retrySet);
iterator = results.entrySet().iterator();
Thread.sleep(1000);
retrySet.clear();
} else {
isFinished = true;
}
}
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
private Map<Integer, Object> retryPartitions(Collection partitions) {
final NodeEngine nodeEngine = getNodeEngine();
try {
final Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnPartitions(SERVICE_NAME, new PartitionCheckIfLoadedOperationFactory(name), partitions);
return results;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
public int size() {
final NodeEngine nodeEngine = getNodeEngine();
try {
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME, new SizeOperationFactory(name));
int total = 0;
for (Object result : results.values()) {
Integer size = (Integer) getService().toObject(result);
total += size;
}
return total;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
public boolean containsValueInternal(Data dataValue) {
final NodeEngine nodeEngine = getNodeEngine();
try {
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME, new ContainsValueOperationFactory(name, dataValue));
for (Object result : results.values()) {
Boolean contains = (Boolean) getService().toObject(result);
if (contains)
return true;
}
return false;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
public boolean isEmpty() {
final NodeEngine nodeEngine = getNodeEngine();
try {
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME,
new BinaryOperationFactory(new MapIsEmptyOperation(name), nodeEngine));
for (Object result : results.values()) {
if (!(Boolean) getService().toObject(result))
return false;
}
return true;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
protected Map<Object, Object> getAllObjectInternal(final Set<Data> keys) {
final NodeEngine nodeEngine = getNodeEngine();
final MapService mapService = getService();
Map<Object, Object> result = new HashMap<Object, Object>();
final boolean nearCacheEnabled = mapConfig.isNearCacheEnabled();
if (nearCacheEnabled) {
final Iterator<Data> iterator = keys.iterator();
while (iterator.hasNext()) {
Data key = iterator.next();
Object cachedValue = mapService.getFromNearCache(name, key);
if (cachedValue != null) {
if (!NearCache.NULL_OBJECT.equals(cachedValue)) {
result.put(mapService.toObject(key), mapService.toObject(cachedValue));
}
iterator.remove();
}
}
}
if (keys.isEmpty()) {
return result;
}
Collection<Integer> partitions = getPartitionsForKeys(keys);
Map<Integer, Object> responses;
try {
responses = nodeEngine.getOperationService()
.invokeOnPartitions(SERVICE_NAME, new MapGetAllOperationFactory(name, keys), partitions);
for (Object response : responses.values()) {
Set<Map.Entry<Data, Data>> entries = ((MapEntrySet) mapService.toObject(response)).getEntrySet();
for (Entry<Data, Data> entry : entries) {
result.put(mapService.toObject(entry.getKey()), mapService.toObject(entry.getValue()));
if (nearCacheEnabled) {
int partitionId = nodeEngine.getPartitionService().getPartitionId(entry.getKey());
if (!nodeEngine.getPartitionService().getPartitionOwner(partitionId)
.equals(nodeEngine.getClusterService().getThisAddress()) || mapConfig.getNearCacheConfig().isCacheLocalEntries()) {
mapService.putNearCache(name, entry.getKey(), entry.getValue());
}
}
}
}
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
return result;
}
private Collection<Integer> getPartitionsForKeys(Set<Data> keys) {
InternalPartitionService partitionService = getNodeEngine().getPartitionService();
int partitions = partitionService.getPartitionCount();
int capacity = Math.min(partitions, keys.size()); //todo: is there a better way to estimate the size?
Set<Integer> partitionIds = new HashSet<Integer>(capacity);
Iterator<Data> iterator = keys.iterator();
while (iterator.hasNext() && partitionIds.size() < partitions) {
Data key = iterator.next();
partitionIds.add(partitionService.getPartitionId(key));
}
return partitionIds;
}
protected void putAllInternal(final Map<? extends Object, ? extends Object> entries) {
final NodeEngine nodeEngine = getNodeEngine();
final MapService mapService = getService();
int factor = 3;
InternalPartitionService partitionService = nodeEngine.getPartitionService();
OperationService operationService = nodeEngine.getOperationService();
int partitionCount = partitionService.getPartitionCount();
boolean tooManyEntries = entries.size() > (partitionCount * factor);
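// heuristic: when entries outnumber partitions by more than the factor, batch them into one
// PutAllOperation per partition; otherwise fall back to plain per-key puts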
try {
if (tooManyEntries) {
List<Future> futures = new LinkedList<Future>();
Map<Integer, MapEntrySet> entryMap = new HashMap<Integer, MapEntrySet>(nodeEngine.getPartitionService().getPartitionCount());
for (Entry entry : entries.entrySet()) {
if (entry.getKey() == null) {
throw new NullPointerException(NULL_KEY_IS_NOT_ALLOWED);
}
if (entry.getValue() == null) {
throw new NullPointerException(NULL_VALUE_IS_NOT_ALLOWED);
}
int partitionId = partitionService.getPartitionId(entry.getKey());
if (!entryMap.containsKey(partitionId)) {
entryMap.put(partitionId, new MapEntrySet());
}
entryMap.get(partitionId).add(new AbstractMap.SimpleImmutableEntry<Data, Data>(mapService.toData(
entry.getKey(),
partitionStrategy),
mapService
.toData(entry.getValue())
));
}
for (final Map.Entry<Integer, MapEntrySet> entry : entryMap.entrySet()) {
final Integer partitionId = entry.getKey();
final PutAllOperation op = new PutAllOperation(name, entry.getValue());
op.setPartitionId(partitionId);
futures.add(operationService.invokeOnPartition(SERVICE_NAME, op, partitionId));
}
for (Future future : futures) {
future.get();
}
} else {
for (Entry entry : entries.entrySet()) {
if (entry.getKey() == null) {
throw new NullPointerException(NULL_KEY_IS_NOT_ALLOWED);
}
if (entry.getValue() == null) {
throw new NullPointerException(NULL_VALUE_IS_NOT_ALLOWED);
}
putInternal(mapService.toData(entry.getKey(), partitionStrategy),
mapService.toData(entry.getValue()),
-1,
TimeUnit.SECONDS);
}
}
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
protected Set<Data> keySetInternal() {
final NodeEngine nodeEngine = getNodeEngine();
try {
// todo you can optimize keyset by taking keys without lock then re-fetch missing ones. see localKeySet
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME,
new BinaryOperationFactory(new MapKeySetOperation(name), nodeEngine));
Set<Data> keySet = new HashSet<Data>();
for (Object result : results.values()) {
Set keys = ((MapKeySet) getService().toObject(result)).getKeySet();
keySet.addAll(keys);
}
return keySet;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
protected Set<Data> localKeySetInternal() {
final NodeEngine nodeEngine = getNodeEngine();
final MapService mapService = getService();
Set<Data> keySet = new HashSet<Data>();
try {
List<Integer> memberPartitions =
nodeEngine.getPartitionService().getMemberPartitions(nodeEngine.getThisAddress());
for (Integer memberPartition : memberPartitions) {
RecordStore recordStore = mapService.getRecordStore(memberPartition, name);
keySet.addAll(recordStore.getReadonlyRecordMap().keySet());
}
return keySet;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
public void flush() {
final NodeEngine nodeEngine = getNodeEngine();
try {
// todo add a feature to mancenter to sync cache to db completely
nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME,
new BinaryOperationFactory(new MapFlushOperation(name), nodeEngine));
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
protected Collection<Data> valuesInternal() {
final NodeEngine nodeEngine = getNodeEngine();
try {
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME,
new BinaryOperationFactory(new MapValuesOperation(name), nodeEngine));
List<Data> values = new ArrayList<Data>();
for (Object result : results.values()) {
values.addAll(((MapValueCollection) getService().toObject(result)).getValues());
}
return values;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
public void clearInternal() {
final String mapName = name;
final NodeEngine nodeEngine = getNodeEngine();
try {
ClearOperation clearOperation = new ClearOperation(mapName);
clearOperation.setServiceName(SERVICE_NAME);
nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME, new BinaryOperationFactory(clearOperation, nodeEngine));
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
public String addMapInterceptorInternal(MapInterceptor interceptor) {
final NodeEngine nodeEngine = getNodeEngine();
final MapService mapService = getService();
if (interceptor instanceof HazelcastInstanceAware) {
((HazelcastInstanceAware) interceptor).setHazelcastInstance(nodeEngine.getHazelcastInstance());
}
String id = mapService.addInterceptor(name, interceptor);
Collection<MemberImpl> members = nodeEngine.getClusterService().getMemberList();
for (MemberImpl member : members) {
try {
if (member.localMember())
continue;
Future f = nodeEngine.getOperationService()
.invokeOnTarget(SERVICE_NAME, new AddInterceptorOperation(id, interceptor, name),
member.getAddress());
f.get();
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
return id;
}
public void removeMapInterceptorInternal(String id) {
final NodeEngine nodeEngine = getNodeEngine();
final MapService mapService = getService();
mapService.removeInterceptor(name, id);
Collection<MemberImpl> members = nodeEngine.getClusterService().getMemberList();
for (Member member : members) {
try {
if (member.localMember())
continue;
MemberImpl memberImpl = (MemberImpl) member;
Future f = nodeEngine.getOperationService()
.invokeOnTarget(SERVICE_NAME, new RemoveInterceptorOperation(name, id), memberImpl.getAddress());
f.get();
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
}
public String addLocalEntryListener(final EntryListener listener) {
final MapService mapService = getService();
return mapService.addLocalEventListener(listener, name);
}
public String addLocalEntryListenerInternal(EntryListener listener, Predicate predicate, final Data key, boolean includeValue) {
final MapService mapService = getService();
EventFilter eventFilter = new QueryEventFilter(includeValue, key, predicate);
return mapService.addLocalEventListener(listener, eventFilter, name);
}
protected String addEntryListenerInternal(
final EntryListener listener, final Data key, final boolean includeValue) {
EventFilter eventFilter = new EntryEventFilter(includeValue, key);
final MapService mapService = getService();
return mapService.addEventListener(listener, eventFilter, name);
}
protected String addEntryListenerInternal(
EntryListener listener, Predicate predicate, final Data key, final boolean includeValue) {
EventFilter eventFilter = new QueryEventFilter(includeValue, key, predicate);
final MapService mapService = getService();
return mapService.addEventListener(listener, eventFilter, name);
}
protected boolean removeEntryListenerInternal(String id) {
final MapService mapService = getService();
return mapService.removeEventListener(name, id);
}
protected EntryView getEntryViewInternal(final Data key) {
final NodeEngine nodeEngine = getNodeEngine();
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
GetEntryViewOperation getEntryViewOperation = new GetEntryViewOperation(name, key);
getEntryViewOperation.setServiceName(SERVICE_NAME);
try {
Future f = nodeEngine.getOperationService().invokeOnPartition(SERVICE_NAME, getEntryViewOperation, partitionId);
Object o = getService().toObject(f.get());
return (EntryView) o;
} catch (Throwable t) {
throw new RuntimeException(t);
}
}
protected Set<Entry<Data, Data>> entrySetInternal() {
final NodeEngine nodeEngine = getNodeEngine();
try {
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME,
new BinaryOperationFactory(new MapEntrySetOperation(name), nodeEngine));
Set<Entry<Data, Data>> entrySet = new HashSet<Entry<Data, Data>>();
for (Object result : results.values()) {
Set entries = ((MapEntrySet) getService().toObject(result)).getEntrySet();
if (entries != null)
entrySet.addAll(entries);
}
return entrySet;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
public Data executeOnKeyInternal(Data key, EntryProcessor entryProcessor) {
final NodeEngine nodeEngine = getNodeEngine();
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
EntryOperation operation = new EntryOperation(name, key, entryProcessor);
operation.setThreadId(ThreadUtil.getThreadId());
try {
Future future = nodeEngine.getOperationService()
.createInvocationBuilder(SERVICE_NAME, operation, partitionId)
.setResultDeserialized(false)
.invoke();
final Data data = (Data) future.get();
invalidateNearCache(key);
return data;
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
public Map executeOnKeysInternal(Set<Data> keys, EntryProcessor entryProcessor) {
Map result = new HashMap();
final NodeEngine nodeEngine = getNodeEngine();
final Collection<Integer> partitionsForKeys = getPartitionsForKeys(keys);
try {
MultipleEntryOperationFactory operationFactory = new MultipleEntryOperationFactory(name, keys, entryProcessor);
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnPartitions(SERVICE_NAME, operationFactory, partitionsForKeys);
for (Object o : results.values()) {
if (o != null) {
final MapService service = getService();
final MapEntrySet mapEntrySet = (MapEntrySet) o;
for (Entry<Data, Data> entry : mapEntrySet.getEntrySet()) {
result.put(service.toObject(entry.getKey()), service.toObject(entry.getValue()));
}
}
}
invalidateNearCache(keys);
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
return result;
}
public ICompletableFuture executeOnKeyInternal(Data key, EntryProcessor entryProcessor, ExecutionCallback callback) {
final NodeEngine nodeEngine = getNodeEngine();
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
EntryOperation operation = new EntryOperation(name, key, entryProcessor);
operation.setThreadId(ThreadUtil.getThreadId());
try {
if (callback == null) {
return nodeEngine.getOperationService().invokeOnPartition(SERVICE_NAME, operation, partitionId);
} else {
ICompletableFuture future = nodeEngine.getOperationService()
.createInvocationBuilder(SERVICE_NAME, operation, partitionId)
.setCallback(new MapExecutionCallbackAdapter(callback))
.invoke();
invalidateNearCache(key);
return future;
}
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
/**
* {@link IMap#executeOnEntries(EntryProcessor)}
*/
public Map executeOnEntries(EntryProcessor entryProcessor) {
Map result = new HashMap();
try {
NodeEngine nodeEngine = getNodeEngine();
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME, new PartitionWideEntryOperationFactory(name, entryProcessor));
for (Object o : results.values()) {
if (o != null) {
final MapService service = getService();
final MapEntrySet mapEntrySet = (MapEntrySet) o;
for (Entry<Data, Data> entry : mapEntrySet.getEntrySet()) {
final Data key = entry.getKey();
result.put(service.toObject(key), service.toObject(entry.getValue()));
invalidateNearCache(key);
}
}
}
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
return result;
}
/**
* {@link IMap#executeOnEntries(EntryProcessor, Predicate)}
*/
public Map executeOnEntries(EntryProcessor entryProcessor, Predicate predicate) {
Map result = new HashMap();
try {
NodeEngine nodeEngine = getNodeEngine();
Map<Integer, Object> results = nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME,
new PartitionWideEntryWithPredicateOperationFactory(name,
entryProcessor,
predicate)
);
for (Object o : results.values()) {
if (o != null) {
final MapService service = getService();
final MapEntrySet mapEntrySet = (MapEntrySet) o;
for (Entry<Data, Data> entry : mapEntrySet.getEntrySet()) {
final Data key = entry.getKey();
result.put(service.toObject(key), service.toObject(entry.getValue()));
invalidateNearCache(key);
}
}
}
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
return result;
}
protected Set queryLocal(final Predicate predicate, final IterationType iterationType, final boolean dataResult) {
final NodeEngine nodeEngine = getNodeEngine();
OperationService operationService = nodeEngine.getOperationService();
final SerializationService ss = nodeEngine.getSerializationService();
List<Integer> partitionIds = nodeEngine.getPartitionService().getMemberPartitions(nodeEngine.getThisAddress());
PagingPredicate pagingPredicate = null;
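// If a page beyond the first is requested without an anchor, the previous page is
// evaluated first so its last entry can serve as the anchor for this one.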
if (predicate instanceof PagingPredicate) {
pagingPredicate = (PagingPredicate) predicate;
pagingPredicate.setIterationType(iterationType);
if (pagingPredicate.getPage() > 0 && pagingPredicate.getAnchor() == null) {
pagingPredicate.previousPage();
query(pagingPredicate, iterationType, dataResult);
pagingPredicate.nextPage();
}
}
Set result;
if (pagingPredicate == null) {
result = new QueryResultSet(ss, iterationType, dataResult);
} else {
result = new SortedQueryResultSet(pagingPredicate.getComparator(), iterationType, pagingPredicate.getPageSize());
}
List<Integer> returnedPartitionIds = new ArrayList<Integer>();
try {
Future future = operationService
.invokeOnTarget(SERVICE_NAME,
new QueryOperation(name, predicate),
nodeEngine.getThisAddress());
QueryResult queryResult = (QueryResult) future.get();
if (queryResult != null) {
returnedPartitionIds = queryResult.getPartitionIds();
if (pagingPredicate == null) {
result.addAll(queryResult.getResult());
} else {
for (QueryResultEntry queryResultEntry : queryResult.getResult()) {
Object key = ss.toObject(queryResultEntry.getKeyData());
Object value = ss.toObject(queryResultEntry.getValueData());
result.add(new AbstractMap.SimpleImmutableEntry(key, value));
}
}
}
if (returnedPartitionIds.size() == partitionIds.size()) {
if (pagingPredicate != null) {
PagingPredicateAccessor.setPagingPredicateAnchor(pagingPredicate, ((SortedQueryResultSet) result).last());
}
return result;
}
List<Integer> missingList = new ArrayList<Integer>();
for (Integer partitionId : partitionIds) {
if (!returnedPartitionIds.contains(partitionId))
missingList.add(partitionId);
}
List<Future> futures = new ArrayList<Future>(missingList.size());
for (Integer pid : missingList) {
QueryPartitionOperation queryPartitionOperation = new QueryPartitionOperation(name, predicate);
queryPartitionOperation.setPartitionId(pid);
try {
Future f =
operationService.invokeOnPartition(SERVICE_NAME, queryPartitionOperation, pid);
futures.add(f);
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
for (Future f : futures) {
QueryResult qResult = (QueryResult) f.get();
if (pagingPredicate == null) {
result.addAll(qResult.getResult());
} else {
for (QueryResultEntry queryResultEntry : qResult.getResult()) {
Object key = ss.toObject(queryResultEntry.getKeyData());
Object value = ss.toObject(queryResultEntry.getValueData());
result.add(new AbstractMap.SimpleImmutableEntry(key, value));
}
}
}
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
return result;
}
protected Set query(final Predicate predicate, final IterationType iterationType, final boolean dataResult) {
final NodeEngine nodeEngine = getNodeEngine();
OperationService operationService = nodeEngine.getOperationService();
final SerializationService ss = nodeEngine.getSerializationService();
Collection<MemberImpl> members = nodeEngine.getClusterService().getMemberList();
int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
Set<Integer> plist = new HashSet<Integer>(partitionCount);
PagingPredicate pagingPredicate = null;
if (predicate instanceof PagingPredicate) {
pagingPredicate = (PagingPredicate) predicate;
pagingPredicate.setIterationType(iterationType);
if (pagingPredicate.getPage() > 0 && pagingPredicate.getAnchor() == null) {
pagingPredicate.previousPage();
query(pagingPredicate, iterationType, dataResult);
pagingPredicate.nextPage();
}
}
Set result;
if (pagingPredicate == null) {
result = new QueryResultSet(ss, iterationType, dataResult);
} else {
result = new SortedQueryResultSet(pagingPredicate.getComparator(), iterationType, pagingPredicate.getPageSize());
}
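// Two-phase execution: query every member first, then re-run the query on any
// partitions whose results were not reported (e.g. because they migrated mid-query).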
List<Integer> missingList = new ArrayList<Integer>();
try {
List<Future> flist = new ArrayList<Future>();
for (MemberImpl member : members) {
Future future = operationService
.invokeOnTarget(SERVICE_NAME, new QueryOperation(name, predicate), member.getAddress());
flist.add(future);
}
for (Future future : flist) {
QueryResult queryResult = (QueryResult) future.get();
if (queryResult != null) {
final List<Integer> partitionIds = queryResult.getPartitionIds();
if (partitionIds != null) {
plist.addAll(partitionIds);
if (pagingPredicate == null) {
result.addAll(queryResult.getResult());
} else {
for (QueryResultEntry queryResultEntry : queryResult.getResult()) {
Object key = ss.toObject(queryResultEntry.getKeyData());
Object value = ss.toObject(queryResultEntry.getValueData());
result.add(new AbstractMap.SimpleImmutableEntry(key, value));
}
}
}
}
}
if (plist.size() == partitionCount) {
if (pagingPredicate != null) {
PagingPredicateAccessor.setPagingPredicateAnchor(pagingPredicate, ((SortedQueryResultSet) result).last());
}
return result;
}
for (int i = 0; i < partitionCount; i++) {
if (!plist.contains(i)) {
missingList.add(i);
}
}
} catch (Throwable t) {
missingList.clear();
for (int i = 0; i < partitionCount; i++) {
if (!plist.contains(i)) {
missingList.add(i);
}
}
}
try {
List<Future> futures = new ArrayList<Future>(missingList.size());
for (Integer pid : missingList) {
QueryPartitionOperation queryPartitionOperation = new QueryPartitionOperation(name, predicate);
queryPartitionOperation.setPartitionId(pid);
try {
Future f =
operationService.invokeOnPartition(SERVICE_NAME, queryPartitionOperation, pid);
futures.add(f);
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
for (Future future : futures) {
QueryResult queryResult = (QueryResult) future.get();
if (pagingPredicate == null) {
result.addAll(queryResult.getResult());
} else {
for (QueryResultEntry queryResultEntry : queryResult.getResult()) {
Object key = ss.toObject(queryResultEntry.getKeyData());
Object value = ss.toObject(queryResultEntry.getValueData());
result.add(new AbstractMap.SimpleImmutableEntry(key, value));
}
}
}
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
if (pagingPredicate != null) {
PagingPredicateAccessor.setPagingPredicateAnchor(pagingPredicate, ((SortedQueryResultSet) result).last());
}
return result;
}
public void addIndex(final String attribute, final boolean ordered) {
final NodeEngine nodeEngine = getNodeEngine();
if (attribute == null) throw new IllegalArgumentException("Attribute name cannot be null");
try {
AddIndexOperation addIndexOperation = new AddIndexOperation(name, attribute, ordered);
nodeEngine.getOperationService()
.invokeOnAllPartitions(SERVICE_NAME, new BinaryOperationFactory(addIndexOperation, nodeEngine));
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
}
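// Usage sketch (hypothetical attribute name): map.addIndex("age", true) fans an
// AddIndexOperation out to every partition, so each partition builds the ordered index locally.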
public LocalMapStats getLocalMapStats() {
return getService().createLocalMapStats(name);
}
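// A near-cache entry holding the NULL_OBJECT marker means "known to be absent" and is
// deliberately not counted as a hit.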
private boolean isKeyInNearCache(Data key) {
final MapService mapService = getService();
final boolean nearCacheEnabled = mapConfig.isNearCacheEnabled();
if (nearCacheEnabled) {
Object cached = mapService.getFromNearCache(name, key);
if (cached != null && !cached.equals(NearCache.NULL_OBJECT)) {
return true;
}
}
return false;
}
private void invalidateNearCache(Data key) {
if (key == null) {
return;
}
getService().invalidateNearCache(name, key);
}
private void invalidateNearCache(Set<Data> keys) {
if (keys == null || keys.isEmpty()) {
return;
}
getService().invalidateNearCache(name, keys);
}
protected long getTimeInMillis(final long time, final TimeUnit timeunit) {
return timeunit != null ? timeunit.toMillis(time) : time;
}
public final String getName() {
return name;
}
public final String getServiceName() {
return SERVICE_NAME;
}
private class MapExecutionCallbackAdapter implements Callback {
private final ExecutionCallback executionCallback;
public MapExecutionCallbackAdapter(ExecutionCallback executionCallback) {
this.executionCallback = executionCallback;
}
@Override
public void notify(Object response) {
if (response instanceof Throwable) {
executionCallback.onFailure((Throwable) response);
} else {
executionCallback.onResponse(getService().toObject(response));
}
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_proxy_MapProxySupport.java
|
4,085 |
public class GeoDistanceFilter extends Filter {
private final double lat;
private final double lon;
private final double distance; // in miles
private final GeoDistance geoDistance;
private final IndexGeoPointFieldData indexFieldData;
private final GeoDistance.FixedSourceDistance fixedSourceDistance;
private GeoDistance.DistanceBoundingCheck distanceBoundingCheck;
private final Filter boundingBoxFilter;
public GeoDistanceFilter(double lat, double lon, double distance, GeoDistance geoDistance, IndexGeoPointFieldData indexFieldData, GeoPointFieldMapper mapper,
String optimizeBbox) {
this.lat = lat;
this.lon = lon;
this.distance = distance;
this.geoDistance = geoDistance;
this.indexFieldData = indexFieldData;
this.fixedSourceDistance = geoDistance.fixedSourceDistance(lat, lon, DistanceUnit.DEFAULT);
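// optimizeBbox modes: "memory" keeps the bounding box check in-memory per document,
// "indexed" delegates it to an indexed bounding box filter, "none"/null disables it.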
if (optimizeBbox != null && !"none".equals(optimizeBbox)) {
distanceBoundingCheck = GeoDistance.distanceBoundingCheck(lat, lon, distance, DistanceUnit.DEFAULT);
if ("memory".equals(optimizeBbox)) {
boundingBoxFilter = null;
} else if ("indexed".equals(optimizeBbox)) {
boundingBoxFilter = IndexedGeoBoundingBoxFilter.create(distanceBoundingCheck.topLeft(), distanceBoundingCheck.bottomRight(), mapper);
distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; // fine, we do the bounding box check using the filter
} else {
throw new ElasticsearchIllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported");
}
} else {
distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE;
boundingBoxFilter = null;
}
}
public double lat() {
return lat;
}
public double lon() {
return lon;
}
public double distance() {
return distance;
}
public GeoDistance geoDistance() {
return geoDistance;
}
public String fieldName() {
return indexFieldData.getFieldNames().indexName();
}
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptedDocs) throws IOException {
DocIdSet boundingBoxDocSet = null;
if (boundingBoxFilter != null) {
boundingBoxDocSet = boundingBoxFilter.getDocIdSet(context, acceptedDocs);
if (DocIdSets.isEmpty(boundingBoxDocSet)) {
return null;
}
}
final GeoPointValues values = indexFieldData.load(context).getGeoPointValues();
GeoDistanceDocSet distDocSet = new GeoDistanceDocSet(context.reader().maxDoc(), acceptedDocs, values, fixedSourceDistance, distanceBoundingCheck, distance);
if (boundingBoxDocSet == null) {
return distDocSet;
} else {
return new AndDocIdSet(new DocIdSet[]{boundingBoxDocSet, distDocSet});
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
GeoDistanceFilter filter = (GeoDistanceFilter) o;
if (Double.compare(filter.distance, distance) != 0) return false;
if (Double.compare(filter.lat, lat) != 0) return false;
if (Double.compare(filter.lon, lon) != 0) return false;
if (!indexFieldData.getFieldNames().indexName().equals(filter.indexFieldData.getFieldNames().indexName()))
return false;
if (geoDistance != filter.geoDistance) return false;
return true;
}
@Override
public String toString() {
return "GeoDistanceFilter(" + indexFieldData.getFieldNames().indexName() + ", " + geoDistance + ", " + distance + ", " + lat + ", " + lon + ")";
}
@Override
public int hashCode() {
int result;
long temp;
temp = lat != +0.0d ? Double.doubleToLongBits(lat) : 0L;
result = (int) (temp ^ (temp >>> 32));
temp = lon != +0.0d ? Double.doubleToLongBits(lon) : 0L;
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = distance != +0.0d ? Double.doubleToLongBits(distance) : 0L;
result = 31 * result + (int) (temp ^ (temp >>> 32));
result = 31 * result + (geoDistance != null ? geoDistance.hashCode() : 0);
result = 31 * result + indexFieldData.getFieldNames().indexName().hashCode();
return result;
}
public static class GeoDistanceDocSet extends MatchDocIdSet {
private final double distance; // in miles
private final GeoPointValues values;
private final GeoDistance.FixedSourceDistance fixedSourceDistance;
private final GeoDistance.DistanceBoundingCheck distanceBoundingCheck;
public GeoDistanceDocSet(int maxDoc, @Nullable Bits acceptDocs, GeoPointValues values, GeoDistance.FixedSourceDistance fixedSourceDistance, GeoDistance.DistanceBoundingCheck distanceBoundingCheck,
double distance) {
super(maxDoc, acceptDocs);
this.values = values;
this.fixedSourceDistance = fixedSourceDistance;
this.distanceBoundingCheck = distanceBoundingCheck;
this.distance = distance;
}
@Override
public boolean isCacheable() {
return true;
}
@Override
protected boolean matchDoc(int doc) {
final int length = values.setDocument(doc);
for (int i = 0; i < length; i++) {
GeoPoint point = values.nextValue();
if (distanceBoundingCheck.isWithin(point.lat(), point.lon())) {
double d = fixedSourceDistance.calculate(point.lat(), point.lon());
if (d < distance) {
return true;
}
}
}
return false;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_search_geo_GeoDistanceFilter.java
|
1,689 |
public class ONetworkProtocolBinary extends OBinaryNetworkProtocolAbstract {
protected OClientConnection connection;
protected OUser account;
private String dbType;
public ONetworkProtocolBinary() {
super("OrientDB <- BinaryClient/?");
}
public ONetworkProtocolBinary(final String iThreadName) {
super(iThreadName);
}
@Override
public void config(final OServer iServer, final Socket iSocket, final OContextConfiguration iConfig,
final List<?> iStatelessCommands, List<?> iStatefulCommands) throws IOException {
// CREATE THE CLIENT CONNECTION
connection = OClientConnectionManager.instance().connect(this);
super.config(iServer, iSocket, iConfig, iStatelessCommands, iStatefulCommands);
// SEND PROTOCOL VERSION
channel.writeShort((short) OChannelBinaryProtocol.CURRENT_PROTOCOL_VERSION);
channel.flush();
start();
setName("OrientDB <- BinaryClient (" + iSocket.getRemoteSocketAddress() + ")");
}
@Override
public int getVersion() {
return OChannelBinaryProtocol.CURRENT_PROTOCOL_VERSION;
}
@Override
protected void onBeforeRequest() throws IOException {
waitNodeIsOnline();
connection = OClientConnectionManager.instance().getConnection(clientTxId);
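// A NEGATIVE clientTxId MEANS THE CLIENT HAS NO SESSION YET: REGISTER A NEW
// CONNECTION, CARRYING OVER ANY PREVIOUSLY NEGOTIATED PROTOCOL VERSION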
if (clientTxId < 0) {
short protocolId = 0;
if (connection != null)
protocolId = connection.data.protocolVersion;
connection = OClientConnectionManager.instance().connect(this);
if (connection != null)
connection.data.protocolVersion = protocolId;
}
if (connection != null) {
ODatabaseRecordThreadLocal.INSTANCE.set(connection.database);
if (connection.database != null) {
connection.data.lastDatabase = connection.database.getName();
connection.data.lastUser = connection.database.getUser() != null ? connection.database.getUser().getName() : null;
} else {
connection.data.lastDatabase = null;
connection.data.lastUser = null;
}
++connection.data.totalRequests;
setDataCommandInfo("Listening");
connection.data.commandDetail = "-";
connection.data.lastCommandReceived = System.currentTimeMillis();
} else {
if (requestType != OChannelBinaryProtocol.REQUEST_DB_CLOSE && requestType != OChannelBinaryProtocol.REQUEST_SHUTDOWN) {
OLogManager.instance().debug(this, "Found unknown session %d, shutdown current connection", clientTxId);
shutdown();
throw new OIOException("Found unknown session " + clientTxId);
}
}
OServerPluginHelper.invokeHandlerCallbackOnBeforeClientRequest(server, connection, (byte) requestType);
}
@Override
protected void onAfterRequest() throws IOException {
OServerPluginHelper.invokeHandlerCallbackOnAfterClientRequest(server, connection, (byte) requestType);
if (connection != null) {
if (connection.database != null)
if (!connection.database.isClosed())
connection.database.getLevel1Cache().clear();
connection.data.lastCommandExecutionTime = System.currentTimeMillis() - connection.data.lastCommandReceived;
connection.data.totalCommandExecutionTime += connection.data.lastCommandExecutionTime;
connection.data.lastCommandInfo = connection.data.commandInfo;
connection.data.lastCommandDetail = connection.data.commandDetail;
setDataCommandInfo("Listening");
connection.data.commandDetail = "-";
}
}
protected boolean executeRequest() throws IOException {
switch (requestType) {
case OChannelBinaryProtocol.REQUEST_SHUTDOWN:
shutdownConnection();
break;
case OChannelBinaryProtocol.REQUEST_CONNECT:
connect();
break;
case OChannelBinaryProtocol.REQUEST_DB_LIST:
listDatabases();
break;
case OChannelBinaryProtocol.REQUEST_DB_OPEN:
openDatabase();
break;
case OChannelBinaryProtocol.REQUEST_DB_RELOAD:
reloadDatabase();
break;
case OChannelBinaryProtocol.REQUEST_DB_CREATE:
createDatabase();
break;
case OChannelBinaryProtocol.REQUEST_DB_CLOSE:
closeDatabase();
break;
case OChannelBinaryProtocol.REQUEST_DB_EXIST:
existsDatabase();
break;
case OChannelBinaryProtocol.REQUEST_DB_DROP:
dropDatabase();
break;
case OChannelBinaryProtocol.REQUEST_DB_SIZE:
sizeDatabase();
break;
case OChannelBinaryProtocol.REQUEST_DB_COUNTRECORDS:
countDatabaseRecords();
break;
case OChannelBinaryProtocol.REQUEST_DB_COPY:
copyDatabase();
break;
case OChannelBinaryProtocol.REQUEST_REPLICATION:
replicationDatabase();
break;
case OChannelBinaryProtocol.REQUEST_CLUSTER:
distributedCluster();
break;
case OChannelBinaryProtocol.REQUEST_DATASEGMENT_ADD:
addDataSegment();
break;
case OChannelBinaryProtocol.REQUEST_DATASEGMENT_DROP:
dropDataSegment();
break;
case OChannelBinaryProtocol.REQUEST_DATACLUSTER_COUNT:
countClusters();
break;
case OChannelBinaryProtocol.REQUEST_DATACLUSTER_DATARANGE:
rangeCluster();
break;
case OChannelBinaryProtocol.REQUEST_DATACLUSTER_ADD:
addCluster();
break;
case OChannelBinaryProtocol.REQUEST_DATACLUSTER_DROP:
removeCluster();
break;
case OChannelBinaryProtocol.REQUEST_RECORD_METADATA:
readRecordMetadata();
break;
case OChannelBinaryProtocol.REQUEST_RECORD_LOAD:
readRecord();
break;
case OChannelBinaryProtocol.REQUEST_RECORD_CREATE:
createRecord();
break;
case OChannelBinaryProtocol.REQUEST_RECORD_UPDATE:
updateRecord();
break;
case OChannelBinaryProtocol.REQUEST_RECORD_DELETE:
deleteRecord();
break;
case OChannelBinaryProtocol.REQUEST_POSITIONS_HIGHER:
higherPositions();
break;
case OChannelBinaryProtocol.REQUEST_POSITIONS_CEILING:
ceilingPositions();
break;
case OChannelBinaryProtocol.REQUEST_POSITIONS_LOWER:
lowerPositions();
break;
case OChannelBinaryProtocol.REQUEST_POSITIONS_FLOOR:
floorPositions();
break;
case OChannelBinaryProtocol.REQUEST_COUNT:
throw new UnsupportedOperationException("Operation OChannelBinaryProtocol.REQUEST_COUNT has been deprecated");
case OChannelBinaryProtocol.REQUEST_COMMAND:
command();
break;
case OChannelBinaryProtocol.REQUEST_TX_COMMIT:
commit();
break;
case OChannelBinaryProtocol.REQUEST_CONFIG_GET:
configGet();
break;
case OChannelBinaryProtocol.REQUEST_CONFIG_SET:
configSet();
break;
case OChannelBinaryProtocol.REQUEST_CONFIG_LIST:
configList();
break;
case OChannelBinaryProtocol.REQUEST_DB_FREEZE:
freezeDatabase();
break;
case OChannelBinaryProtocol.REQUEST_DB_RELEASE:
releaseDatabase();
break;
case OChannelBinaryProtocol.REQUEST_DATACLUSTER_FREEZE:
freezeCluster();
break;
case OChannelBinaryProtocol.REQUEST_DATACLUSTER_RELEASE:
releaseCluster();
break;
case OChannelBinaryProtocol.REQUEST_RECORD_CLEAN_OUT:
cleanOutRecord();
break;
default:
setDataCommandInfo("Command not supported");
return false;
}
return true;
}
private void lowerPositions() throws IOException {
setDataCommandInfo("Retrieve lower positions");
final int clusterId = channel.readInt();
final OClusterPosition clusterPosition = channel.readClusterPosition();
beginResponse();
try {
sendOk(clientTxId);
final OPhysicalPosition[] previousPositions = connection.database.getStorage().lowerPhysicalPositions(clusterId,
new OPhysicalPosition(clusterPosition));
if (previousPositions != null) {
channel.writeInt(previousPositions.length);
for (final OPhysicalPosition physicalPosition : previousPositions) {
channel.writeClusterPosition(physicalPosition.clusterPosition);
channel.writeInt(physicalPosition.dataSegmentId);
channel.writeLong(physicalPosition.dataSegmentPos);
channel.writeInt(physicalPosition.recordSize);
channel.writeVersion(physicalPosition.recordVersion);
}
} else {
channel.writeInt(0); // NO MORE RECORDS
}
} finally {
endResponse();
}
}
private void floorPositions() throws IOException {
setDataCommandInfo("Retrieve floor positions");
final int clusterId = channel.readInt();
final OClusterPosition clusterPosition = channel.readClusterPosition();
beginResponse();
try {
sendOk(clientTxId);
final OPhysicalPosition[] previousPositions = connection.database.getStorage().floorPhysicalPositions(clusterId,
new OPhysicalPosition(clusterPosition));
if (previousPositions != null) {
channel.writeInt(previousPositions.length);
for (final OPhysicalPosition physicalPosition : previousPositions) {
channel.writeClusterPosition(physicalPosition.clusterPosition);
channel.writeInt(physicalPosition.dataSegmentId);
channel.writeLong(physicalPosition.dataSegmentPos);
channel.writeInt(physicalPosition.recordSize);
channel.writeVersion(physicalPosition.recordVersion);
}
} else {
channel.writeInt(0); // NO MORE RECORDS
}
} finally {
endResponse();
}
}
private void higherPositions() throws IOException {
setDataCommandInfo("Retrieve higher positions");
final int clusterId = channel.readInt();
final OClusterPosition clusterPosition = channel.readClusterPosition();
beginResponse();
try {
sendOk(clientTxId);
OPhysicalPosition[] nextPositions = connection.database.getStorage().higherPhysicalPositions(clusterId,
new OPhysicalPosition(clusterPosition));
if (nextPositions != null) {
channel.writeInt(nextPositions.length);
for (final OPhysicalPosition physicalPosition : nextPositions) {
channel.writeClusterPosition(physicalPosition.clusterPosition);
channel.writeInt(physicalPosition.dataSegmentId);
channel.writeLong(physicalPosition.dataSegmentPos);
channel.writeInt(physicalPosition.recordSize);
channel.writeVersion(physicalPosition.recordVersion);
}
} else {
channel.writeInt(0); // NO MORE RECORDS
}
} finally {
endResponse();
}
}
private void ceilingPositions() throws IOException {
setDataCommandInfo("Retrieve ceiling positions");
final int clusterId = channel.readInt();
final OClusterPosition clusterPosition = channel.readClusterPosition();
beginResponse();
try {
sendOk(clientTxId);
final OPhysicalPosition[] previousPositions = connection.database.getStorage().ceilingPhysicalPositions(clusterId,
new OPhysicalPosition(clusterPosition));
if (previousPositions != null) {
channel.writeInt(previousPositions.length);
for (final OPhysicalPosition physicalPosition : previousPositions) {
channel.writeClusterPosition(physicalPosition.clusterPosition);
channel.writeInt(physicalPosition.dataSegmentId);
channel.writeLong(physicalPosition.dataSegmentPos);
channel.writeInt(physicalPosition.recordSize);
channel.writeVersion(physicalPosition.recordVersion);
}
} else {
channel.writeInt(0); // NO MORE RECORDS
}
} finally {
endResponse();
}
}
protected void checkServerAccess(final String iResource) {
if (connection.serverUser == null)
throw new OSecurityAccessException("Server user not authenticated.");
if (!server.authenticate(connection.serverUser.name, null, iResource))
throw new OSecurityAccessException("User '" + connection.serverUser.name + "' cannot access to the resource [" + iResource
+ "]. Use another server user or change permission in the file config/orientdb-server-config.xml");
}
protected ODatabaseComplex<?> openDatabase(final ODatabaseComplex<?> database, final String iUser, final String iPassword) {
if (database.isClosed())
if (database.getStorage() instanceof OStorageMemory && !database.exists())
database.create();
else {
try {
database.open(iUser, iPassword);
} catch (OSecurityException e) {
// TRY WITH SERVER'S USER
try {
connection.serverUser = server.serverLogin(iUser, iPassword, "database.passthrough");
} catch (OSecurityException ex) {
throw e;
}
// SERVER AUTHENTICATED, BYPASS SECURITY
database.setProperty(ODatabase.OPTIONS.SECURITY.toString(), Boolean.FALSE);
database.open(iUser, iPassword);
}
}
return database;
}
protected void addDataSegment() throws IOException {
setDataCommandInfo("Add data segment");
if (!isConnectionAlive())
return;
final String name = channel.readString();
final String location = channel.readString();
final int num = connection.database.addDataSegment(name, location);
beginResponse();
try {
sendOk(clientTxId);
channel.writeInt(num);
} finally {
endResponse();
}
}
protected void dropDataSegment() throws IOException {
setDataCommandInfo("Drop data segment");
if (!isConnectionAlive())
return;
final String name = channel.readString();
boolean result = connection.database.dropDataSegment(name);
beginResponse();
try {
sendOk(clientTxId);
channel.writeByte((byte) (result ? 1 : 0));
} finally {
endResponse();
}
}
protected void removeCluster() throws IOException {
setDataCommandInfo("Remove cluster");
if (!isConnectionAlive())
return;
final int id = channel.readShort();
final String clusterName = connection.database.getClusterNameById(id);
if (clusterName == null)
throw new IllegalArgumentException("Cluster " + id
+ " doesn't exist anymore. Refresh the db structure or just reconnect to the database");
boolean result = connection.database.dropCluster(clusterName, true);
beginResponse();
try {
sendOk(clientTxId);
channel.writeByte((byte) (result ? 1 : 0));
} finally {
endResponse();
}
}
protected void addCluster() throws IOException {
setDataCommandInfo("Add cluster");
if (!isConnectionAlive())
return;
final String type = channel.readString();
final String name = channel.readString();
int clusterId = -1;
final String location;
if (connection.data.protocolVersion >= 10 || type.equalsIgnoreCase("PHYSICAL"))
location = channel.readString();
else
location = null;
final String dataSegmentName;
if (connection.data.protocolVersion >= 10)
dataSegmentName = channel.readString();
else {
channel.readInt(); // OLD INITIAL SIZE, NO LONGER USED
dataSegmentName = null;
}
if (connection.data.protocolVersion >= 18)
clusterId = channel.readShort();
Object[] params = null;
final int num;
if (clusterId < 0)
num = connection.database.addCluster(type, name, location, dataSegmentName, params);
else
num = connection.database.addCluster(type, name, clusterId, location, dataSegmentName, params);
beginResponse();
try {
sendOk(clientTxId);
channel.writeShort((short) num);
} finally {
endResponse();
}
}
protected void rangeCluster() throws IOException {
setDataCommandInfo("Get the begin/end range of data in cluster");
if (!isConnectionAlive())
return;
OClusterPosition[] pos = connection.database.getStorage().getClusterDataRange(channel.readShort());
beginResponse();
try {
sendOk(clientTxId);
channel.writeClusterPosition(pos[0]);
channel.writeClusterPosition(pos[1]);
} finally {
endResponse();
}
}
protected void countClusters() throws IOException {
setDataCommandInfo("Count cluster elements");
if (!isConnectionAlive())
return;
int[] clusterIds = new int[channel.readShort()];
for (int i = 0; i < clusterIds.length; ++i)
clusterIds[i] = channel.readShort();
boolean countTombstones = false;
if (connection.data.protocolVersion >= 13)
countTombstones = channel.readByte() > 0;
final long count = connection.database.countClusterElements(clusterIds, countTombstones);
beginResponse();
try {
sendOk(clientTxId);
channel.writeLong(count);
} finally {
endResponse();
}
}
protected void reloadDatabase() throws IOException {
setDataCommandInfo("Reload database information");
if (!isConnectionAlive())
return;
beginResponse();
try {
sendOk(clientTxId);
sendDatabaseInformation();
} finally {
endResponse();
}
}
protected void openDatabase() throws IOException {
setDataCommandInfo("Open database");
readConnectionData();
final String dbURL = channel.readString();
dbType = ODatabaseDocument.TYPE;
if (connection.data.protocolVersion >= 8)
// READ DB-TYPE FROM THE CLIENT
dbType = channel.readString();
final String user = channel.readString();
final String passwd = channel.readString();
connection.database = (ODatabaseDocumentTx) server.openDatabase(dbType, dbURL, user, passwd);
connection.rawDatabase = ((ODatabaseRaw) ((ODatabaseComplex<?>) connection.database.getUnderlying()).getUnderlying());
if (connection.database.getStorage() instanceof OStorageProxy && !loadUserFromSchema(user, passwd)) {
sendError(clientTxId, new OSecurityAccessException(connection.database.getName(),
"User or password not valid for database: '" + connection.database.getName() + "'"));
} else {
beginResponse();
try {
sendOk(clientTxId);
channel.writeInt(connection.id);
sendDatabaseInformation();
final OServerPlugin plugin = server.getPlugin("cluster");
ODocument distributedCfg = null;
if (plugin != null && plugin instanceof ODistributedServerManager)
distributedCfg = ((ODistributedServerManager) plugin).getClusterConfiguration();
channel.writeBytes(distributedCfg != null ? distributedCfg.toStream() : null);
if (connection.data.protocolVersion >= 14)
channel.writeString(OConstants.getVersion());
} finally {
endResponse();
}
}
}
protected void connect() throws IOException {
setDataCommandInfo("Connect");
readConnectionData();
connection.serverUser = server.serverLogin(channel.readString(), channel.readString(), "connect");
beginResponse();
try {
sendOk(clientTxId);
channel.writeInt(connection.id);
} finally {
endResponse();
}
}
protected void shutdownConnection() throws IOException {
setDataCommandInfo("Shutdowning");
OLogManager.instance().info(this, "Received shutdown command from the remote client %s:%d", channel.socket.getInetAddress(),
channel.socket.getPort());
final String user = channel.readString();
final String passwd = channel.readString();
if (server.authenticate(user, passwd, "shutdown")) {
OLogManager.instance().info(this, "Remote client %s:%d authenticated. Starting shutdown of server...",
channel.socket.getInetAddress(), channel.socket.getPort());
beginResponse();
try {
sendOk(clientTxId);
} finally {
endResponse();
}
channel.close();
server.shutdown();
System.exit(0);
}
OLogManager.instance().error(this, "Authentication error of remote client %s:%d: shutdown is aborted.",
channel.socket.getInetAddress(), channel.socket.getPort());
sendError(clientTxId, new OSecurityAccessException("Invalid user/password to shutdown the server"));
}
protected void copyDatabase() throws IOException {
setDataCommandInfo("Copy the database to a remote server");
final String dbUrl = channel.readString();
final String dbUser = channel.readString();
final String dbPassword = channel.readString();
final String remoteServerName = channel.readString();
final String remoteServerEngine = channel.readString();
checkServerAccess("database.copy");
final ODatabaseDocumentTx db = (ODatabaseDocumentTx) server.openDatabase(ODatabaseDocument.TYPE, dbUrl, dbUser, dbPassword);
beginResponse();
try {
sendOk(clientTxId);
} finally {
endResponse();
}
}
protected void replicationDatabase() throws IOException {
setDataCommandInfo("Replication command");
final ODocument request = new ODocument(channel.readBytes());
final ODistributedServerManager dManager = server.getDistributedManager();
if (dManager == null)
throw new OConfigurationException("No distributed manager configured");
final String operation = request.field("operation");
ODocument response = null;
if (operation.equals("start")) {
checkServerAccess("server.replication.start");
} else if (operation.equals("stop")) {
checkServerAccess("server.replication.stop");
} else if (operation.equals("config")) {
checkServerAccess("server.replication.config");
response = new ODocument().fromJSON(dManager.getDatabaseConfiguration((String) request.field("db")).serialize()
.toJSON("prettyPrint"));
}
sendResponse(response);
}
protected void distributedCluster() throws IOException {
setDataCommandInfo("Cluster status");
final ODocument req = new ODocument(channel.readBytes());
ODocument response = null;
final String operation = req.field("operation");
if (operation == null)
throw new IllegalArgumentException("Cluster operation is null");
if (operation.equals("status")) {
final OServerPlugin plugin = server.getPlugin("cluster");
if (plugin != null && plugin instanceof ODistributedServerManager)
response = ((ODistributedServerManager) plugin).getClusterConfiguration();
} else
throw new IllegalArgumentException("Cluster operation '" + operation + "' is not supported");
sendResponse(response);
}
protected void countDatabaseRecords() throws IOException {
setDataCommandInfo("Database count records");
if (!isConnectionAlive())
return;
beginResponse();
try {
sendOk(clientTxId);
channel.writeLong(connection.database.getStorage().countRecords());
} finally {
endResponse();
}
}
protected void sizeDatabase() throws IOException {
setDataCommandInfo("Database size");
if (!isConnectionAlive())
return;
beginResponse();
try {
sendOk(clientTxId);
channel.writeLong(connection.database.getStorage().getSize());
} finally {
endResponse();
}
}
protected void dropDatabase() throws IOException {
setDataCommandInfo("Drop database");
String dbName = channel.readString();
String storageType;
if (connection.data.protocolVersion >= 16)
storageType = channel.readString();
else
storageType = "local";
checkServerAccess("database.delete");
connection.database = getDatabaseInstance(dbName, ODatabaseDocument.TYPE, storageType);
if (connection.database.exists()) {
OLogManager.instance().info(this, "Dropped database '%s'", connection.database.getName());
if (connection.database.isClosed())
openDatabase(connection.database, connection.serverUser.name, connection.serverUser.password);
connection.database.drop();
connection.close();
} else {
throw new OStorageException("Database with name '" + dbName + "' doesn't exits.");
}
beginResponse();
try {
sendOk(clientTxId);
} finally {
endResponse();
}
}
protected void existsDatabase() throws IOException {
setDataCommandInfo("Exists database");
final String dbName = channel.readString();
final String storageType;
if (connection.data.protocolVersion >= 16)
storageType = channel.readString();
else
storageType = "local";
checkServerAccess("database.exists");
connection.database = getDatabaseInstance(dbName, ODatabaseDocument.TYPE, storageType);
beginResponse();
try {
sendOk(clientTxId);
channel.writeByte((byte) (connection.database.exists() ? 1 : 0));
} finally {
endResponse();
}
}
protected void createDatabase() throws IOException {
setDataCommandInfo("Create database");
String dbName = channel.readString();
String dbType = ODatabaseDocument.TYPE;
if (connection.data.protocolVersion >= 8)
// READ DB-TYPE FROM THE CLIENT
dbType = channel.readString();
String storageType = channel.readString();
checkServerAccess("database.create");
checkStorageExistence(dbName);
connection.database = getDatabaseInstance(dbName, dbType, storageType);
createDatabase(connection.database, null, null);
connection.rawDatabase = (((ODatabaseComplex<?>) connection.database.getUnderlying()).getUnderlying());
beginResponse();
try {
sendOk(clientTxId);
} finally {
endResponse();
}
}
protected void closeDatabase() throws IOException {
setDataCommandInfo("Close Database");
if (connection != null) {
if (connection.data.protocolVersion > 0 && connection.data.protocolVersion < 9)
// OLD CLIENTS WAIT FOR AN OK
sendOk(clientTxId);
if (OClientConnectionManager.instance().disconnect(connection.id))
sendShutdown();
}
}
protected void configList() throws IOException {
setDataCommandInfo("List config");
checkServerAccess("server.config.get");
beginResponse();
try {
sendOk(clientTxId);
channel.writeShort((short) OGlobalConfiguration.values().length);
for (OGlobalConfiguration cfg : OGlobalConfiguration.values()) {
String key;
try {
key = cfg.getKey();
} catch (Exception e) {
key = "?";
}
String value;
try {
value = cfg.getValueAsString() != null ? cfg.getValueAsString() : "";
} catch (Exception e) {
value = "";
}
channel.writeString(key);
channel.writeString(value);
}
} finally {
endResponse();
}
}
protected void configSet() throws IOException {
setDataCommandInfo("Get config");
checkServerAccess("server.config.set");
final String key = channel.readString();
final String value = channel.readString();
final OGlobalConfiguration cfg = OGlobalConfiguration.findByKey(key);
if (cfg != null)
cfg.setValue(value);
beginResponse();
try {
sendOk(clientTxId);
} finally {
endResponse();
}
}
protected void configGet() throws IOException {
setDataCommandInfo("Get config");
checkServerAccess("server.config.get");
final String key = channel.readString();
final OGlobalConfiguration cfg = OGlobalConfiguration.findByKey(key);
String cfgValue = cfg != null ? cfg.getValueAsString() : "";
beginResponse();
try {
sendOk(clientTxId);
channel.writeString(cfgValue);
} finally {
endResponse();
}
}
protected void commit() throws IOException {
setDataCommandInfo("Transaction commit");
if (!isConnectionAlive())
return;
final OTransactionOptimisticProxy tx = new OTransactionOptimisticProxy((ODatabaseRecordTx) connection.database.getUnderlying(),
channel);
try {
connection.database.begin(tx);
try {
connection.database.commit();
beginResponse();
try {
sendOk(clientTxId);
// SEND BACK ALL THE RECORD IDS FOR THE CREATED RECORDS
channel.writeInt(tx.getCreatedRecords().size());
for (Entry<ORecordId, ORecordInternal<?>> entry : tx.getCreatedRecords().entrySet()) {
channel.writeRID(entry.getKey());
channel.writeRID(entry.getValue().getIdentity());
// IF THE NEW OBJECT HAS VERSION > 0, IT HAS ALSO BEEN UPDATED IN THE SAME TX. THIS HAPPENS FOR GRAPHS
if (entry.getValue().getRecordVersion().getCounter() > 0)
tx.getUpdatedRecords().put((ORecordId) entry.getValue().getIdentity(), entry.getValue());
}
// SEND BACK ALL THE NEW VERSIONS FOR THE UPDATED RECORDS
channel.writeInt(tx.getUpdatedRecords().size());
for (Entry<ORecordId, ORecordInternal<?>> entry : tx.getUpdatedRecords().entrySet()) {
channel.writeRID(entry.getKey());
channel.writeVersion(entry.getValue().getRecordVersion());
}
} finally {
endResponse();
}
} catch (Exception e) {
connection.database.rollback();
sendError(clientTxId, e);
}
} catch (OTransactionAbortedException e) {
// TX ABORTED BY THE CLIENT
} catch (Exception e) {
// Error during TX initialization, possibly index constraints violation.
tx.rollback();
tx.close();
sendError(clientTxId, e);
}
}
protected void command() throws IOException {
setDataCommandInfo("Execute remote command");
final boolean asynch = channel.readByte() == 'a';
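// THE FIRST BYTE SELECTS THE EXECUTION STYLE: 'a' = ASYNCHRONOUS (RESULTS STREAMED
// THROUGH THE LISTENER), ANYTHING ELSE = SYNCHRONOUS (SINGLE RESPONSE FRAME)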
final OCommandRequestText command = (OCommandRequestText) OStreamSerializerAnyStreamable.INSTANCE.fromStream(channel
.readBytes());
connection.data.commandDetail = command.getText();
// ENABLES THE CACHE TO IMPROVE PERFORMANCE OF COMPLEX COMMANDS LIKE TRAVERSE
// connection.database.getLevel1Cache().setEnable(true);
beginResponse();
try {
final OAbstractCommandResultListener listener;
if (asynch) {
listener = new OAsyncCommandResultListener(this, clientTxId);
command.setResultListener(listener);
} else
listener = new OSyncCommandResultListener();
final long serverTimeout = OGlobalConfiguration.COMMAND_TIMEOUT.getValueAsLong();
if (serverTimeout > 0 && command.getTimeoutTime() > serverTimeout)
// FORCE THE SERVER'S TIMEOUT
command.setTimeout(serverTimeout, command.getTimeoutStrategy());
if (!isConnectionAlive())
return;
// ASSIGN THE PARSED FETCH PLAN
listener.setFetchPlan(((OCommandRequestInternal) connection.database.command(command)).getFetchPlan());
final Object result = ((OCommandRequestInternal) connection.database.command(command)).execute();
if (asynch) {
// ASYNCHRONOUS
if (listener.isEmpty())
try {
sendOk(clientTxId);
} catch (IOException e1) {
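// IGNORED: SENDING THE ASYNC OK IS BEST-EFFORT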
}
} else {
// SYNCHRONOUS
sendOk(clientTxId);
if (result == null) {
// NULL VALUE
channel.writeByte((byte) 'n');
} else if (result instanceof OIdentifiable) {
// RECORD
channel.writeByte((byte) 'r');
listener.result(result);
writeIdentifiable((OIdentifiable) result);
} else if (OMultiValue.isMultiValue(result)) {
channel.writeByte((byte) 'l');
channel.writeInt(OMultiValue.getSize(result));
for (Object o : OMultiValue.getMultiValueIterable(result)) {
listener.result(o);
writeIdentifiable((OIdentifiable) o);
}
} else {
// ANY OTHER (INCLUDING LITERALS)
channel.writeByte((byte) 'a');
final StringBuilder value = new StringBuilder();
listener.result(result);
ORecordSerializerStringAbstract.fieldTypeToString(value, OType.getTypeByClass(result.getClass()), result);
channel.writeString(value.toString());
}
}
if (asynch || connection.data.protocolVersion >= 17) {
// SEND FETCHED RECORDS TO LOAD IN CLIENT CACHE
for (ODocument doc : listener.getFetchedRecordsToSend()) {
channel.writeByte((byte) 2); // CLIENT CACHE RECORD. IT ISN'T PART OF THE RESULT SET
writeIdentifiable(doc);
}
channel.writeByte((byte) 0); // NO MORE RECORDS
}
} finally {
endResponse();
}
}
private boolean isConnectionAlive() {
if (connection == null || connection.database == null) {
// CONNECTION/DATABASE CLOSED, KILL IT
OClientConnectionManager.instance().kill(connection);
return false;
}
return true;
}
protected void deleteRecord() throws IOException {
setDataCommandInfo("Delete record");
if (!isConnectionAlive())
return;
final ORID rid = channel.readRID();
final ORecordVersion version = channel.readVersion();
final byte mode = channel.readByte();
final int result = deleteRecord(connection.database, rid, version);
if (mode < 2) {
beginResponse();
try {
sendOk(clientTxId);
channel.writeByte((byte) result);
} finally {
endResponse();
}
}
}
protected void cleanOutRecord() throws IOException {
setDataCommandInfo("Clean out record");
if (!isConnectionAlive())
return;
final ORID rid = channel.readRID();
final ORecordVersion version = channel.readVersion();
final byte mode = channel.readByte();
final int result = cleanOutRecord(connection.database, rid, version);
if (mode < 2) {
beginResponse();
try {
sendOk(clientTxId);
channel.writeByte((byte) result);
} finally {
endResponse();
}
}
}
/**
* VERSION MANAGEMENT:<br/>
* -1 : DOCUMENT UPDATE, NO VERSION CONTROL<br/>
* -2 : DOCUMENT UPDATE, NO VERSION CONTROL, NO VERSION INCREMENT<br/>
* -3 : DOCUMENT ROLLBACK, DECREMENT VERSION<br/>
* >-1 : MVCC CONTROL, RECORD UPDATE AND VERSION INCREMENT<br/>
* <-3 : WRONG VERSION VALUE
*
* @throws IOException
*/
protected void updateRecord() throws IOException {
setDataCommandInfo("Update record");
if (!isConnectionAlive())
return;
final ORecordId rid = channel.readRID();
final byte[] buffer = channel.readBytes();
final ORecordVersion version = channel.readVersion();
final byte recordType = channel.readByte();
final byte mode = channel.readByte();
final ORecordVersion newVersion = updateRecord(connection.database, rid, buffer, version, recordType);
if (mode < 2) {
beginResponse();
try {
sendOk(clientTxId);
channel.writeVersion(newVersion);
} finally {
endResponse();
}
}
}
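// ILLUSTRATIVE WIRE ORDER FOR AN UPDATE REQUEST (A SKETCH OF WHAT THE METHOD ABOVE
// READS, NOT A PROTOCOL SPEC): RID, RECORD BYTES, LAST-READ VERSION, RECORD TYPE,
// MODE; A VERSION >= 0 TRIGGERS THE MVCC CHECK DESCRIBED IN THE JAVADOC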
protected void createRecord() throws IOException {
setDataCommandInfo("Create record");
if (!isConnectionAlive())
return;
final int dataSegmentId = connection.data.protocolVersion >= 10 ? channel.readInt() : 0;
final ORecordId rid = new ORecordId(channel.readShort(), ORID.CLUSTER_POS_INVALID);
final byte[] buffer = channel.readBytes();
final byte recordType = channel.readByte();
final byte mode = channel.readByte();
final ORecord<?> record = createRecord(connection.database, rid, buffer, recordType, dataSegmentId);
if (mode < 2) {
beginResponse();
try {
sendOk(clientTxId);
channel.writeClusterPosition(record.getIdentity().getClusterPosition());
if (connection.data.protocolVersion >= 11)
channel.writeVersion(record.getRecordVersion());
} finally {
endResponse();
}
}
}
protected void readRecordMetadata() throws IOException {
setDataCommandInfo("Record metadata");
final ORID rid = channel.readRID();
beginResponse();
try {
final ORecordMetadata metadata = connection.database.getRecordMetadata(rid);
sendOk(clientTxId);
channel.writeRID(metadata.getRecordId());
channel.writeVersion(metadata.getRecordVersion());
} finally {
endResponse();
}
}
protected void readRecord() throws IOException {
setDataCommandInfo("Load record");
if (!isConnectionAlive())
return;
final ORecordId rid = channel.readRID();
final String fetchPlanString = channel.readString();
boolean ignoreCache = false;
if (connection.data.protocolVersion >= 9)
ignoreCache = channel.readByte() == 1;
boolean loadTombstones = false;
if (connection.data.protocolVersion >= 13)
loadTombstones = channel.readByte() > 0;
if (rid.clusterId == 0 && rid.clusterPosition.longValue() == 0) {
// @COMPATIBILITY 0.9.25
// SEND THE DB CONFIGURATION INSTEAD SINCE IT WAS ON RECORD 0:0
OFetchHelper.checkFetchPlanValid(fetchPlanString);
beginResponse();
try {
sendOk(clientTxId);
channel.writeByte((byte) 1);
channel.writeBytes(connection.database.getStorage().getConfiguration().toStream());
channel.writeVersion(OVersionFactory.instance().createVersion());
channel.writeByte(ORecordBytes.RECORD_TYPE);
channel.writeByte((byte) 0); // NO MORE RECORDS
} finally {
endResponse();
}
} else {
final ORecordInternal<?> record = connection.database.load(rid, fetchPlanString, ignoreCache, loadTombstones);
beginResponse();
try {
sendOk(clientTxId);
if (record != null) {
channel.writeByte((byte) 1); // HAS RECORD
channel.writeBytes(record.toStream());
channel.writeVersion(record.getRecordVersion());
channel.writeByte(record.getRecordType());
if (fetchPlanString.length() > 0) {
// BUILD THE SERVER-SIDE RECORD TO ACCESS THE FETCH PLAN
if (record instanceof ODocument) {
final Map<String, Integer> fetchPlan = OFetchHelper.buildFetchPlan(fetchPlanString);
final Set<ODocument> recordsToSend = new HashSet<ODocument>();
final ODocument doc = (ODocument) record;
final OFetchListener listener = new ORemoteFetchListener(recordsToSend);
final OFetchContext context = new ORemoteFetchContext();
OFetchHelper.fetch(doc, doc, fetchPlan, listener, context, "");
// SEND RECORDS TO LOAD IN CLIENT CACHE
for (ODocument d : recordsToSend) {
if (d.getIdentity().isValid()) {
channel.writeByte((byte) 2); // CLIENT CACHE RECORD. IT ISN'T PART OF THE RESULT SET
writeIdentifiable(d);
}
}
}
}
}
channel.writeByte((byte) 0); // NO MORE RECORDS
} finally {
endResponse();
}
}
}
protected void beginResponse() {
channel.acquireWriteLock();
}
protected void endResponse() throws IOException {
channel.flush();
channel.releaseWriteLock();
}
protected void setDataCommandInfo(final String iCommandInfo) {
if (connection != null)
connection.data.commandInfo = iCommandInfo;
}
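// HANDSHAKE PAYLOAD, IN WIRE ORDER: DRIVER NAME, DRIVER VERSION, PROTOCOL VERSION, CLIENT ID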
protected void readConnectionData() throws IOException {
connection.data.driverName = channel.readString();
connection.data.driverVersion = channel.readString();
connection.data.protocolVersion = channel.readShort();
connection.data.clientId = channel.readString();
}
private void sendDatabaseInformation() throws IOException {
final Collection<? extends OCluster> clusters = connection.database.getStorage().getClusterInstances();
int clusterCount = 0;
for (OCluster c : clusters) {
if (c != null) {
++clusterCount;
}
}
if (connection.data.protocolVersion >= 7)
channel.writeShort((short) clusterCount);
else
channel.writeInt(clusterCount);
for (OCluster c : clusters) {
if (c != null) {
channel.writeString(c.getName());
channel.writeShort((short) c.getId());
channel.writeString(c.getType());
if (connection.data.protocolVersion >= 12)
channel.writeShort((short) c.getDataSegmentId());
}
}
}
@Override
public void startup() {
super.startup();
OServerPluginHelper.invokeHandlerCallbackOnClientConnection(server, connection);
}
@Override
public void shutdown() {
sendShutdown();
super.shutdown();
if (connection == null)
return;
OServerPluginHelper.invokeHandlerCallbackOnClientDisconnection(server, connection);
OClientConnectionManager.instance().disconnect(connection);
}
protected void sendOk(final int iClientTxId) throws IOException {
channel.writeByte(OChannelBinaryProtocol.RESPONSE_STATUS_OK);
channel.writeInt(iClientTxId);
}
private void listDatabases() throws IOException {
checkServerAccess("server.dblist");
final ODocument result = new ODocument();
result.field("databases", server.getAvailableStorageNames());
setDataCommandInfo("List databases");
beginResponse();
try {
sendOk(clientTxId);
channel.writeBytes(result.toStream());
} finally {
endResponse();
}
}
protected void sendError(final int iClientTxId, final Throwable t) throws IOException {
channel.acquireWriteLock();
try {
channel.writeByte(OChannelBinaryProtocol.RESPONSE_STATUS_ERROR);
channel.writeInt(iClientTxId);
Throwable current;
if (t instanceof OLockException && t.getCause() instanceof ODatabaseException)
// BYPASS THE DB POOL EXCEPTION TO PROPAGATE THE RIGHT SECURITY ONE
current = t.getCause();
else
current = t;
final Throwable original = current;
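// THE EXCEPTION CHAIN IS STREAMED AS (1, CLASS NAME, MESSAGE) TRIPLES TERMINATED BY
// A 0 BYTE; CLIENTS ON PROTOCOL >= 19 ALSO RECEIVE THE JAVA-SERIALIZED THROWABLE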
while (current != null) {
// MORE DETAILS ARE COMING AS EXCEPTION
channel.writeByte((byte) 1);
channel.writeString(current.getClass().getName());
channel.writeString(current.getMessage());
current = current.getCause();
}
channel.writeByte((byte) 0);
if (connection != null && connection.data.protocolVersion >= 19) {
final OMemoryStream memoryStream = new OMemoryStream();
final ObjectOutputStream objectOutputStream = new ObjectOutputStream(memoryStream);
objectOutputStream.writeObject(original);
objectOutputStream.flush();
final byte[] result = memoryStream.toByteArray();
objectOutputStream.close();
channel.writeBytes(result);
}
channel.flush();
if (OLogManager.instance().isLevelEnabled(logClientExceptions)) {
if (logClientFullStackTrace)
OLogManager.instance().log(this, logClientExceptions, "Sent run-time exception to the client %s: %s", t,
channel.socket.getRemoteSocketAddress(), t.toString());
else
OLogManager.instance().log(this, logClientExceptions, "Sent run-time exception to the client %s: %s", null,
channel.socket.getRemoteSocketAddress(), t.toString());
}
} catch (Exception e) {
if (e instanceof SocketException)
shutdown();
} finally {
channel.releaseWriteLock();
}
}
private boolean loadUserFromSchema(final String iUserName, final String iUserPassword) {
account = connection.database.getMetadata().getSecurity().authenticate(iUserName, iUserPassword);
return true;
}
@Override
protected void handleConnectionError(final OChannelBinaryServer iChannel, final Throwable e) {
super.handleConnectionError(channel, e);
OServerPluginHelper.invokeHandlerCallbackOnClientError(server, connection, e);
}
public String getType() {
return "binary";
}
protected void sendResponse(final ODocument iResponse) throws IOException {
beginResponse();
try {
sendOk(clientTxId);
channel.writeBytes(iResponse != null ? iResponse.toStream() : null);
} finally {
endResponse();
}
}
protected void freezeDatabase() throws IOException {
setDataCommandInfo("Freeze database");
String dbName = channel.readString();
checkServerAccess("database.freeze");
final String storageType;
if (connection.data.protocolVersion >= 16)
storageType = channel.readString();
else
storageType = "local";
connection.database = getDatabaseInstance(dbName, ODatabaseDocument.TYPE, storageType);
if (connection.database.exists()) {
OLogManager.instance().info(this, "Freezing database '%s'", connection.database.getURL());
if (connection.database.isClosed())
openDatabase(connection.database, connection.serverUser.name, connection.serverUser.password);
connection.database.freeze(true);
} else {
throw new OStorageException("Database with name '" + dbName + "' doesn't exist.");
}
beginResponse();
try {
sendOk(clientTxId);
} finally {
endResponse();
}
}
protected void releaseDatabase() throws IOException {
setDataCommandInfo("Release database");
String dbName = channel.readString();
checkServerAccess("database.release");
final String storageType;
if (connection.data.protocolVersion >= 16)
storageType = channel.readString();
else
storageType = "local";
connection.database = getDatabaseInstance(dbName, ODatabaseDocument.TYPE, storageType);
if (connection.database.exists()) {
OLogManager.instance().info(this, "Realising database '%s'", connection.database.getURL());
if (connection.database.isClosed())
openDatabase(connection.database, connection.serverUser.name, connection.serverUser.password);
connection.database.release();
} else {
throw new OStorageException("Database with name '" + dbName + "' doesn't exist.");
}
beginResponse();
try {
sendOk(clientTxId);
} finally {
endResponse();
}
}
protected void freezeCluster() throws IOException {
setDataCommandInfo("Freeze cluster");
final String dbName = channel.readString();
final int clusterId = channel.readShort();
checkServerAccess("database.freeze");
final String storageType;
if (connection.data.protocolVersion >= 16)
storageType = channel.readString();
else
storageType = "local";
connection.database = getDatabaseInstance(dbName, ODatabaseDocument.TYPE, storageType);
if (connection.database.exists()) {
OLogManager.instance().info(this, "Freezing database '%s' cluster %d", connection.database.getURL(), clusterId);
if (connection.database.isClosed()) {
openDatabase(connection.database, connection.serverUser.name, connection.serverUser.password);
}
connection.database.freezeCluster(clusterId);
} else {
throw new OStorageException("Database with name '" + dbName + "' doesn't exist.");
}
beginResponse();
try {
sendOk(clientTxId);
} finally {
endResponse();
}
}
protected void releaseCluster() throws IOException {
setDataCommandInfo("Release database");
final String dbName = channel.readString();
final int clusterId = channel.readShort();
checkServerAccess("database.release");
final String storageType;
if (connection.data.protocolVersion >= 16)
storageType = channel.readString();
else
storageType = "local";
connection.database = getDatabaseInstance(dbName, ODatabaseDocument.TYPE, storageType);
if (connection.database.exists()) {
OLogManager.instance().info(this, "Realising database '%s' cluster %d", connection.database.getURL(), clusterId);
if (connection.database.isClosed()) {
openDatabase(connection.database, connection.serverUser.name, connection.serverUser.password);
}
connection.database.releaseCluster(clusterId);
} else {
throw new OStorageException("Database with name '" + dbName + "' doesn't exist.");
}
beginResponse();
try {
sendOk(clientTxId);
} finally {
endResponse();
}
}
}
| 1no label
|
server_src_main_java_com_orientechnologies_orient_server_network_protocol_binary_ONetworkProtocolBinary.java
|
69 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class CloudyUtilityTest {
String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n" +
"<DescribeInstancesResponse xmlns=\"http://ec2.amazonaws.com/doc/2011-05-15/\">\n" +
" <requestId>c0f82bf8-b7f5-4cf1-bbfa-b95ea4bd38da</requestId>\n" +
" <reservationSet>\n" +
" <item>\n" +
" <reservationId>r-48ff3826</reservationId>\n" +
" <ownerId>665466731577</ownerId>\n" +
" <groupSet>\n" +
" <item>\n" +
" <groupId>sg-b67baddf</groupId>\n" +
" <groupName>hazelcast</groupName>\n" +
" </item>\n" +
" </groupSet>\n" +
" <instancesSet>\n" +
" <item>\n" +
" <instanceId>i-0a0c616a</instanceId>\n" +
" <imageId>ami-7f418316</imageId>\n" +
" <instanceState>\n" +
" <code>16</code>\n" +
" <name>running</name>\n" +
" </instanceState>\n" +
" <privateDnsName>domU-12-31-39-07-C5-C4.compute-1.internal</privateDnsName>\n" +
" <dnsName>ec2-50-17-19-37.compute-1.amazonaws.com</dnsName>\n" +
" <reason/>\n" +
" <keyName>hazelcast_key_pair</keyName>\n" +
" <amiLaunchIndex>0</amiLaunchIndex>\n" +
" <productCodes/>\n" +
" <instanceType>t1.micro</instanceType>\n" +
" <launchTime>2011-09-27T11:37:35.000Z</launchTime>\n" +
" <placement>\n" +
" <availabilityZone>us-east-1a</availabilityZone>\n" +
" <groupName/>\n" +
" <tenancy>default</tenancy>\n" +
" </placement>\n" +
" <kernelId>aki-805ea7e9</kernelId>\n" +
" <monitoring>\n" +
" <state>disabled</state>\n" +
" </monitoring>\n" +
" <privateIpAddress>10.209.198.50</privateIpAddress>\n" +
" <ipAddress>50.17.19.37</ipAddress>\n" +
" <groupSet>\n" +
" <item>\n" +
" <groupId>sg-b67baddf</groupId>\n" +
" <groupName>hazelcast</groupName>\n" +
" </item>\n" +
" </groupSet>\n" +
" <architecture>i386</architecture>\n" +
" <rootDeviceType>ebs</rootDeviceType>\n" +
" <rootDeviceName>/dev/sda1</rootDeviceName>\n" +
" <blockDeviceMapping>\n" +
" <item>\n" +
" <deviceName>/dev/sda1</deviceName>\n" +
" <ebs>\n" +
" <volumeId>vol-d5bdffbf</volumeId>\n" +
" <status>attached</status>\n" +
" <attachTime>2011-09-27T11:37:56.000Z</attachTime>\n" +
" <deleteOnTermination>true</deleteOnTermination>\n" +
" </ebs>\n" +
" </item>\n" +
" </blockDeviceMapping>\n" +
" <virtualizationType>paravirtual</virtualizationType>\n" +
" <clientToken/>\n" +
" <tagSet>\n" +
" <item>\n" +
" <key>name2</key>\n" +
" <value>value2</value>\n" +
" </item>\n" +
" <item>\n" +
" <key>Name1</key>\n" +
" <value>value1</value>\n" +
" </item>\n" +
" <item>\n" +
" <key>name</key>\n" +
" <value/>\n" +
" </item>\n" +
" </tagSet>\n" +
" <hypervisor>xen</hypervisor>\n" +
" </item>\n" +
" <item>\n" +
" <instanceId>i-0c0c616c</instanceId>\n" +
" <imageId>ami-7f418316</imageId>\n" +
" <instanceState>\n" +
" <code>16</code>\n" +
" <name>running</name>\n" +
" </instanceState>\n" +
" <privateDnsName>domU-12-31-39-07-C2-60.compute-1.internal</privateDnsName>\n" +
" <dnsName>ec2-50-16-102-143.compute-1.amazonaws.com</dnsName>\n" +
" <reason/>\n" +
" <keyName>hazelcast_key_pair</keyName>\n" +
" <amiLaunchIndex>1</amiLaunchIndex>\n" +
" <productCodes/>\n" +
" <instanceType>t1.micro</instanceType>\n" +
" <launchTime>2011-09-27T11:37:35.000Z</launchTime>\n" +
" <placement>\n" +
" <availabilityZone>us-east-1a</availabilityZone>\n" +
" <groupName/>\n" +
" <tenancy>default</tenancy>\n" +
" </placement>\n" +
" <kernelId>aki-805ea7e9</kernelId>\n" +
" <monitoring>\n" +
" <state>disabled</state>\n" +
" </monitoring>\n" +
" <privateIpAddress>10.209.193.170</privateIpAddress>\n" +
" <ipAddress>50.16.102.143</ipAddress>\n" +
" <groupSet>\n" +
" <item>\n" +
" <groupId>sg-b67baddf</groupId>\n" +
" <groupName>hazelcast</groupName>\n" +
" </item>\n" +
" </groupSet>\n" +
" <architecture>i386</architecture>\n" +
" <rootDeviceType>ebs</rootDeviceType>\n" +
" <rootDeviceName>/dev/sda1</rootDeviceName>\n" +
" <blockDeviceMapping>\n" +
" <item>\n" +
" <deviceName>/dev/sda1</deviceName>\n" +
" <ebs>\n" +
" <volumeId>vol-abbdffc1</volumeId>\n" +
" <status>attached</status>\n" +
" <attachTime>2011-09-27T11:37:57.000Z</attachTime>\n" +
" <deleteOnTermination>true</deleteOnTermination>\n" +
" </ebs>\n" +
" </item>\n" +
" </blockDeviceMapping>\n" +
" <virtualizationType>paravirtual</virtualizationType>\n" +
" <clientToken/>\n" +
" <tagSet>\n" +
" <item>\n" +
" <key>Name1</key>\n" +
" <value>value1</value>\n" +
" </item>\n" +
" <item>\n" +
" <key>name2</key>\n" +
" <value>value2</value>\n" +
" </item>\n" +
" </tagSet>\n" +
" <hypervisor>xen</hypervisor>\n" +
" </item>\n" +
" </instancesSet>\n" +
" <requesterId>058890971305</requesterId>\n" +
" </item>\n" +
" </reservationSet>\n" +
"</DescribeInstancesResponse>";
@Test
public void testNoTags() throws IOException {
InputStream is = new ByteArrayInputStream(xml.getBytes());
AwsConfig awsConfig = new AwsConfig();
awsConfig.setAccessKey("some-access-key");
awsConfig.setSecretKey("some-secret-key");
awsConfig.setSecurityGroupName("hazelcast");
List<String> result = (List<String>) CloudyUtility.unmarshalTheResponse(is, awsConfig);
assertEquals(2, result.size());
}
@Test
public void testTagsBothNodeHave() throws IOException {
InputStream is = new ByteArrayInputStream(xml.getBytes());
AwsConfig awsConfig = new AwsConfig();
awsConfig.setAccessKey("some-access-key");
awsConfig.setSecretKey("some-secret-key");
awsConfig.setSecurityGroupName("hazelcast");
awsConfig.setTagKey("Name1");
awsConfig.setTagValue("value1");
List<String> result = (List<String>) CloudyUtility.unmarshalTheResponse(is, awsConfig);
assertEquals(2, result.size());
}
@Test
public void testTagOnlyOneNodeHave() throws IOException {
InputStream is = new ByteArrayInputStream(xml.getBytes());
AwsConfig awsConfig = new AwsConfig();
awsConfig.setAccessKey("some-access-key");
awsConfig.setSecretKey("some-secret-key");
awsConfig.setSecurityGroupName("hazelcast");
awsConfig.setTagKey("name");
awsConfig.setTagValue("");
List<String> result = (List<String>) CloudyUtility.unmarshalTheResponse(is, awsConfig);
assertEquals(1, result.size());
}
}
| 0true
|
hazelcast-cloud_src_test_java_com_hazelcast_aws_utility_CloudyUtilityTest.java
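The three tests above pin down the tag filter: with no tag configured both instances match; with a tag both instances carry they still both match; with a tag only one instance carries, the result shrinks to one. A standalone sketch of that filtering rule (illustrative types only, not the real CloudyUtility internals):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Illustrative only: models how a tag key/value pair narrows the discovered instances.
class TagFilterSketch {
    static List<String> filterByTag(Map<String, Map<String, String>> instanceTags,
                                    String tagKey, String tagValue) {
        List<String> matching = new ArrayList<String>();
        for (Map.Entry<String, Map<String, String>> e : instanceTags.entrySet()) {
            Map<String, String> tags = e.getValue();
            if (tagKey == null || tagKey.isEmpty()) {
                matching.add(e.getKey()); // no tag configured: every instance matches
            } else if (tags.containsKey(tagKey)
                    && String.valueOf(tags.get(tagKey)).equals(tagValue)) {
                matching.add(e.getKey()); // tag present with the expected value
            }
        }
        return matching;
    }
}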
|
1,249 |
public abstract class AbstractClusterAdminClient implements InternalClusterAdminClient {
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(ClusterAction<Request, Response, RequestBuilder> action) {
return action.newRequestBuilder(this);
}
@Override
public ActionFuture<ClusterHealthResponse> health(final ClusterHealthRequest request) {
return execute(ClusterHealthAction.INSTANCE, request);
}
@Override
public void health(final ClusterHealthRequest request, final ActionListener<ClusterHealthResponse> listener) {
execute(ClusterHealthAction.INSTANCE, request, listener);
}
@Override
public ClusterHealthRequestBuilder prepareHealth(String... indices) {
return new ClusterHealthRequestBuilder(this).setIndices(indices);
}
@Override
public ActionFuture<ClusterStateResponse> state(final ClusterStateRequest request) {
return execute(ClusterStateAction.INSTANCE, request);
}
@Override
public void state(final ClusterStateRequest request, final ActionListener<ClusterStateResponse> listener) {
execute(ClusterStateAction.INSTANCE, request, listener);
}
@Override
public ClusterStateRequestBuilder prepareState() {
return new ClusterStateRequestBuilder(this);
}
@Override
public ActionFuture<ClusterRerouteResponse> reroute(final ClusterRerouteRequest request) {
return execute(ClusterRerouteAction.INSTANCE, request);
}
@Override
public void reroute(final ClusterRerouteRequest request, final ActionListener<ClusterRerouteResponse> listener) {
execute(ClusterRerouteAction.INSTANCE, request, listener);
}
@Override
public ClusterRerouteRequestBuilder prepareReroute() {
return new ClusterRerouteRequestBuilder(this);
}
@Override
public ActionFuture<ClusterUpdateSettingsResponse> updateSettings(final ClusterUpdateSettingsRequest request) {
return execute(ClusterUpdateSettingsAction.INSTANCE, request);
}
@Override
public void updateSettings(final ClusterUpdateSettingsRequest request, final ActionListener<ClusterUpdateSettingsResponse> listener) {
execute(ClusterUpdateSettingsAction.INSTANCE, request, listener);
}
@Override
public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() {
return new ClusterUpdateSettingsRequestBuilder(this);
}
@Override
public ActionFuture<NodesInfoResponse> nodesInfo(final NodesInfoRequest request) {
return execute(NodesInfoAction.INSTANCE, request);
}
@Override
public void nodesInfo(final NodesInfoRequest request, final ActionListener<NodesInfoResponse> listener) {
execute(NodesInfoAction.INSTANCE, request, listener);
}
@Override
public NodesInfoRequestBuilder prepareNodesInfo(String... nodesIds) {
return new NodesInfoRequestBuilder(this).setNodesIds(nodesIds);
}
@Override
public ActionFuture<NodesStatsResponse> nodesStats(final NodesStatsRequest request) {
return execute(NodesStatsAction.INSTANCE, request);
}
@Override
public void nodesStats(final NodesStatsRequest request, final ActionListener<NodesStatsResponse> listener) {
execute(NodesStatsAction.INSTANCE, request, listener);
}
@Override
public NodesStatsRequestBuilder prepareNodesStats(String... nodesIds) {
return new NodesStatsRequestBuilder(this).setNodesIds(nodesIds);
}
@Override
public ActionFuture<ClusterStatsResponse> clusterStats(ClusterStatsRequest request) {
return execute(ClusterStatsAction.INSTANCE, request);
}
@Override
public void clusterStats(ClusterStatsRequest request, ActionListener<ClusterStatsResponse> listener) {
execute(ClusterStatsAction.INSTANCE, request, listener);
}
@Override
public ClusterStatsRequestBuilder prepareClusterStats() {
return new ClusterStatsRequestBuilder(this);
}
@Override
public ActionFuture<NodesHotThreadsResponse> nodesHotThreads(NodesHotThreadsRequest request) {
return execute(NodesHotThreadsAction.INSTANCE, request);
}
@Override
public void nodesHotThreads(NodesHotThreadsRequest request, ActionListener<NodesHotThreadsResponse> listener) {
execute(NodesHotThreadsAction.INSTANCE, request, listener);
}
@Override
public NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... nodesIds) {
return new NodesHotThreadsRequestBuilder(this).setNodesIds(nodesIds);
}
@Override
public ActionFuture<NodesRestartResponse> nodesRestart(final NodesRestartRequest request) {
return execute(NodesRestartAction.INSTANCE, request);
}
@Override
public void nodesRestart(final NodesRestartRequest request, final ActionListener<NodesRestartResponse> listener) {
execute(NodesRestartAction.INSTANCE, request, listener);
}
@Override
public NodesRestartRequestBuilder prepareNodesRestart(String... nodesIds) {
return new NodesRestartRequestBuilder(this).setNodesIds(nodesIds);
}
@Override
public ActionFuture<NodesShutdownResponse> nodesShutdown(final NodesShutdownRequest request) {
return execute(NodesShutdownAction.INSTANCE, request);
}
@Override
public void nodesShutdown(final NodesShutdownRequest request, final ActionListener<NodesShutdownResponse> listener) {
execute(NodesShutdownAction.INSTANCE, request, listener);
}
@Override
public NodesShutdownRequestBuilder prepareNodesShutdown(String... nodesIds) {
return new NodesShutdownRequestBuilder(this).setNodesIds(nodesIds);
}
@Override
public ActionFuture<ClusterSearchShardsResponse> searchShards(final ClusterSearchShardsRequest request) {
return execute(ClusterSearchShardsAction.INSTANCE, request);
}
@Override
public void searchShards(final ClusterSearchShardsRequest request, final ActionListener<ClusterSearchShardsResponse> listener) {
execute(ClusterSearchShardsAction.INSTANCE, request, listener);
}
@Override
public ClusterSearchShardsRequestBuilder prepareSearchShards() {
return new ClusterSearchShardsRequestBuilder(this);
}
@Override
public ClusterSearchShardsRequestBuilder prepareSearchShards(String... indices) {
return new ClusterSearchShardsRequestBuilder(this).setIndices(indices);
}
@Override
public PendingClusterTasksRequestBuilder preparePendingClusterTasks() {
return new PendingClusterTasksRequestBuilder(this);
}
@Override
public ActionFuture<PendingClusterTasksResponse> pendingClusterTasks(PendingClusterTasksRequest request) {
return execute(PendingClusterTasksAction.INSTANCE, request);
}
@Override
public void pendingClusterTasks(PendingClusterTasksRequest request, ActionListener<PendingClusterTasksResponse> listener) {
execute(PendingClusterTasksAction.INSTANCE, request, listener);
}
@Override
public ActionFuture<PutRepositoryResponse> putRepository(PutRepositoryRequest request) {
return execute(PutRepositoryAction.INSTANCE, request);
}
@Override
public void putRepository(PutRepositoryRequest request, ActionListener<PutRepositoryResponse> listener) {
execute(PutRepositoryAction.INSTANCE, request, listener);
}
@Override
public PutRepositoryRequestBuilder preparePutRepository(String name) {
return new PutRepositoryRequestBuilder(this, name);
}
@Override
public ActionFuture<CreateSnapshotResponse> createSnapshot(CreateSnapshotRequest request) {
return execute(CreateSnapshotAction.INSTANCE, request);
}
@Override
public void createSnapshot(CreateSnapshotRequest request, ActionListener<CreateSnapshotResponse> listener) {
execute(CreateSnapshotAction.INSTANCE, request, listener);
}
@Override
public CreateSnapshotRequestBuilder prepareCreateSnapshot(String repository, String name) {
return new CreateSnapshotRequestBuilder(this, repository, name);
}
@Override
public ActionFuture<GetSnapshotsResponse> getSnapshots(GetSnapshotsRequest request) {
return execute(GetSnapshotsAction.INSTANCE, request);
}
@Override
public void getSnapshots(GetSnapshotsRequest request, ActionListener<GetSnapshotsResponse> listener) {
execute(GetSnapshotsAction.INSTANCE, request, listener);
}
@Override
public GetSnapshotsRequestBuilder prepareGetSnapshots(String repository) {
return new GetSnapshotsRequestBuilder(this, repository);
}
@Override
public ActionFuture<DeleteSnapshotResponse> deleteSnapshot(DeleteSnapshotRequest request) {
return execute(DeleteSnapshotAction.INSTANCE, request);
}
@Override
public void deleteSnapshot(DeleteSnapshotRequest request, ActionListener<DeleteSnapshotResponse> listener) {
execute(DeleteSnapshotAction.INSTANCE, request, listener);
}
@Override
public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(String repository, String name) {
return new DeleteSnapshotRequestBuilder(this, repository, name);
}
@Override
public ActionFuture<DeleteRepositoryResponse> deleteRepository(DeleteRepositoryRequest request) {
return execute(DeleteRepositoryAction.INSTANCE, request);
}
@Override
public void deleteRepository(DeleteRepositoryRequest request, ActionListener<DeleteRepositoryResponse> listener) {
execute(DeleteRepositoryAction.INSTANCE, request, listener);
}
@Override
public DeleteRepositoryRequestBuilder prepareDeleteRepository(String name) {
return new DeleteRepositoryRequestBuilder(this, name);
}
@Override
public ActionFuture<GetRepositoriesResponse> getRepositories(GetRepositoriesRequest request) {
return execute(GetRepositoriesAction.INSTANCE, request);
}
@Override
public void getRepositories(GetRepositoriesRequest request, ActionListener<GetRepositoriesResponse> listener) {
execute(GetRepositoriesAction.INSTANCE, request, listener);
}
@Override
public GetRepositoriesRequestBuilder prepareGetRepositories(String... name) {
return new GetRepositoriesRequestBuilder(this, name);
}
@Override
public ActionFuture<RestoreSnapshotResponse> restoreSnapshot(RestoreSnapshotRequest request) {
return execute(RestoreSnapshotAction.INSTANCE, request);
}
@Override
public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener<RestoreSnapshotResponse> listener) {
execute(RestoreSnapshotAction.INSTANCE, request, listener);
}
@Override
public RestoreSnapshotRequestBuilder prepareRestoreSnapshot(String repository, String snapshot) {
return new RestoreSnapshotRequestBuilder(this, repository, snapshot);
}
}
| 1no label
|
src_main_java_org_elasticsearch_client_support_AbstractClusterAdminClient.java
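Every action above is exposed three ways: a future-returning call, a listener-based call, and a prepareX() builder that ultimately routes through the generic prepareExecute(). A hedged usage sketch of the builder style (assumes an already-constructed client and a reachable cluster; the index name is a placeholder):

import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.ClusterAdminClient;

class ClusterAdminUsageSketch {
    static void checkHealth(ClusterAdminClient cluster) {
        // Builder style: prepareHealth() delegates to prepareExecute(ClusterHealthAction.INSTANCE).
        ClusterHealthResponse health = cluster.prepareHealth("my-index") // "my-index" is a placeholder
                .setWaitForYellowStatus()
                .execute()
                .actionGet();
        System.out.println("cluster status: " + health.getStatus());
    }
}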
|
193 |
public class ClientSecurityConfig {
private Credentials credentials;
private String credentialsClassname;
public Credentials getCredentials() {
return credentials;
}
public void setCredentials(Credentials credentials) {
this.credentials = credentials;
}
public String getCredentialsClassname() {
return credentialsClassname;
}
public void setCredentialsClassname(String credentialsClassname) {
this.credentialsClassname = credentialsClassname;
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_config_ClientSecurityConfig.java
|
314 |
public abstract class AbstractMergeBeanPostProcessor implements BeanPostProcessor, ApplicationContextAware {
protected static final Log LOG = LogFactory.getLog(AbstractMergeBeanPostProcessor.class);
protected String collectionRef;
protected String targetRef;
protected Placement placement = Placement.APPEND;
protected int position;
protected ApplicationContext applicationContext;
protected MergeBeanStatusProvider statusProvider;
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
return bean;
}
@Override
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
if (statusProvider != null && !statusProvider.isProcessingEnabled(bean, beanName, applicationContext)) {
if (LOG.isTraceEnabled()) {
LOG.trace(String.format("Not performing post-processing on targetRef [%s] because the registered " +
"status provider [%s] returned false", targetRef, statusProvider.getClass().getSimpleName()));
}
return bean;
}
if (beanName.equals(targetRef)) {
Object mergeCollection = applicationContext.getBean(collectionRef);
if (bean instanceof ListFactoryBean || bean instanceof List) {
try {
List mergeList = (List) mergeCollection;
List sourceList;
if (bean instanceof ListFactoryBean) {
Field field = ListFactoryBean.class.getDeclaredField("sourceList");
field.setAccessible(true);
sourceList = (List) field.get(bean);
} else {
sourceList = (List) bean;
}
switch (placement) {
case APPEND:
sourceList.addAll(mergeList);
break;
case PREPEND:
sourceList.addAll(0, mergeList);
break;
case SPECIFIC:
sourceList.addAll(position, mergeList);
break;
}
} catch (Exception e) {
throw new BeanCreationException(e.getMessage());
}
} else if (bean instanceof SetFactoryBean || bean instanceof Set) {
try {
Set mergeSet = (Set) mergeCollection;
Set sourceSet;
if (bean instanceof SetFactoryBean) {
Field field = SetFactoryBean.class.getDeclaredField("sourceSet");
field.setAccessible(true);
sourceSet = (Set) field.get(bean);
} else {
sourceSet = (Set)bean;
}
List tempList = new ArrayList(sourceSet);
switch (placement) {
case APPEND:
tempList.addAll(mergeSet);
break;
case PREPEND:
tempList.addAll(0, mergeSet);
break;
case SPECIFIC:
tempList.addAll(position, mergeSet);
break;
}
sourceSet.clear();
sourceSet.addAll(tempList);
} catch (Exception e) {
throw new BeanCreationException(e.getMessage());
}
} else if (bean instanceof MapFactoryBean || bean instanceof Map) {
try {
Map mergeMap = (Map) mergeCollection;
Map sourceMap;
if (bean instanceof MapFactoryBean) {
Field field = MapFactoryBean.class.getDeclaredField("sourceMap");
field.setAccessible(true);
sourceMap = (Map) field.get(bean);
} else {
sourceMap = (Map) bean;
}
LinkedHashMap tempMap = new LinkedHashMap();
switch (placement) {
case APPEND:
tempMap.putAll(sourceMap);
tempMap.putAll(mergeMap);
break;
case PREPEND:
tempMap.putAll(mergeMap);
tempMap.putAll(sourceMap);
break;
case SPECIFIC:
boolean added = false;
int j = 0;
for (Object key : sourceMap.keySet()) {
if (j == position) {
tempMap.putAll(mergeMap);
added = true;
}
tempMap.put(key, sourceMap.get(key));
j++;
}
if (!added) {
tempMap.putAll(mergeMap);
}
break;
}
sourceMap.clear();
sourceMap.putAll(tempMap);
} catch (Exception e) {
throw new BeanCreationException(e.getMessage());
}
} else {
throw new IllegalArgumentException("Bean (" + beanName + ") is specified as a merge target, " +
"but is not" +
" of type ListFactoryBean, SetFactoryBean or MapFactoryBean");
}
}
return bean;
}
/**
* Retrieve the id of the collection to be merged
*
* @return the id of the collection to be merged
*/
public String getCollectionRef() {
return collectionRef;
}
/**
* Set the id of the collection to be merged
*
* @param collectionRef the id of the collection to be merged
*/
public void setCollectionRef(String collectionRef) {
this.collectionRef = collectionRef;
}
/**
* Retrieve the id of the collection to receive the merge
*
* @return the id of the collection receiving the merge
*/
public String getTargetRef() {
return targetRef;
}
/**
* Set the id of the collection to receive the merge
*
* @param targetRef the id of the collection receiving the merge
*/
public void setTargetRef(String targetRef) {
this.targetRef = targetRef;
}
/**
* The position in the target collection to place the merge. This can be at the beginning,
* end or at an explicit position.
*
* @return the position in the target collection to place the merge
*/
public Placement getPlacement() {
return placement;
}
/**
* The position in the target collection to place the merge. This can be at the beginning,
* end or at an explicit position.
*
* @param placement the position in the target collection to place the merge
*/
public void setPlacement(Placement placement) {
this.placement = placement;
}
/**
* If a placement of type Placement.SPECIFIC is used, then this is the integer position in the
* target collection at which the merge will be performed.
*
* @return the specific position in the target collection
*/
public int getPosition() {
return position;
}
/**
* If a placement of type Placement.SPECIFIC is used, then this is the integer position in the
* target collection at which the merge will be performed.
*
* @param position the specific position in the target collection
*/
public void setPosition(int position) {
this.position = position;
}
/**
* Gets the status provider that is configured for this post processor
*
* @return the MergeStatusBeanProvider
*/
public MergeBeanStatusProvider getStatusProvider() {
return statusProvider;
}
/**
* Sets the MergeBeanStatusProvider, which controls whether or not this post processor is activated.
* If no statusProvider is set, then we will always execute.
*
* @param statusProvider the status provider controlling whether this post processor executes
*/
public void setStatusProvider(MergeBeanStatusProvider statusProvider) {
this.statusProvider = statusProvider;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_AbstractMergeBeanPostProcessor.java
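The Placement enum referenced above decides where the merged collection lands in the target. A self-contained sketch of just that placement arithmetic on plain lists, with no Spring machinery involved:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class PlacementSketch {
    enum Placement { APPEND, PREPEND, SPECIFIC }

    static <T> void merge(List<T> source, List<T> toMerge, Placement placement, int position) {
        switch (placement) {
            case APPEND:   source.addAll(toMerge);           break; // after existing entries
            case PREPEND:  source.addAll(0, toMerge);        break; // before existing entries
            case SPECIFIC: source.addAll(position, toMerge); break; // at an explicit index
        }
    }

    public static void main(String[] args) {
        List<String> target = new ArrayList<String>(Arrays.asList("a", "b", "c"));
        merge(target, Arrays.asList("x", "y"), Placement.SPECIFIC, 1);
        System.out.println(target); // [a, x, y, b, c]
    }
}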
|
70 |
public interface TitanTransaction extends TitanGraphTransaction {
/* ---------------------------------------------------------------
* Modifications
* ---------------------------------------------------------------
*/
/**
* Creates a new vertex in the graph with the given vertex id and the given vertex label.
* Note that an exception is thrown if the vertex id is not a valid Titan vertex id or if a vertex with the given
* id already exists.
* <p/>
* Custom id setting must be enabled via the configuration option {@link com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration#ALLOW_SETTING_VERTEX_ID}.
* <p/>
* Use {@link com.thinkaurelius.titan.core.util.TitanId#toVertexId(long)} to construct a valid Titan vertex id from a user id.
*
* @param id vertex id of the vertex to be created
* @param vertexLabel vertex label for this vertex - can be null if no vertex label should be set.
* @return New vertex
*/
public TitanVertex addVertex(Long id, VertexLabel vertexLabel);
/**
* Creates a new edge connecting the specified vertices.
* <p/>
* Creates and returns a new {@link TitanEdge} with given label connecting the vertices in the order
* specified.
*
* @param label label of the edge to be created
* @param outVertex outgoing vertex of the edge
* @param inVertex incoming vertex of the edge
* @return new edge
*/
public TitanEdge addEdge(TitanVertex outVertex, TitanVertex inVertex, EdgeLabel label);
/**
* Creates a new edge connecting the specified vertices.
* <p/>
* Creates and returns a new {@link TitanEdge} with given label connecting the vertices in the order
* specified.
* <br />
* Automatically creates the edge label if it does not exist and automatic creation of types is enabled. Otherwise,
* this method will throw an {@link IllegalArgumentException}.
*
* @param label label of the edge to be created
* @param outVertex outgoing vertex of the edge
* @param inVertex incoming vertex of the edge
* @return new edge
*/
public TitanEdge addEdge(TitanVertex outVertex, TitanVertex inVertex, String label);
/**
* Creates a new property for the given vertex and key with the specified value.
* <p/>
* Creates and returns a new {@link TitanProperty} with specified property key and the given object being the value.
*
* @param key key of the property to be created
* @param vertex vertex for which to create the property
* @param value value of the property to be created
* @return new property
* @throws IllegalArgumentException if the value does not match the data type of the given property key.
*/
public TitanProperty addProperty(TitanVertex vertex, PropertyKey key, Object value);
/**
* Creates a new property for the given vertex and key with the specified value.
* <p/>
* Creates and returns a new {@link TitanProperty} with specified property key and the given object being the value.
* <br />
* Automatically creates the property key if it does not exist and automatic creation of types is enabled. Otherwise,
* this method will throw an {@link IllegalArgumentException}.
*
* @param key key of the property to be created
* @param vertex vertex for which to create the property
* @param value value of the property to be created
* @return new property
* @throws IllegalArgumentException if the value does not match the data type of the given property key.
*/
public TitanProperty addProperty(TitanVertex vertex, String key, Object value);
/**
* Retrieves all vertices which have a property of the given key with the specified value.
* <p/>
* For this operation to be efficient, please ensure that the given property key is indexed.
* Some storage backends may not support this method without a pre-configured index.
*
* @param key key
* @param value property value to match
* @return All vertices which have a property of the given key with the specified value.
* @see com.thinkaurelius.titan.core.schema.TitanManagement#buildIndex(String, Class)
*/
public Iterable<TitanVertex> getVertices(PropertyKey key, Object value);
/**
* Retrieves all edges which have a property of the given key with the specified value.
* <p/>
* For this operation to be efficient, please ensure that the given property key is indexed.
* Some storage backends may not support this method without a pre-configured index.
*
* @param key key
* @param value property value to match
* @return All edges which have a property of the given key with the specified value.
* @see com.thinkaurelius.titan.core.schema.TitanManagement#buildIndex(String, Class)
*/
public Iterable<TitanEdge> getEdges(PropertyKey key, Object value);
/* ---------------------------------------------------------------
* Closing and admin
* ---------------------------------------------------------------
*/
/**
* Commits and closes the transaction.
* <p/>
* Will attempt to persist all modifications which may result in exceptions in case of persistence failures or
* lock contention.
* <br />
* The call releases data structures if possible. All element references (e.g. vertex objects) retrieved
* through this transaction are stale after the transaction closes and should no longer be used.
*
* @throws com.thinkaurelius.titan.diskstorage.BackendException
* if an error arises during persistence
*/
public void commit();
/**
* Aborts and closes the transaction. Will discard all modifications.
* <p/>
* The call releases data structures if possible. All element references (e.g. vertex objects) retrieved
* through this transaction are stale after the transaction closes and should no longer be used.
*
* @throws com.thinkaurelius.titan.diskstorage.BackendException
* if an error arises when releasing the transaction handle
*/
public void rollback();
/**
* Checks whether the transaction is still open.
*
* @return true, when the transaction is open, else false
*/
public boolean isOpen();
/**
* Checks whether the transaction has been closed.
*
* @return true, if the transaction has been closed, else false
*/
public boolean isClosed();
/**
* Checks whether any changes to the graph database have been made in this transaction.
* <p/>
* A modification may be an edge or vertex update, addition, or deletion.
*
* @return true, if the transaction contains updates, else false.
*/
public boolean hasModifications();
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_TitanTransaction.java
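A minimal usage sketch of the interface above: build a small graph inside one transaction and commit it, rolling back on failure. The vertex names, property key, and edge label are placeholders.

import com.thinkaurelius.titan.core.TitanEdge;
import com.thinkaurelius.titan.core.TitanTransaction;
import com.thinkaurelius.titan.core.TitanVertex;
import com.thinkaurelius.titan.core.VertexLabel;

class TransactionUsageSketch {
    static void populate(TitanTransaction tx) {
        try {
            TitanVertex alice = tx.addVertex((Long) null, (VertexLabel) null); // default id, no label
            TitanVertex bob = tx.addVertex((Long) null, (VertexLabel) null);
            tx.addProperty(alice, "name", "alice");            // auto-creates the key if allowed
            tx.addProperty(bob, "name", "bob");
            TitanEdge knows = tx.addEdge(alice, bob, "knows"); // auto-creates the label if allowed
            knows.setProperty("since", 2014);
            tx.commit();                                       // persists or throws
        } catch (RuntimeException e) {
            if (tx.isOpen())
                tx.rollback();                                 // discard all modifications
            throw e;
        }
    }
}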
|
1,819 |
@Component("blMapFieldPersistenceProvider")
@Scope("prototype")
public class MapFieldPersistenceProvider extends BasicFieldPersistenceProvider {
@Override
protected boolean canHandlePersistence(PopulateValueRequest populateValueRequest, Serializable instance) {
return populateValueRequest.getProperty().getName().contains(FieldManager.MAPFIELDSEPARATOR);
}
@Override
protected boolean canHandleExtraction(ExtractValueRequest extractValueRequest, Property property) {
return property.getName().contains(FieldManager.MAPFIELDSEPARATOR);
}
@Override
public FieldProviderResponse populateValue(PopulateValueRequest populateValueRequest, Serializable instance) {
try {
//handle some additional field settings (if applicable)
Class<?> valueType = null;
String valueClassName = populateValueRequest.getMetadata().getMapFieldValueClass();
if (valueClassName != null) {
valueType = Class.forName(valueClassName);
}
if (valueType == null) {
valueType = populateValueRequest.getReturnType();
}
if (valueType == null) {
throw new IllegalAccessException("Unable to determine the valueType for the rule field (" + populateValueRequest.getProperty().getName() + ")");
}
if (ValueAssignable.class.isAssignableFrom(valueType)) {
ValueAssignable assignableValue;
try {
assignableValue = (ValueAssignable) populateValueRequest.getFieldManager().getFieldValue(instance, populateValueRequest.getProperty().getName());
} catch (FieldNotAvailableException e) {
throw new IllegalArgumentException(e);
}
String key = populateValueRequest.getProperty().getName().substring(populateValueRequest.getProperty().getName().indexOf(FieldManager.MAPFIELDSEPARATOR) + FieldManager.MAPFIELDSEPARATOR.length(), populateValueRequest.getProperty().getName().length());
boolean persistValue = false;
if (assignableValue == null) {
assignableValue = (ValueAssignable) valueType.newInstance();
persistValue = true;
}
assignableValue.setName(key);
assignableValue.setValue(populateValueRequest.getProperty().getValue());
String fieldName = populateValueRequest.getProperty().getName().substring(0, populateValueRequest.getProperty().getName().indexOf(FieldManager.MAPFIELDSEPARATOR));
Field field = populateValueRequest.getFieldManager().getField(instance.getClass(), fieldName);
FieldInfo fieldInfo = buildFieldInfo(field);
String manyToField = null;
if (populateValueRequest.getMetadata().getManyToField() != null) {
manyToField = populateValueRequest.getMetadata().getManyToField();
}
if (manyToField == null) {
manyToField = fieldInfo.getManyToManyMappedBy();
}
if (manyToField == null) {
manyToField = fieldInfo.getOneToManyMappedBy();
}
if (manyToField != null) {
String propertyName = populateValueRequest.getProperty().getName();
Object middleInstance = instance;
if (propertyName.contains(".")) {
propertyName = propertyName.substring(0, propertyName.lastIndexOf("."));
middleInstance = populateValueRequest.getFieldManager().getFieldValue(instance, propertyName);
}
populateValueRequest.getFieldManager().setFieldValue(assignableValue, manyToField, middleInstance);
}
if (Searchable.class.isAssignableFrom(valueType)) {
((Searchable) assignableValue).setSearchable(populateValueRequest.getMetadata().getSearchable());
}
if (persistValue) {
populateValueRequest.getPersistenceManager().getDynamicEntityDao().persist(assignableValue);
populateValueRequest.getFieldManager().setFieldValue(instance, populateValueRequest.getProperty().getName(), assignableValue);
}
} else {
//handle the map value set itself
if (FieldProviderResponse.NOT_HANDLED==super.populateValue(populateValueRequest, instance)) {
return FieldProviderResponse.NOT_HANDLED;
}
}
} catch (Exception e) {
throw new PersistenceException(e);
}
return FieldProviderResponse.HANDLED;
}
@Override
public FieldProviderResponse extractValue(ExtractValueRequest extractValueRequest, Property property) throws PersistenceException {
if (extractValueRequest.getRequestedValue() != null && extractValueRequest.getRequestedValue() instanceof ValueAssignable) {
ValueAssignable assignableValue = (ValueAssignable) extractValueRequest.getRequestedValue();
String val = (String) assignableValue.getValue();
property.setValue(val);
property.setDisplayValue(extractValueRequest.getDisplayVal());
} else {
if (FieldProviderResponse.NOT_HANDLED==super.extractValue(extractValueRequest, property)) {
return FieldProviderResponse.NOT_HANDLED;
}
}
return FieldProviderResponse.HANDLED;
}
@Override
public FieldProviderResponse addSearchMapping(AddSearchMappingRequest addSearchMappingRequest, List<FilterMapping> filterMappings) {
return FieldProviderResponse.NOT_HANDLED;
}
@Override
public int getOrder() {
return FieldPersistenceProvider.MAP_FIELD;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_persistence_module_provider_MapFieldPersistenceProvider.java
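The provider above repeatedly splits a property name around FieldManager.MAPFIELDSEPARATOR to recover the owning field name and the map key. A tiny standalone sketch of that split; the separator literal here is an assumption, the real value lives in FieldManager:

// Illustrative only: mirrors the substring logic in populateValue() above.
class MapFieldNameSketch {
    // Placeholder: the real value is FieldManager.MAPFIELDSEPARATOR.
    static final String SEPARATOR = "---";

    static String fieldName(String propertyName) {
        return propertyName.substring(0, propertyName.indexOf(SEPARATOR));
    }

    static String mapKey(String propertyName) {
        return propertyName.substring(propertyName.indexOf(SEPARATOR) + SEPARATOR.length());
    }

    public static void main(String[] args) {
        String property = "attributes---color";
        System.out.println(fieldName(property)); // attributes
        System.out.println(mapKey(property));    // color
    }
}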
|
71 |
{
@Override
public TransactionState create( Transaction tx )
{
return TransactionState.NO_STATE;
}
};
| 0true
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_TransactionStateFactory.java
|
28 |
static final class ThenAccept<T> extends Completion {
final CompletableFuture<? extends T> src;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
final Executor executor;
ThenAccept(CompletableFuture<? extends T> src,
Action<? super T> fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncAccept<T>(t, fn, dst));
else
fn.accept(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
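ThenAccept above is the completion node behind thenAccept(): once the source future holds a result, the action runs (inline or on the supplied executor) and the downstream Void future completes, carrying any exception the action threw. Since the jsr166e backport mirrors java.util.concurrent.CompletableFuture, the observable behavior can be sketched with the standard JDK 8 class:

import java.util.concurrent.CompletableFuture;

class ThenAcceptSketch {
    public static void main(String[] args) {
        CompletableFuture<String> src = new CompletableFuture<String>();
        // dst completes with null only after the action has run against src's result.
        CompletableFuture<Void> dst = src.thenAccept(
                value -> System.out.println("got: " + value));
        src.complete("hello"); // triggers the pending completion node
        dst.join();            // returns once the action has finished
    }
}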
|
1,528 |
public class EdgesMap {
public static final String PROCESS_VERTICES = Tokens.makeNamespace(EdgesMap.class) + ".processVertices";
public enum Counters {
VERTICES_PROCESSED,
OUT_EDGES_PROCESSED,
IN_EDGES_PROCESSED
}
public static Configuration createConfiguration(final boolean processVertices) {
final Configuration configuration = new EmptyConfiguration();
configuration.setBoolean(PROCESS_VERTICES, processVertices);
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private boolean processVertices;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.processVertices = context.getConfiguration().getBoolean(PROCESS_VERTICES, true);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
if (this.processVertices) {
value.clearPaths();
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
}
long edgesProcessed = 0;
for (final Edge edge : value.getEdges(Direction.IN)) {
((StandardFaunusEdge) edge).startPath();
edgesProcessed++;
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_PROCESSED, edgesProcessed);
edgesProcessed = 0;
for (final Edge edge : value.getEdges(Direction.OUT)) {
((StandardFaunusEdge) edge).startPath();
edgesProcessed++;
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
context.write(NullWritable.get(), value);
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_EdgesMap.java
|
709 |
static class UpdateResult {
final UpdateHelper.Result result;
final ActionRequest actionRequest;
final boolean retry;
final Throwable error;
final WriteResult writeResult;
final UpdateResponse noopResult;
UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, boolean retry, Throwable error, WriteResult writeResult) {
this.result = result;
this.actionRequest = actionRequest;
this.retry = retry;
this.error = error;
this.writeResult = writeResult;
this.noopResult = null;
}
UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, WriteResult writeResult) {
this.result = result;
this.actionRequest = actionRequest;
this.writeResult = writeResult;
this.retry = false;
this.error = null;
this.noopResult = null;
}
public UpdateResult(UpdateHelper.Result result, UpdateResponse updateResponse) {
this.result = result;
this.noopResult = updateResponse;
this.actionRequest = null;
this.writeResult = null;
this.retry = false;
this.error = null;
}
boolean failure() {
return error != null;
}
boolean success() {
return noopResult != null || writeResult != null;
}
@SuppressWarnings("unchecked")
<T extends ActionRequest> T request() {
return (T) actionRequest;
}
}
| 0true
|
src_main_java_org_elasticsearch_action_bulk_TransportShardBulkAction.java
|
1,266 |
public class OMultiFileSegment extends OSegment {
protected OStorageSegmentConfiguration config;
protected OFile[] files = new OFile[0];
private final String fileExtension;
private final String type;
private final long maxSize;
@SuppressWarnings("unused")
private final String defrag;
private int fileStartSize;
final private int fileMaxSize;
private final int fileIncrementSize;
private boolean wasSoftlyClosedAtPreviousTime = true;
private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
public OMultiFileSegment(final OStorageLocalAbstract storage, final OStorageSegmentConfiguration config,
final String fileExtension, final int roundMaxSize) throws IOException {
super(storage, config.name);
readWriteLock.writeLock().lock();
try {
this.config = config;
this.fileExtension = fileExtension;
type = config.fileType;
defrag = config.defrag;
maxSize = OFileUtils.getSizeAsNumber(config.maxSize);
fileStartSize = (int) OFileUtils.getSizeAsNumber(config.fileStartSize);
final int tmpFileMaxSize = (int) OFileUtils.getSizeAsNumber(config.fileMaxSize);
fileIncrementSize = (int) OFileUtils.getSizeAsNumber(config.fileIncrementSize);
if (roundMaxSize > 0)
// ROUND THE FILE SIZE TO AVOID ERRORS WHEN DIVIDING BY A FIXED RECORD SIZE
fileMaxSize = (tmpFileMaxSize / roundMaxSize) * roundMaxSize;
else
fileMaxSize = tmpFileMaxSize;
// INSTANTIATE ALL THE FILES
int perFileMaxSize;
if (config.infoFiles.length == 0) {
// EMPTY FILE: CREATE THE FIRST FILE BY DEFAULT
files = new OFile[1];
files[0] = OFileFactory.instance().create(type,
storage.getVariableParser().resolveVariables(this.config.getLocation() + "/" + name + "." + 0 + this.fileExtension),
storage.getMode());
perFileMaxSize = fileMaxSize;
files[0].setMaxSize(perFileMaxSize);
files[0].setIncrementSize(fileIncrementSize);
} else {
files = new OFile[config.infoFiles.length];
for (int i = 0; i < files.length; ++i) {
files[i] = OFileFactory.instance().create(type, storage.getVariableParser().resolveVariables(config.infoFiles[i].path),
storage.getMode());
perFileMaxSize = fileMaxSize;
files[i].setMaxSize(perFileMaxSize);
files[i].setIncrementSize(fileIncrementSize);
}
}
} finally {
readWriteLock.writeLock().unlock();
}
}
public void open() throws IOException {
readWriteLock.writeLock().lock();
try {
// @TODO: LAZY OPEN FILES
for (OFile file : files)
if (!file.open()) {
// THE FILE WAS NOT CLOSED SOFTLY LAST TIME
OLogManager.instance().warn(this, "segment file '%s' was not closed correctly last time",
OFileUtils.getPath(file.getName()));
// TODO VERIFY DATA?
wasSoftlyClosedAtPreviousTime = false;
}
} finally {
readWriteLock.writeLock().unlock();
}
}
/**
* Create the first file for the current segment.
*
* @param iStartSize initial size to allocate for the new file
* @throws IOException
*/
public void create(final int iStartSize) throws IOException {
readWriteLock.writeLock().lock();
try {
files = new OFile[1];
fileStartSize = iStartSize;
createNewFile();
} finally {
readWriteLock.writeLock().unlock();
}
}
public void close() throws IOException {
readWriteLock.writeLock().lock();
try {
for (OFile file : files) {
if (file != null)
file.close();
}
} finally {
readWriteLock.writeLock().unlock();
}
}
public void delete() throws IOException {
readWriteLock.writeLock().lock();
try {
for (OFile file : files) {
if (file != null)
file.delete();
}
} finally {
readWriteLock.writeLock().unlock();
}
}
public boolean exists() {
readWriteLock.readLock().lock();
try {
return files[0].exists();
} finally {
readWriteLock.readLock().unlock();
}
}
public void truncate() throws IOException {
readWriteLock.writeLock().lock();
try {
// SHRINK TO 0
files[0].shrink(0);
if (files.length > 1) {
// LEAVE JUST ONE FILE
for (int i = 1; i < files.length; ++i) {
if (files[i] != null)
files[i].delete();
}
// UPDATE FILE STRUCTURE
final OFile f = files[0];
files = new OFile[1];
files[0] = f;
// UPDATE CONFIGURATION
final OStorageFileConfiguration fileConfig = config.infoFiles[0];
config.infoFiles = new OStorageFileConfiguration[1];
config.infoFiles[0] = fileConfig;
config.root.update();
}
} finally {
readWriteLock.writeLock().unlock();
}
}
public void synch() throws IOException {
readWriteLock.readLock().lock();
try {
for (OFile file : files) {
if (file != null && file.isOpen())
file.synch();
}
} finally {
readWriteLock.readLock().unlock();
}
}
public void setSoftlyClosed(boolean softlyClosed) throws IOException {
readWriteLock.writeLock().lock();
try {
for (OFile file : files)
if (file != null && file.isOpen())
file.setSoftlyClosed(softlyClosed);
} finally {
readWriteLock.writeLock().unlock();
}
}
public OStorageSegmentConfiguration getConfig() {
readWriteLock.readLock().lock();
try {
return config;
} finally {
readWriteLock.readLock().unlock();
}
}
public long getFilledUpTo() {
readWriteLock.readLock().lock();
try {
long filled = 0;
for (OFile file : files)
filled += file.getFilledUpTo();
return filled;
} finally {
readWriteLock.readLock().unlock();
}
}
public long getSize() {
readWriteLock.readLock().lock();
try {
long size = 0;
for (OFile file : files)
size += file.getFileSize();
return size;
} finally {
readWriteLock.readLock().unlock();
}
}
/**
* Find free space for iRecordSize bytes.
*
* @param iRecordSize
* @return a pair file-id/file-pos
* @throws IOException
*/
public long[] allocateSpace(final int iRecordSize) throws IOException {
readWriteLock.writeLock().lock();
try {
// IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGING ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE
OFile file;
for (int i = 0; i < files.length; ++i) {
file = files[i];
if (file.getFreeSpace() >= iRecordSize)
// FOUND: RETURN THIS OFFSET
return new long[] { i, file.allocateSpace(iRecordSize) };
}
// NOT FOUND: CHECK IF CAN OVERSIZE SOME FILES
for (int i = 0; i < files.length; ++i) {
file = files[i];
if (file.canOversize(iRecordSize)) {
// FOUND SPACE: ENLARGE IT
return new long[] { i, file.allocateSpace(iRecordSize) };
}
}
// TRY TO CREATE A NEW FILE
if (maxSize > 0 && getSize() >= maxSize)
// OUT OF MAX SIZE
throw new OStorageException("Unable to allocate the requested space of " + iRecordSize
+ " bytes because the segment is full: max-Size=" + maxSize + ", currentSize=" + getFilledUpTo());
// COPY THE OLD ARRAY TO THE NEW ONE
OFile[] newFiles = new OFile[files.length + 1];
System.arraycopy(files, 0, newFiles, 0, files.length);
files = newFiles;
// CREATE THE NEW FILE AND PUT IT AS LAST OF THE ARRAY
file = createNewFile();
file.allocateSpace(iRecordSize);
config.root.update();
return new long[] { files.length - 1, 0 };
} finally {
readWriteLock.writeLock().unlock();
}
}
/**
* Return the absolute position given the pair file-id/file-pos.
*
* @param iFilePosition
* position expressed as the pair file-id/file-pos
* @return the absolute position
*/
public long getAbsolutePosition(final long[] iFilePosition) {
readWriteLock.readLock().lock();
try {
long position = 0;
for (int i = 0; i < iFilePosition[0]; ++i) {
position += fileMaxSize;
}
return position + iFilePosition[1];
} finally {
readWriteLock.readLock().unlock();
}
}
public long[] getRelativePosition(final long iPosition) {
readWriteLock.readLock().lock();
try {
if (iPosition < fileMaxSize)
return new long[] { 0L, iPosition };
final int fileNum = (int) (iPosition / fileMaxSize);
if (fileNum < 0)
throw new ODatabaseException("Record position #" + iPosition + " was bound to file #" + fileNum
+ " that is out of limit (files range 0-" + (files.length - 1) + ")");
final int fileRec = (int) (iPosition % fileMaxSize);
if (fileNum >= files.length)
throw new ODatabaseException("Record position #" + iPosition + " was bound to file #" + fileNum
+ " but configured files are only " + files.length);
if (fileRec >= files[fileNum].getFilledUpTo() || fileRec < 0)
throw new ODatabaseException("Record position #" + iPosition + " was bound to file #" + fileNum + " but the position #"
+ fileRec + " is out of file size " + files[fileNum].getFilledUpTo());
return new long[] { fileNum, fileRec };
} finally {
readWriteLock.readLock().unlock();
}
}
private OFile createNewFile() throws IOException {
final int num = files.length - 1;
final OFile file = OFileFactory.instance().create(type, config.getLocation() + "/" + name + "." + num + fileExtension,
storage.getMode());
file.setMaxSize(fileMaxSize);
file.create(fileStartSize);
files[num] = file;
addInfoFileConfigEntry(file);
return file;
}
private void addInfoFileConfigEntry(final OFile file) throws IOException {
OStorageFileConfiguration[] newConfigFiles = new OStorageFileConfiguration[config.infoFiles.length + 1];
System.arraycopy(config.infoFiles, 0, newConfigFiles, 0, config.infoFiles.length);
config.infoFiles = newConfigFiles;
// CREATE A NEW ENTRY FOR THE NEW FILE
String fileNameToStore = storage.getVariableParser().convertPathToRelative(OFileUtils.getPath(file.getPath()));
final OStorageSegmentConfiguration template = config.root.fileTemplate;
config.infoFiles[config.infoFiles.length - 1] = new OStorageFileConfiguration(config, fileNameToStore, template.fileType,
template.fileMaxSize, template.fileIncrementSize);
}
public long allocateSpaceContinuously(final int iSize) throws IOException {
readWriteLock.writeLock().lock();
try {
// IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGING ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE
OFile file;
int remainingSize = iSize;
// IF SOME FILES ALREADY CREATED
long offset = -1;
int fileNumber = -1;
if (files.length > 0) {
// CHECK IF THERE IS FREE SPACE IN LAST FILE IN CHAIN
file = files[files.length - 1];
if (file.getFreeSpace() > 0) {
fileNumber = files.length - 1;
if (remainingSize > file.getFreeSpace()) {
remainingSize -= file.getFreeSpace();
offset = file.allocateSpace(file.getFreeSpace());
} else {
return (long) (files.length - 1) * fileMaxSize + file.allocateSpace(remainingSize);
}
}
// NOT FOUND FREE SPACE: CHECK IF CAN OVERSIZE LAST FILE
final long oversize = fileMaxSize - file.getFileSize();
if (oversize > 0 && remainingSize > 0) {
fileNumber = files.length - 1;
if (remainingSize > oversize) {
remainingSize -= oversize;
long newOffset = file.allocateSpace(oversize);
// SAVE OFFSET IF IT WASN'T SAVED EARLIER
if (offset == -1)
offset = newOffset;
} else {
long newOffset = file.allocateSpace(remainingSize);
if (offset == -1)
offset = newOffset;
if (fileNumber == -1) {
fileNumber = files.length - 1;
}
return (long) fileNumber * fileMaxSize + offset;
}
}
}
// CREATE A NEW FILE BECAUSE THERE ARE NO FILES OR THE EXISTING ONES CANNOT BE ENLARGED ENOUGH
if (remainingSize > 0) {
if (maxSize > 0 && getSize() >= maxSize)
// OUT OF MAX SIZE
throw new OStorageException("Unable to allocate the requested space of " + iSize
+ " bytes because the segment is full: max-Size=" + maxSize + ", currentSize=" + getFilledUpTo());
// COPY THE OLD ARRAY TO THE NEW ONE
OFile[] newFiles = new OFile[files.length + 1];
System.arraycopy(files, 0, newFiles, 0, files.length);
files = newFiles;
// CREATE THE NEW FILE AND PUT IT AS LAST OF THE ARRAY
file = createNewFile();
file.allocateSpace(iSize);
config.root.update();
if (fileNumber == -1) {
fileNumber = files.length - 1;
}
if (offset == -1)
offset = 0;
}
return (long) fileNumber * fileMaxSize + offset;
} finally {
readWriteLock.writeLock().unlock();
}
}
public void writeContinuously(long iPosition, byte[] iData) throws IOException {
readWriteLock.writeLock().lock();
try {
long[] pos = getRelativePosition(iPosition);
// WRITE ACROSS FILE BOUNDARIES: START AT THE RESOLVED FILE/OFFSET AND SPILL INTO THE FOLLOWING FILES AS NEEDED
OFile file;
int remainingSize = iData.length;
long offset = pos[1];
for (int i = (int) pos[0]; remainingSize > 0; ++i) {
file = files[i];
if (remainingSize > file.getFilledUpTo() - offset) {
if (file.getFilledUpTo() < offset) {
throw new ODatabaseException("range check! " + file.getFilledUpTo() + " " + offset);
}
file.write(offset, iData, (int) (file.getFilledUpTo() - offset), iData.length - remainingSize);
remainingSize -= (file.getFilledUpTo() - offset);
} else {
file.write(offset, iData, remainingSize, iData.length - remainingSize);
remainingSize = 0;
}
offset = 0;
}
} finally {
readWriteLock.writeLock().unlock();
}
}
public void writeContinuously(long iPosition, byte[] iData, int arrayOffset, int length) throws IOException {
readWriteLock.writeLock().lock();
try {
long[] pos = getRelativePosition(iPosition);
// WRITE ACROSS FILE BOUNDARIES: START AT THE RESOLVED FILE/OFFSET AND SPILL INTO THE FOLLOWING FILES AS NEEDED
OFile file;
int remainingSize = length;
long offset = pos[1];
for (int i = (int) pos[0]; remainingSize > 0; ++i) {
file = files[i];
if (remainingSize > file.getFilledUpTo() - offset) {
if (file.getFilledUpTo() < offset) {
throw new ODatabaseException("range check! " + file.getFilledUpTo() + " " + offset);
}
file.write(offset, iData, (int) (file.getFilledUpTo() - offset), arrayOffset + length - remainingSize);
remainingSize -= (file.getFilledUpTo() - offset);
} else {
file.write(offset, iData, remainingSize, arrayOffset + length - remainingSize);
remainingSize = 0;
}
offset = 0;
}
} finally {
readWriteLock.writeLock().unlock();
}
}
public void readContinuously(final long iPosition, byte[] iBuffer, final int iSize) throws IOException {
readWriteLock.readLock().lock();
try {
long[] pos = getRelativePosition(iPosition);
// READ ACROSS FILE BOUNDARIES: START AT THE RESOLVED FILE/OFFSET AND CONTINUE INTO THE FOLLOWING FILES AS NEEDED
OFile file;
int remainingSize = iSize;
long offset = pos[1];
assert offset < Integer.MAX_VALUE;
assert offset > -1;
for (int i = (int) pos[0]; remainingSize > 0; ++i) {
file = files[i];
if (remainingSize > file.getFilledUpTo() - offset) {
if (file.getFilledUpTo() < offset) {
throw new ODatabaseException("range check! " + file.getFilledUpTo() + " " + offset);
}
int toRead = (int) (file.getFilledUpTo() - offset);
file.read(offset, iBuffer, toRead, iSize - remainingSize);
remainingSize -= toRead;
} else {
file.read(offset, iBuffer, remainingSize, iSize - remainingSize);
remainingSize = 0;
}
offset = 0;
}
} finally {
readWriteLock.readLock().unlock();
}
}
public void rename(String iOldName, String iNewName) {
readWriteLock.writeLock().lock();
try {
for (OFile file : files) {
final String osFileName = file.getName();
if (osFileName.startsWith(name)) {
final File newFile = new File(storage.getStoragePath() + "/" + iNewName
+ osFileName.substring(osFileName.lastIndexOf(name) + name.length()));
for (OStorageFileConfiguration conf : config.infoFiles) {
if (conf.parent.name.equals(name))
conf.parent.name = iNewName;
if (conf.path.endsWith(osFileName))
conf.path = conf.path.replace(osFileName, newFile.getName());
}
boolean renamed = file.renameTo(newFile);
while (!renamed) {
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
renamed = file.renameTo(newFile);
}
}
}
} finally {
readWriteLock.writeLock().unlock();
}
}
public boolean wasSoftlyClosedAtPreviousTime() {
readWriteLock.readLock().lock();
try {
return wasSoftlyClosedAtPreviousTime;
} finally {
readWriteLock.readLock().unlock();
}
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OMultiFileSegment.java
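The segment above maps one logical position onto a chain of files capped at fileMaxSize: the file number is the quotient, the in-file offset is the remainder, and getAbsolutePosition() multiplies back. A standalone sketch of that arithmetic:

// Illustrative only: the position arithmetic used by getRelativePosition()/getAbsolutePosition().
class SegmentPositionSketch {
    static long[] toRelative(long absolute, long fileMaxSize) {
        return new long[] { absolute / fileMaxSize, absolute % fileMaxSize };
    }

    static long toAbsolute(long fileNum, long fileOffset, long fileMaxSize) {
        return fileNum * fileMaxSize + fileOffset;
    }

    public static void main(String[] args) {
        long fileMaxSize = 1024;
        long[] rel = toRelative(3000, fileMaxSize);
        System.out.println(rel[0] + "/" + rel[1]);                   // 2/952
        System.out.println(toAbsolute(rel[0], rel[1], fileMaxSize)); // 3000
    }
}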
|
2,794 |
public class ASCIIFoldingTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ASCIIFoldingTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new ASCIIFoldingFilter(tokenStream);
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_analysis_ASCIIFoldingTokenFilterFactory.java
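The factory simply wraps the incoming stream in Lucene's ASCIIFoldingFilter, which maps accented Unicode characters to ASCII equivalents. Assuming a Lucene version that exposes the static foldToASCII helper, the transformation can be shown without standing up a full analysis chain (the output buffer is oversized because one character can fold into several):

import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;

class AsciiFoldingSketch {
    public static void main(String[] args) {
        char[] input = "café naïve".toCharArray();
        char[] output = new char[input.length * 4]; // one char can fold to up to four
        int length = ASCIIFoldingFilter.foldToASCII(input, 0, output, 0, input.length);
        System.out.println(new String(output, 0, length)); // cafe naive
    }
}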
|
173 |
public class OBinary implements Comparable<OBinary> {
private byte[] value;
public OBinary(final byte[] buffer) {
value = buffer;
}
public int compareTo(final OBinary o) {
// Compare the common prefix byte by byte; limiting the loop to the shorter buffer avoids an ArrayIndexOutOfBoundsException when the buffers differ in size.
final int size = Math.min(value.length, o.value.length);
for (int i = 0; i < size; ++i) {
if (value[i] > o.value[i])
return 1;
else if (value[i] < o.value[i])
return -1;
}
// Equal prefix: the shorter buffer sorts first.
return value.length - o.value.length;
}
public byte[] toByteArray() {
return value;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_types_OBinary.java
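A short usage note for the comparator above: bytes are compared signed, so (byte) 0xFF sorts below (byte) 0x01, and when one buffer is a prefix of the other the shorter sorts first. A small hedged demo, assuming OBinary is on the classpath:

class OBinaryDemo {
    public static void main(String[] args) {
        OBinary a = new OBinary(new byte[] { 0x01, 0x02 });
        OBinary b = new OBinary(new byte[] { 0x01, 0x03 });
        OBinary c = new OBinary(new byte[] { 0x01 });
        System.out.println(a.compareTo(b)); // -1: differs at the second byte
        System.out.println(b.compareTo(a)); //  1
        System.out.println(c.compareTo(a)); // -1: equal prefix, shorter buffer first
        System.out.println(a.compareTo(a)); //  0
    }
}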
|
3,745 |
public class SessionAttributePredicate implements Predicate, IdentifiedDataSerializable {
private String sessionId;
// Serialization Constructor
public SessionAttributePredicate() {
}
public SessionAttributePredicate(String sessionId) {
this.sessionId = sessionId;
}
@Override
public boolean apply(Entry mapEntry) {
Object key = mapEntry.getKey();
if (key instanceof String) {
String k = (String) key;
return k.startsWith(sessionId + WebFilter.HAZELCAST_SESSION_ATTRIBUTE_SEPARATOR);
}
return false;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(sessionId);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
sessionId = in.readUTF();
}
@Override
public int getFactoryId() {
return WebDataSerializerHook.F_ID;
}
@Override
public int getId() {
return WebDataSerializerHook.SESSION_ATTRIBUTE_ID;
}
}
| 1no label
|
hazelcast-wm_src_main_java_com_hazelcast_web_SessionAttributePredicate.java
|
11 |
@edu.umd.cs.findbugs.annotations.SuppressWarnings("MS_OOI_PKGPROTECT")
public interface TextCommandConstants {
int MONTH_SECONDS = 60 * 60 * 24 * 30;
byte[] SPACE = stringToBytes(" ");
byte[] RETURN = stringToBytes("\r\n");
byte[] FLAG_ZERO = stringToBytes(" 0 ");
byte[] VALUE_SPACE = stringToBytes("VALUE ");
byte[] DELETED = stringToBytes("DELETED\r\n");
byte[] STORED = stringToBytes("STORED\r\n");
byte[] TOUCHED = stringToBytes("TOUCHED\r\n");
byte[] NOT_STORED = stringToBytes("NOT_STORED\r\n");
byte[] NOT_FOUND = stringToBytes("NOT_FOUND\r\n");
byte[] RETURN_END = stringToBytes("\r\nEND\r\n");
byte[] END = stringToBytes("END\r\n");
byte[] ERROR = stringToBytes("ERROR");
byte[] CLIENT_ERROR = stringToBytes("CLIENT_ERROR ");
byte[] SERVER_ERROR = stringToBytes("SERVER_ERROR ");
enum TextCommandType {
GET((byte) 0),
PARTIAL_GET((byte) 1),
GETS((byte) 2),
SET((byte) 3),
APPEND((byte) 4),
PREPEND((byte) 5),
ADD((byte) 6),
REPLACE((byte) 7),
DELETE((byte) 8),
QUIT((byte) 9),
STATS((byte) 10),
GET_END((byte) 11),
ERROR_CLIENT((byte) 12),
ERROR_SERVER((byte) 13),
UNKNOWN((byte) 14),
VERSION((byte) 15),
TOUCH((byte) 16),
INCREMENT((byte) 17),
DECREMENT((byte) 18),
HTTP_GET((byte) 30),
HTTP_POST((byte) 31),
HTTP_PUT((byte) 32),
HTTP_DELETE((byte) 33),
NO_OP((byte) 98),
STOP((byte) 99);
final byte value;
TextCommandType(byte type) {
value = type;
}
public byte getValue() {
return value;
}
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_TextCommandConstants.java
|
3,689 |
public class TTLFieldMapper extends LongFieldMapper implements InternalMapper, RootMapper {
public static final String NAME = "_ttl";
public static final String CONTENT_TYPE = "_ttl";
public static class Defaults extends LongFieldMapper.Defaults {
public static final String NAME = TTLFieldMapper.CONTENT_TYPE;
public static final FieldType TTL_FIELD_TYPE = new FieldType(LongFieldMapper.Defaults.FIELD_TYPE);
static {
TTL_FIELD_TYPE.setStored(true);
TTL_FIELD_TYPE.setIndexed(true);
TTL_FIELD_TYPE.setTokenized(false);
TTL_FIELD_TYPE.freeze();
}
public static final EnabledAttributeMapper ENABLED_STATE = EnabledAttributeMapper.DISABLED;
public static final long DEFAULT = -1;
}
public static class Builder extends NumberFieldMapper.Builder<Builder, TTLFieldMapper> {
private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
private long defaultTTL = Defaults.DEFAULT;
public Builder() {
super(Defaults.NAME, new FieldType(Defaults.TTL_FIELD_TYPE));
}
public Builder enabled(EnabledAttributeMapper enabled) {
this.enabledState = enabled;
return builder;
}
public Builder defaultTTL(long defaultTTL) {
this.defaultTTL = defaultTTL;
return builder;
}
@Override
public TTLFieldMapper build(BuilderContext context) {
return new TTLFieldMapper(fieldType, enabledState, defaultTTL, ignoreMalformed(context), coerce(context), postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
TTLFieldMapper.Builder builder = ttl();
parseField(builder, builder.name, node, parserContext);
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("enabled")) {
EnabledAttributeMapper enabledState = nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED;
builder.enabled(enabledState);
} else if (fieldName.equals("default")) {
TimeValue ttlTimeValue = nodeTimeValue(fieldNode, null);
if (ttlTimeValue != null) {
builder.defaultTTL(ttlTimeValue.millis());
}
}
}
return builder;
}
}
private EnabledAttributeMapper enabledState;
private long defaultTTL;
public TTLFieldMapper() {
this(new FieldType(Defaults.TTL_FIELD_TYPE), Defaults.ENABLED_STATE, Defaults.DEFAULT, Defaults.IGNORE_MALFORMED, Defaults.COERCE, null, null, null, ImmutableSettings.EMPTY);
}
protected TTLFieldMapper(FieldType fieldType, EnabledAttributeMapper enabled, long defaultTTL, Explicit<Boolean> ignoreMalformed,
Explicit<Boolean> coerce, PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,
@Nullable Settings fieldDataSettings, Settings indexSettings) {
super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), Defaults.PRECISION_STEP,
Defaults.BOOST, fieldType, null, Defaults.NULL_VALUE, ignoreMalformed, coerce,
postingsProvider, docValuesProvider, null, null, fieldDataSettings, indexSettings, MultiFields.empty(), null);
this.enabledState = enabled;
this.defaultTTL = defaultTTL;
}
public boolean enabled() {
return this.enabledState.enabled;
}
public long defaultTTL() {
return this.defaultTTL;
}
@Override
public boolean hasDocValues() {
return false;
}
// Overrides valueForSearch to display live value of remaining ttl
@Override
public Object valueForSearch(Object value) {
long now;
SearchContext searchContext = SearchContext.current();
if (searchContext != null) {
now = searchContext.nowInMillis();
} else {
now = System.currentTimeMillis();
}
long val = value(value);
return val - now;
}
// Overload used by realtime get to display the remaining ttl
public Object valueForSearch(long expirationTime) {
return expirationTime - System.currentTimeMillis();
}
@Override
public void validate(ParseContext context) throws MapperParsingException {
}
@Override
public void preParse(ParseContext context) throws IOException {
}
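// the ttl field itself is emitted during postParse, once the rest of the document has been parsed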
@Override
public void postParse(ParseContext context) throws IOException {
super.parse(context);
}
@Override
public void parse(ParseContext context) throws IOException, MapperParsingException {
if (context.sourceToParse().ttl() < 0) { // no ttl has been provided externally
long ttl;
if (context.parser().currentToken() == XContentParser.Token.VALUE_STRING) {
ttl = TimeValue.parseTimeValue(context.parser().text(), null).millis();
} else {
ttl = context.parser().longValue(coerce.value());
}
if (ttl <= 0) {
throw new MapperParsingException("TTL value must be > 0. Illegal value provided [" + ttl + "]");
}
context.sourceToParse().ttl(ttl);
}
}
@Override
public boolean includeInObject() {
return true;
}
@Override
protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException, AlreadyExpiredException {
if (enabledState.enabled && !context.sourceToParse().flyweight()) {
long ttl = context.sourceToParse().ttl();
if (ttl <= 0 && defaultTTL > 0) { // no ttl provided so we use the default value
ttl = defaultTTL;
context.sourceToParse().ttl(ttl);
}
if (ttl > 0) { // a ttl has been provided either externally or in the _source
long timestamp = context.sourceToParse().timestamp();
long expire = new Date(timestamp + ttl).getTime();
long now = System.currentTimeMillis();
// there is no point indexing an already expired doc
if (context.sourceToParse().origin() == SourceToParse.Origin.PRIMARY && now >= expire) {
throw new AlreadyExpiredException(context.index(), context.type(), context.id(), timestamp, ttl, now);
}
// the expiration timestamp (timestamp + ttl) is set as field
fields.add(new CustomLongNumericField(this, expire, fieldType));
}
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
// if all are defaults, no sense to write it at all
if (!includeDefaults && enabledState == Defaults.ENABLED_STATE && defaultTTL == Defaults.DEFAULT) {
return builder;
}
builder.startObject(CONTENT_TYPE);
if (includeDefaults || enabledState != Defaults.ENABLED_STATE) {
builder.field("enabled", enabledState.enabled);
}
if (includeDefaults || defaultTTL != Defaults.DEFAULT && enabledState.enabled) {
builder.field("default", defaultTTL);
}
builder.endObject();
return builder;
}
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith;
if (!mergeContext.mergeFlags().simulate()) {
if (ttlMergeWith.defaultTTL != -1) {
this.defaultTTL = ttlMergeWith.defaultTTL;
}
if (ttlMergeWith.enabledState != enabledState && !ttlMergeWith.enabledState.unset()) {
this.enabledState = ttlMergeWith.enabledState;
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_internal_TTLFieldMapper.java
|
687 |
public class BulkAction extends Action<BulkRequest, BulkResponse, BulkRequestBuilder> {
public static final BulkAction INSTANCE = new BulkAction();
public static final String NAME = "bulk";
private BulkAction() {
super(NAME);
}
@Override
public BulkResponse newResponse() {
return new BulkResponse();
}
@Override
public BulkRequestBuilder newRequestBuilder(Client client) {
return new BulkRequestBuilder(client);
}
@Override
public TransportRequestOptions transportOptions(Settings settings) {
return TransportRequestOptions.options()
.withType(TransportRequestOptions.Type.BULK)
.withCompress(settings.getAsBoolean("action.bulk.compress", true)
);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_bulk_BulkAction.java
|
3,867 |
public class IdsQueryParser implements QueryParser {
public static final String NAME = "ids";
@Inject
public IdsQueryParser() {
}
@Override
public String[] names() {
return new String[]{NAME};
}
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
List<BytesRef> ids = new ArrayList<BytesRef>();
Collection<String> types = null;
String currentFieldName = null;
float boost = 1.0f;
String queryName = null;
XContentParser.Token token;
boolean idsProvided = false;
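// distinguishes an absent "values" array from an explicitly empty one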
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("values".equals(currentFieldName)) {
idsProvided = true;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
BytesRef value = parser.bytesOrNull();
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No value specified for term filter");
}
ids.add(value);
}
} else if ("types".equals(currentFieldName) || "type".equals(currentFieldName)) {
types = new ArrayList<String>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
String value = parser.textOrNull();
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No type specified for term filter");
}
types.add(value);
}
} else {
throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) {
types = ImmutableList.of(parser.text());
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]");
}
}
}
if (!idsProvided) {
throw new QueryParsingException(parseContext.index(), "[ids] query, no ids values provided");
}
if (ids.isEmpty()) {
return Queries.newMatchNoDocsQuery();
}
if (types == null || types.isEmpty()) {
types = parseContext.queryTypes();
} else if (types.size() == 1 && Iterables.getFirst(types, null).equals("_all")) {
types = parseContext.mapperService().types();
}
TermsFilter filter = new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
// no need for constant score filter, since we don't cache the filter, and it always takes deletes into account
ConstantScoreQuery query = new ConstantScoreQuery(filter);
query.setBoost(boost);
if (queryName != null) {
parseContext.addNamedQuery(queryName, query);
}
return query;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_IdsQueryParser.java
|
495 |
return scheduledExecutor.schedule(new Runnable() {
public void run() {
executeInternal(command);
}
}, delay, unit);
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientExecutionServiceImpl.java
|
53 |
public abstract class OAbstractLock implements OLock {
@Override
public <V> V callInLock(final Callable<V> iCallback) throws Exception {
lock();
try {
return iCallback.call();
} finally {
unlock();
}
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_concur_lock_OAbstractLock.java
|
292 |
new Thread(new Runnable() {
public void run() {
try {
latch.await(30, TimeUnit.SECONDS);
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
lock.destroy();
}
}).start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientConditionTest.java
|
2 |
public final class OAlwaysLessKey implements Comparable<Comparable<?>> {
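// sentinel key that sorts strictly before any other key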
public int compareTo(Comparable<?> o) {
return -1;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OAlwaysLessKey.java
|
314 |
new Thread() {
public void run() {
map.tryPut(key, "NEW_VALUE", 1, TimeUnit.SECONDS);
tryPutReturned.countDown();
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
436 |
public class ClusterStatsNodeResponse extends NodeOperationResponse {
private NodeInfo nodeInfo;
private NodeStats nodeStats;
private ShardStats[] shardsStats;
private ClusterHealthStatus clusterStatus;
ClusterStatsNodeResponse() {
}
public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats) {
super(node);
this.nodeInfo = nodeInfo;
this.nodeStats = nodeStats;
this.shardsStats = shardsStats;
this.clusterStatus = clusterStatus;
}
public NodeInfo nodeInfo() {
return this.nodeInfo;
}
public NodeStats nodeStats() {
return this.nodeStats;
}
/**
* Cluster Health Status, only populated on master nodes.
*/
@Nullable
public ClusterHealthStatus clusterStatus() {
return clusterStatus;
}
public ShardStats[] shardsStats() {
return this.shardsStats;
}
public static ClusterStatsNodeResponse readNodeResponse(StreamInput in) throws IOException {
ClusterStatsNodeResponse nodeResponse = new ClusterStatsNodeResponse();
nodeResponse.readFrom(in);
return nodeResponse;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
clusterStatus = null;
if (in.readBoolean()) {
clusterStatus = ClusterHealthStatus.fromValue(in.readByte());
}
this.nodeInfo = NodeInfo.readNodeInfo(in);
this.nodeStats = NodeStats.readNodeStats(in);
int size = in.readVInt();
shardsStats = new ShardStats[size];
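// note: the array is filled back to front, so shard order is reversed relative to writeTo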
for (size--; size >= 0; size--) {
shardsStats[size] = ShardStats.readShardStats(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (clusterStatus == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(clusterStatus.value());
}
nodeInfo.writeTo(out);
nodeStats.writeTo(out);
out.writeVInt(shardsStats.length);
for (ShardStats ss : shardsStats) {
ss.writeTo(out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsNodeResponse.java
|
143 |
public class GetDistributedObjectsRequest extends ClientRequest {
@Override
void process() throws Exception {
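// collect service name/object name pairs for every live proxy and return them in one serializable collection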
ClientEndpoint endpoint = getEndpoint();
Collection<DistributedObject> distributedObjects = clientEngine.getProxyService().getAllDistributedObjects();
SerializationService serializationService = clientEngine.getSerializationService();
List<Data> dataArrayList = new ArrayList<Data>(distributedObjects.size());
for (DistributedObject distributedObject : distributedObjects) {
DistributedObjectInfo distributedObjectInfo = new DistributedObjectInfo(
distributedObject.getServiceName(), distributedObject.getName());
Data data = serializationService.toData(distributedObjectInfo);
dataArrayList.add(data);
}
SerializableCollection collection = new SerializableCollection(dataArrayList);
endpoint.sendResponse(collection, getCallId());
}
@Override
public String getServiceName() {
return ClientEngineImpl.SERVICE_NAME;
}
public int getFactoryId() {
return ClientPortableHook.ID;
}
public int getClassId() {
return ClientPortableHook.GET_DISTRIBUTED_OBJECT_INFO;
}
@Override
public Permission getRequiredPermission() {
return null;
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_GetDistributedObjectsRequest.java
|
1,603 |
public class Imports {
private static final List<String> imports = new ArrayList<String>();
private static final List<String> evaluates = new ArrayList<String>();
public static final String HDFS = "hdfs";
public static final String LOCAL = "local";
static {
// hadoop
imports.add("org.apache.hadoop.hdfs.*");
imports.add("org.apache.hadoop.conf.*");
imports.add("org.apache.hadoop.fs.*");
imports.add("org.apache.hadoop.util.*");
imports.add("org.apache.hadoop.io.*");
imports.add("org.apache.hadoop.io.compress.*");
imports.add("org.apache.hadoop.mapreduce.lib.input.*");
imports.add("org.apache.hadoop.mapreduce.lib.output.*");
// faunus
imports.add("com.thinkaurelius.titan.hadoop.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.edgelist.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.edgelist.rdf.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.graphson.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.noop.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.rexster.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.script.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.util.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.hbase.*");
imports.add("com.thinkaurelius.titan.hadoop.formats.cassandra.*");
imports.add("com.thinkaurelius.titan.hadoop.hdfs.*");
imports.add("com.thinkaurelius.titan.hadoop.tinkerpop.gremlin.*");
imports.add("com.tinkerpop.gremlin.Tokens.T");
imports.add("com.tinkerpop.gremlin.groovy.*");
imports.add("static " + TransformPipe.Order.class.getName() + ".*");
// titan
imports.addAll(com.thinkaurelius.titan.tinkerpop.gremlin.Imports.getImports());
// tinkerpop (most likely inherited from Titan, but just to be safe)
imports.addAll(com.tinkerpop.gremlin.Imports.getImports());
evaluates.add("hdfs = FileSystem.get(new Configuration())");
evaluates.add("local = FileSystem.getLocal(new Configuration())");
}
public static List<String> getImports() {
return Imports.imports;
}
public static List<String> getEvaluates() {
return Imports.evaluates;
}
public static Bindings getEvaluateBindings() throws IOException {
Bindings bindings = new SimpleBindings();
bindings.put(Imports.HDFS, FileSystem.get(new Configuration()));
bindings.put(Imports.LOCAL, FileSystem.getLocal(new Configuration()));
return bindings;
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_tinkerpop_gremlin_Imports.java
|
164 |
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
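// scan Unsafe's static fields for the singleton instance, bypassing the caller check in Unsafe.getUnsafe()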
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
| 0true
|
src_main_java_jsr166y_CountedCompleter.java
|
5,291 |
public class DoubleTerms extends InternalTerms {
public static final Type TYPE = new Type("terms", "dterms");
public static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public DoubleTerms readResult(StreamInput in) throws IOException {
DoubleTerms buckets = new DoubleTerms();
buckets.readFrom(in);
return buckets;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
static class Bucket extends InternalTerms.Bucket {
double term;
public Bucket(double term, long docCount, InternalAggregations aggregations) {
super(docCount, aggregations);
this.term = term;
}
@Override
public String getKey() {
return String.valueOf(term);
}
@Override
public Text getKeyAsText() {
return new StringText(String.valueOf(term));
}
@Override
public Number getKeyAsNumber() {
return term;
}
@Override
int compareTerm(Terms.Bucket other) {
return Double.compare(term, other.getKeyAsNumber().doubleValue());
}
}
private ValueFormatter valueFormatter;
DoubleTerms() {} // for serialization
public DoubleTerms(String name, InternalOrder order, int requiredSize, long minDocCount, Collection<InternalTerms.Bucket> buckets) {
this(name, order, null, requiredSize, minDocCount, buckets);
}
public DoubleTerms(String name, InternalOrder order, ValueFormatter valueFormatter, int requiredSize, long minDocCount, Collection<InternalTerms.Bucket> buckets) {
super(name, order, requiredSize, minDocCount, buckets);
this.valueFormatter = valueFormatter;
}
@Override
public Type type() {
return TYPE;
}
@Override
public InternalTerms reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
InternalTerms terms = (InternalTerms) aggregations.get(0);
terms.trimExcessEntries(reduceContext.cacheRecycler());
return terms;
}
InternalTerms reduced = null;
Recycler.V<DoubleObjectOpenHashMap<List<Bucket>>> buckets = null;
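// groups same-term buckets from different shards so they can be reduced together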
for (InternalAggregation aggregation : aggregations) {
InternalTerms terms = (InternalTerms) aggregation;
if (terms instanceof UnmappedTerms) {
continue;
}
if (reduced == null) {
reduced = terms;
}
if (buckets == null) {
buckets = reduceContext.cacheRecycler().doubleObjectMap(terms.buckets.size());
}
for (Terms.Bucket bucket : terms.buckets) {
List<Bucket> existingBuckets = buckets.v().get(((Bucket) bucket).term);
if (existingBuckets == null) {
existingBuckets = new ArrayList<Bucket>(aggregations.size());
buckets.v().put(((Bucket) bucket).term, existingBuckets);
}
existingBuckets.add((Bucket) bucket);
}
}
if (reduced == null) {
// there are only unmapped terms, so we just return the first one (no need to reduce)
return (UnmappedTerms) aggregations.get(0);
}
// TODO: would it be better to sort the backing array buffer of hppc map directly instead of using a PQ?
final int size = Math.min(requiredSize, buckets.v().size());
BucketPriorityQueue ordered = new BucketPriorityQueue(size, order.comparator(null));
boolean[] states = buckets.v().allocated;
Object[] internalBuckets = buckets.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
List<DoubleTerms.Bucket> sameTermBuckets = (List<DoubleTerms.Bucket>) internalBuckets[i];
final InternalTerms.Bucket b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext.cacheRecycler());
if (b.getDocCount() >= minDocCount) {
ordered.insertWithOverflow(b);
}
}
}
buckets.release();
InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];
for (int i = ordered.size() - 1; i >= 0; i--) {
list[i] = (Bucket) ordered.pop();
}
reduced.buckets = Arrays.asList(list);
return reduced;
}
@Override
public void readFrom(StreamInput in) throws IOException {
this.name = in.readString();
this.order = InternalOrder.Streams.readOrder(in);
this.valueFormatter = ValueFormatterStreams.readOptional(in);
this.requiredSize = readSize(in);
this.minDocCount = in.readVLong();
int size = in.readVInt();
List<InternalTerms.Bucket> buckets = new ArrayList<InternalTerms.Bucket>(size);
for (int i = 0; i < size; i++) {
buckets.add(new Bucket(in.readDouble(), in.readVLong(), InternalAggregations.readAggregations(in)));
}
this.buckets = buckets;
this.bucketMap = null;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
InternalOrder.Streams.writeOrder(order, out);
ValueFormatterStreams.writeOptional(valueFormatter, out);
writeSize(requiredSize, out);
out.writeVLong(minDocCount);
out.writeVInt(buckets.size());
for (InternalTerms.Bucket bucket : buckets) {
out.writeDouble(((Bucket) bucket).term);
out.writeVLong(bucket.getDocCount());
((InternalAggregations) bucket.getAggregations()).writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
builder.startArray(CommonFields.BUCKETS);
for (InternalTerms.Bucket bucket : buckets) {
builder.startObject();
builder.field(CommonFields.KEY, ((Bucket) bucket).term);
if (valueFormatter != null) {
builder.field(CommonFields.KEY_AS_STRING, valueFormatter.format(((Bucket) bucket).term));
}
builder.field(CommonFields.DOC_COUNT, bucket.getDocCount());
((InternalAggregations) bucket.getAggregations()).toXContentInternal(builder, params);
builder.endObject();
}
builder.endArray();
builder.endObject();
return builder;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_terms_DoubleTerms.java
|
79 |
public abstract class OSharedResourceTimeout {
private final ReadWriteLock lock = new ReentrantReadWriteLock();
protected int timeout;
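// a timeout of 0 disables the time limit and blocks until the lock is acquired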
public OSharedResourceTimeout(final int timeout) {
this.timeout = timeout;
}
protected void acquireSharedLock() throws OTimeoutException {
try {
if (timeout == 0) {
lock.readLock().lock();
return;
} else if (lock.readLock().tryLock(timeout, TimeUnit.MILLISECONDS))
// OK
return;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
throw new OTimeoutException("Timeout on acquiring shared lock against resource: " + this);
}
protected void releaseSharedLock() {
lock.readLock().unlock();
}
protected void acquireExclusiveLock() throws OTimeoutException {
try {
if (timeout == 0) {
lock.writeLock().lock();
return;
} else if (lock.writeLock().tryLock(timeout, TimeUnit.MILLISECONDS))
// OK
return;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
throw new OTimeoutException("Timeout on acquiring exclusive lock against resource: " + this);
}
protected void releaseExclusiveLock() {
lock.writeLock().unlock();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_concur_resource_OSharedResourceTimeout.java
|
1,322 |
return new PortableFactory() {
@Override
public Portable create(int classId) {
switch (classId) {
case IS_SHUTDOWN_REQUEST:
return new IsShutdownRequest();
case CANCELLATION_REQUEST:
return new CancellationRequest();
case TARGET_CALLABLE_REQUEST:
return new TargetCallableRequest();
case PARTITION_CALLABLE_REQUEST:
return new PartitionCallableRequest();
default:
return null;
}
}
};
| 1no label
|
hazelcast_src_main_java_com_hazelcast_executor_ExecutorPortableHook.java
|
897 |
public abstract class TransportSearchTypeAction extends TransportAction<SearchRequest, SearchResponse> {
protected final ClusterService clusterService;
protected final SearchServiceTransportAction searchService;
protected final SearchPhaseController searchPhaseController;
public TransportSearchTypeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
super(settings, threadPool);
this.clusterService = clusterService;
this.searchService = searchService;
this.searchPhaseController = searchPhaseController;
}
protected abstract class BaseAsyncAction<FirstResult extends SearchPhaseResult> {
protected final ActionListener<SearchResponse> listener;
protected final GroupShardsIterator shardsIts;
protected final SearchRequest request;
protected final ClusterState clusterState;
protected final DiscoveryNodes nodes;
protected final int expectedSuccessfulOps;
private final int expectedTotalOps;
protected final AtomicInteger successfulOps = new AtomicInteger();
private final AtomicInteger totalOps = new AtomicInteger();
protected final AtomicArray<FirstResult> firstResults;
private volatile AtomicArray<ShardSearchFailure> shardFailures;
private final Object shardFailuresMutex = new Object();
protected volatile ScoreDoc[] sortedShardList;
protected final long startTime = System.currentTimeMillis();
protected BaseAsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
this.request = request;
this.listener = listener;
this.clusterState = clusterService.state();
nodes = clusterState.nodes();
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices(), request.indicesOptions());
for (String index : concreteIndices) {
clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
}
Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
shardsIts = clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference());
expectedSuccessfulOps = shardsIts.size();
// we need to add 1 for each non-active shard group, since those still count in the total!
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
firstResults = new AtomicArray<FirstResult>(shardsIts.size());
}
public void start() {
if (expectedSuccessfulOps == 0) {
// no search shards to search on, bail with an empty response (this happens when searching across _all with no indices around, consistent with broadcast operations)
listener.onResponse(new SearchResponse(InternalSearchResponse.EMPTY, null, 0, 0, System.currentTimeMillis() - startTime, ShardSearchFailure.EMPTY_ARRAY));
return;
}
request.beforeStart();
// count the local operations, and perform the non local ones
int localOperations = 0;
int shardIndex = -1;
for (final ShardIterator shardIt : shardsIts) {
shardIndex++;
final ShardRouting shard = shardIt.firstOrNull();
if (shard != null) {
if (shard.currentNodeId().equals(nodes.localNodeId())) {
localOperations++;
} else {
// do the remote operation here, the localAsync flag is not relevant
performFirstPhase(shardIndex, shardIt);
}
} else {
// really, no shards active in this group
onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
}
}
// we have local operations, perform them now
if (localOperations > 0) {
if (request.operationThreading() == SearchOperationThreading.SINGLE_THREAD) {
request.beforeLocalFork();
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
int shardIndex = -1;
for (final ShardIterator shardIt : shardsIts) {
shardIndex++;
final ShardRouting shard = shardIt.firstOrNull();
if (shard != null) {
if (shard.currentNodeId().equals(nodes.localNodeId())) {
performFirstPhase(shardIndex, shardIt);
}
}
}
}
});
} else {
boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
if (localAsync) {
request.beforeLocalFork();
}
shardIndex = -1;
for (final ShardIterator shardIt : shardsIts) {
shardIndex++;
final int fShardIndex = shardIndex;
final ShardRouting shard = shardIt.firstOrNull();
if (shard != null) {
if (shard.currentNodeId().equals(nodes.localNodeId())) {
if (localAsync) {
try {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
performFirstPhase(fShardIndex, shardIt);
}
});
} catch (Throwable t) {
onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t);
}
} else {
performFirstPhase(fShardIndex, shardIt);
}
}
}
}
}
}
}
void performFirstPhase(final int shardIndex, final ShardIterator shardIt) {
performFirstPhase(shardIndex, shardIt, shardIt.nextOrNull());
}
void performFirstPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
if (shard == null) {
// no more active shards... (we should not really get here, but just for safety)
onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
} else {
final DiscoveryNode node = nodes.get(shard.currentNodeId());
if (node == null) {
onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
} else {
String[] filteringAliases = clusterState.metaData().filteringAliases(shard.index(), request.indices());
sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime), new SearchServiceListener<FirstResult>() {
@Override
public void onResult(FirstResult result) {
onFirstPhaseResult(shardIndex, shard, result, shardIt);
}
@Override
public void onFailure(Throwable t) {
onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
}
});
}
}
}
void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
processFirstPhaseResult(shardIndex, shard, result);
// increment all the "future" shards to update the total ops since we some may work and some may not...
// and when that happens, we break on total ops, so we must maintain them
int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
successfulOps.incrementAndGet();
if (xTotalOps == expectedTotalOps) {
try {
innerMoveToSecondPhase();
} catch (Throwable e) {
if (logger.isDebugEnabled()) {
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
}
listener.onFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
}
}
}
void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId, final ShardIterator shardIt, Throwable t) {
// we always add the shard failure for a specific shard instance
// we do make sure to clean it on a successful response from a shard
SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId());
addShardFailure(shardIndex, shardTarget, t);
if (totalOps.incrementAndGet() == expectedTotalOps) {
if (logger.isDebugEnabled()) {
if (t != null && !TransportActions.isShardNotAvailableException(t)) {
if (shard != null) {
logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
} else {
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
}
}
}
if (successfulOps.get() == 0) {
if (logger.isDebugEnabled()) {
logger.debug("All shards failed for phase: [{}]", firstPhaseName(), t);
}
// no successful ops, raise an exception
listener.onFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", buildShardFailures()));
} else {
try {
innerMoveToSecondPhase();
} catch (Throwable e) {
listener.onFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
}
}
} else {
final ShardRouting nextShard = shardIt.nextOrNull();
final boolean lastShard = nextShard == null;
// trace log this exception
if (logger.isTraceEnabled() && t != null) {
logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
}
if (!lastShard) {
try {
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
performFirstPhase(shardIndex, shardIt, nextShard);
}
});
} catch (Throwable t1) {
onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t1);
}
} else {
// no more shards active, add a failure
if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
if (t != null && !TransportActions.isShardNotAvailableException(t)) {
logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
}
}
}
}
}
private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request, boolean lastShard) {
if (shard != null) {
return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
} else {
return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
}
}
/**
* Returns how long the search took to execute, in milliseconds.
*/
protected final long buildTookInMillis() {
return System.currentTimeMillis() - startTime;
}
protected final ShardSearchFailure[] buildShardFailures() {
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
if (shardFailures == null) {
return ShardSearchFailure.EMPTY_ARRAY;
}
List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
for (int i = 0; i < failures.length; i++) {
failures[i] = entries.get(i).value;
}
return failures;
}
protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Throwable t) {
// we don't aggregate shard failures on non active shards (but do keep the header counts right)
if (TransportActions.isShardNotAvailableException(t)) {
return;
}
// lazily create shard failures, so we can cheaply return the empty shard failure list in the common case (no failures)
if (shardFailures == null) {
synchronized (shardFailuresMutex) {
if (shardFailures == null) {
shardFailures = new AtomicArray<ShardSearchFailure>(shardsIts.size());
}
}
}
ShardSearchFailure failure = shardFailures.get(shardIndex);
if (failure == null) {
shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
} else {
// the failure is already present; try not to override it with an exception that is less meaningful
// for example, getting illegal shard state
if (TransportActions.isReadOverrideException(t)) {
shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
}
}
}
/**
* Releases search contexts that are not referenced in docIdsToLoad.
*/
protected void releaseIrrelevantSearchContexts(AtomicArray<? extends QuerySearchResultProvider> queryResults,
AtomicArray<IntArrayList> docIdsToLoad) {
if (docIdsToLoad == null) {
return;
}
// we only release search context that we did not fetch from if we are not scrolling
if (request.scroll() == null) {
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
if (docIdsToLoad.get(entry.index) == null) {
DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
if (node != null) { // should not happen (==null) but safeguard anyhow
searchService.sendFreeContext(node, entry.value.queryResult().id(), request);
}
}
}
}
}
protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchRequest request, SearchServiceListener<FirstResult> listener);
protected final void processFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result) {
firstResults.set(shardIndex, result);
// clean a previous error on this shard group (note: this code is serialized per shardIndex value,
// so it's ok concurrency-wise to potentially miss shard failures created by another failure
// in #addShardFailure, because by definition that happens on *another* shardIndex)
AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
if (shardFailures != null) {
shardFailures.set(shardIndex, null);
}
}
final void innerMoveToSecondPhase() throws Exception {
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder();
boolean hadOne = false;
for (int i = 0; i < firstResults.length(); i++) {
FirstResult result = firstResults.get(i);
if (result == null) {
continue; // failure
}
if (hadOne) {
sb.append(",");
} else {
hadOne = true;
}
sb.append(result.shardTarget());
}
logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version());
}
moveToSecondPhase();
}
protected abstract void moveToSecondPhase() throws Exception;
protected abstract String firstPhaseName();
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_search_type_TransportSearchTypeAction.java
|
23 |
public class EndCommand extends NoOpCommand {
public EndCommand() {
super(END);
}
@Override
public String toString() {
return "EndCommand{}";
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_EndCommand.java
|
2,290 |
public class MapReduceDataSerializerHook
implements DataSerializerHook {
public static final int F_ID = FactoryIdHelper.getFactoryId(FactoryIdHelper.MAP_REDUCE_DS_FACTORY, -23);
public static final int KEY_VALUE_SOURCE_MAP = 0;
public static final int KEY_VALUE_SOURCE_MULTIMAP = 1;
public static final int REDUCER_CHUNK_MESSAGE = 2;
public static final int REDUCER_LAST_CHUNK_MESSAGE = 3;
public static final int TRACKED_JOB_OPERATION = 4;
public static final int REQUEST_PARTITION_MAPPING = 5;
public static final int REQUEST_PARTITION_REDUCING = 6;
public static final int REQUEST_PARTITION_PROCESSED = 7;
public static final int GET_RESULT_OPERATION = 8;
public static final int START_PROCESSING_OPERATION = 9;
public static final int REQUEST_PARTITION_RESULT = 10;
public static final int REDUCING_FINISHED_MESSAGE = 11;
public static final int FIRE_NOTIFICATION_OPERATION = 12;
public static final int REQUEST_MEMBERID_ASSIGNMENT = 13;
public static final int PROCESS_STATS_UPDATE_OPERATION = 14;
public static final int NOTIFY_REMOTE_EXCEPTION_OPERATION = 15;
public static final int CANCEL_JOB_SUPERVISOR_OPERATION = 16;
public static final int POSTPONE_PARTITION_PROCESSING_OPERATION = 17;
public static final int KEY_VALUE_SOURCE_LIST = 18;
public static final int KEY_VALUE_SOURCE_SET = 19;
public static final int KEYS_ASSIGNMENT_RESULT = 20;
public static final int KEYS_ASSIGNMENT_OPERATION = 21;
private static final int LEN = KEYS_ASSIGNMENT_OPERATION + 1;
@Override
public int getFactoryId() {
return F_ID;
}
@Override
public DataSerializableFactory createFactory() {
ConstructorFunction<Integer, IdentifiedDataSerializable> constructors[] = new ConstructorFunction[LEN];
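// one constructor function per type id; the id doubles as the index into this array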
constructors[KEY_VALUE_SOURCE_MAP] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new MapKeyValueSource();
}
};
constructors[KEY_VALUE_SOURCE_MULTIMAP] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new MultiMapKeyValueSource();
}
};
constructors[REDUCER_CHUNK_MESSAGE] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new IntermediateChunkNotification();
}
};
constructors[REDUCER_LAST_CHUNK_MESSAGE] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new LastChunkNotification();
}
};
constructors[TRACKED_JOB_OPERATION] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new KeyValueJobOperation();
}
};
constructors[REQUEST_PARTITION_MAPPING] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new RequestPartitionMapping();
}
};
constructors[REQUEST_PARTITION_REDUCING] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new RequestPartitionReducing();
}
};
constructors[REQUEST_PARTITION_PROCESSED] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new RequestPartitionProcessed();
}
};
constructors[GET_RESULT_OPERATION] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new GetResultOperation();
}
};
constructors[START_PROCESSING_OPERATION] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new StartProcessingJobOperation();
}
};
constructors[REQUEST_PARTITION_RESULT] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new RequestPartitionResult();
}
};
constructors[REDUCING_FINISHED_MESSAGE] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new ReducingFinishedNotification();
}
};
constructors[FIRE_NOTIFICATION_OPERATION] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new FireNotificationOperation();
}
};
constructors[REQUEST_MEMBERID_ASSIGNMENT] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new RequestMemberIdAssignment();
}
};
constructors[PROCESS_STATS_UPDATE_OPERATION] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new ProcessStatsUpdateOperation();
}
};
constructors[NOTIFY_REMOTE_EXCEPTION_OPERATION] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new NotifyRemoteExceptionOperation();
}
};
constructors[CANCEL_JOB_SUPERVISOR_OPERATION] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new CancelJobSupervisorOperation();
}
};
constructors[KEY_VALUE_SOURCE_LIST] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new ListKeyValueSource();
}
};
constructors[KEY_VALUE_SOURCE_SET] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new SetKeyValueSource();
}
};
constructors[KEYS_ASSIGNMENT_RESULT] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new KeysAssignmentResult();
}
};
constructors[KEYS_ASSIGNMENT_OPERATION] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new KeysAssignmentOperation();
}
};
constructors[POSTPONE_PARTITION_PROCESSING_OPERATION] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
@Override
public IdentifiedDataSerializable createNew(Integer arg) {
return new PostPonePartitionProcessing();
}
};
return new ArrayDataSerializableFactory(constructors);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_MapReduceDataSerializerHook.java
|
1,254 |
public abstract class FaunusElement extends LifeCycleElement implements InternalElement, Comparable<FaunusElement> {
protected static final Predicate<FaunusProperty> FILTER_DELETED_PROPERTIES = new Predicate<FaunusProperty>() {
@Override
public boolean apply(@Nullable FaunusProperty p) {
return !p.isRemoved();
}
};
protected static final Predicate<StandardFaunusEdge> FILTER_DELETED_EDGES = new Predicate<StandardFaunusEdge>() {
@Override
public boolean apply(@Nullable StandardFaunusEdge e) {
return !e.isRemoved();
}
};
private static final Logger log =
LoggerFactory.getLogger(FaunusElement.class);
public static final long NO_ID = -1;
static final SetMultimap<FaunusRelationType, FaunusRelation> EMPTY_ADJACENCY = ImmutableSetMultimap.of();
protected long id;
protected SetMultimap<FaunusRelationType, FaunusRelation> outAdjacency = EMPTY_ADJACENCY;
protected SetMultimap<FaunusRelationType, FaunusRelation> inAdjacency = EMPTY_ADJACENCY;
public FaunusElement(final long id) {
this.id = id;
}
public abstract FaunusSchemaManager getTypeManager();
@Override
public InternalElement it() {
return this;
}
@Override
public StandardTitanTx tx() {
throw new UnsupportedOperationException();
}
@Override
public void remove() throws UnsupportedOperationException {
lifecycle = ElementLifeCycle.Removed;
throw new UnsupportedOperationException();
}
@Override
public Object getId() {
return this.id;
}
@Override
public long getLongId() {
return this.id;
}
@Override
public boolean hasId() {
return id>=0;
}
@Override
public void setId(final long id) {
Preconditions.checkArgument(id>=0);
this.id = id;
}
void updateSchema(FaunusSerializer.Schema schema) {
schema.addAll(inAdjacency.keySet());
schema.addAll(outAdjacency.keySet());
}
@Override
public boolean isHidden() {
return false;
}
public boolean isModified() {
if (super.isModified()) return true;
if (!(this instanceof FaunusVertex)) return false;
for (Direction dir : Direction.proper) {
for (FaunusRelation r : getAdjacency(dir).values()) {
if (r.isModified()) return true;
}
}
return false;
}
//##################################
// General Relation Handling
//##################################
protected Multiplicity getAdjustedMultiplicity(FaunusRelationType type) {
if (this instanceof FaunusRelation) {
return Multiplicity.MANY2ONE;
}
return type.getMultiplicity();
}
SetMultimap<FaunusRelationType, FaunusRelation> getAdjacency(Direction dir) {
assert dir==Direction.IN || dir==Direction.OUT;
if (dir==Direction.IN) return inAdjacency;
else return outAdjacency;
}
protected void initializeAdjacency(Direction dir) {
if ((dir==Direction.OUT || dir==Direction.BOTH) && this.outAdjacency == EMPTY_ADJACENCY)
outAdjacency = HashMultimap.create();
if ((dir==Direction.IN || dir==Direction.BOTH) && this.inAdjacency == EMPTY_ADJACENCY)
inAdjacency = HashMultimap.create();
}
protected void setRelation(final FaunusRelation relation) {
int killedRels = 0;
final Iterator<FaunusRelation> rels = outAdjacency.get(relation.getType()).iterator();
while (rels.hasNext()) {
FaunusRelation r = rels.next();
if (r.isNew()) rels.remove();
r.updateLifeCycle(ElementLifeCycle.Event.REMOVED);
updateLifeCycle(ElementLifeCycle.Event.REMOVED_RELATION);
killedRels++;
}
final Multiplicity adjMulti = getAdjustedMultiplicity(relation.getType());
if (adjMulti != Multiplicity.MANY2ONE && 0 < killedRels) {
// Calling setRelation on a multi-valued type will delete any
// existing relations of that type, no matter how many -- log this
// behavior and suggest addRelation to suppress the warning when
// using a multi-valued type
log.info( "setRelation deleted {} relations of type {} with multiplicity {}; " +
"use addRelation instead of setRelation to avoid deletion",
killedRels, relation.getType(), adjMulti);
}
addRelation(relation);
}
protected FaunusRelation addRelation(final FaunusRelation relation) {
Preconditions.checkNotNull(relation);
FaunusRelation old = null;
for (Direction dir : Direction.proper) {
//Determine applicable directions
if (relation.isProperty() && dir==Direction.IN) {
continue;
} else if (relation.isEdge()) {
FaunusEdge edge = (FaunusEdge)relation;
if (edge.getEdgeLabel().isUnidirected()) {
if (dir==Direction.IN) continue;
} else if (!edge.getVertex(dir).equals(this)) {
continue;
}
}
initializeAdjacency(dir);
SetMultimap<FaunusRelationType, FaunusRelation> adjacency = getAdjacency(dir);
if ((this instanceof FaunusVertex) && adjacency.containsEntry(relation.getType(), relation)) {
//First, check if this relation already exists; if so, consolidate
old = Iterables.getOnlyElement(Iterables.filter(adjacency.get(relation.getType()),
new Predicate<FaunusRelation>() {
@Override
public boolean apply(@Nullable FaunusRelation rel) {
return relation.equals(rel);
}
}));
if (relation.isNew() && old.isRemoved()) {
old.setLifeCycle(ElementLifeCycle.Loaded);
updateLifeCycle(ElementLifeCycle.Event.ADDED_RELATION);
} else if (relation.isLoaded() && old.isNew()) {
old.setLifeCycle(ElementLifeCycle.Loaded);
}
} else {
//Verify multiplicity constraint
switch(relation.getType().getMultiplicity()) {
case MANY2ONE:
if (dir==Direction.OUT)
ensureUniqueness(relation.getType(),adjacency);
break;
case ONE2MANY:
if (dir==Direction.IN)
ensureUniqueness(relation.getType(),adjacency);
break;
case ONE2ONE:
ensureUniqueness(relation.getType(),adjacency);
break;
case SIMPLE:
for (FaunusRelation rel : adjacency.get(relation.getType())) {
if (rel.isRemoved()) continue;
if (relation.isEdge()) {
FaunusEdge e1 = (FaunusEdge)relation, e2 = (FaunusEdge)rel;
if (e1.getVertex(Direction.OUT).equals(e2.getVertex(Direction.OUT)) &&
e1.getVertex(Direction.IN).equals(e2.getVertex(Direction.IN))) {
throw new IllegalArgumentException("A relation already exists which" +
"violates the multiplicity constraint: " + relation.getType().getMultiplicity());
}
} else {
FaunusProperty p1 = (FaunusProperty)relation, p2 = (FaunusProperty)rel;
if (p1.getValue().equals(p2.getValue())) {
throw new IllegalArgumentException("A relation already exists which" +
"violates the multiplicity constraint: " + relation.getType().getMultiplicity());
}
}
}
break;
case MULTI: //Nothing to check
break;
default: throw new AssertionError();
}
adjacency.put(relation.getType(), relation);
updateLifeCycle(ElementLifeCycle.Event.ADDED_RELATION);
log.trace("Added relation {} to {}", relation, this);
}
}
if (old!=null) return old;
else return relation;
}
private static void ensureUniqueness(FaunusRelationType type, SetMultimap<FaunusRelationType, FaunusRelation> adjacency) {
for (FaunusRelation rel : adjacency.get(type)) {
if (!rel.isRemoved()) throw new IllegalArgumentException("A relation already exists which " +
"violates the multiplicity constraint: " + type.getMultiplicity() + " on type " + type);
}
}
public abstract FaunusVertexQuery query();
//##################################
// Property Handling
//##################################
public void setProperty(EdgeLabel label, TitanVertex vertex) {
setProperty((FaunusRelationType)label,vertex);
}
@Override
public void setProperty(PropertyKey key, Object value) {
setProperty((FaunusRelationType)key,value);
}
@Override
public void setProperty(final String key, final Object value) {
FaunusRelationType rt = getTypeManager().getRelationType(key);
if (rt==null) rt = getTypeManager().getOrCreatePropertyKey(key);
setProperty(rt,value);
}
public abstract void setProperty(final FaunusRelationType type, final Object value);
@Override
public <T> T removeProperty(final String key) {
FaunusRelationType rt = getTypeManager().getRelationType(key);
if (rt==null) return null;
return removeProperty(rt);
}
@Override
public <O> O removeProperty(RelationType type) {
if (type.isEdgeLabel() && !(this instanceof FaunusVertex)) throw new IllegalArgumentException("Provided argument " +
"identifies an edge label. Use edge methods to remove those: " + type);
if (outAdjacency.isEmpty()) return null;
FaunusRelationType rtype = (FaunusRelationType)type;
final List<Object> removed = Lists.newArrayList();
final Iterator<FaunusRelation> rels = outAdjacency.get(rtype).iterator();
while (rels.hasNext()) {
FaunusRelation r = rels.next();
if (!r.isRemoved()) {
if (r.isProperty()) removed.add(((FaunusProperty)r).getValue());
else removed.add(((FaunusEdge)r).getVertex(Direction.IN));
}
if (r.isNew()) rels.remove();
r.updateLifeCycle(ElementLifeCycle.Event.REMOVED);
updateLifeCycle(ElementLifeCycle.Event.REMOVED_RELATION);
}
if (removed.isEmpty()) return null;
else if (getAdjustedMultiplicity(rtype)==Multiplicity.MANY2ONE) return (O)removed.iterator().next();
else return (O) removed;
}
public TitanVertex getProperty(EdgeLabel label) {
Preconditions.checkArgument(label!=null);
Preconditions.checkArgument(!(this instanceof FaunusVertex),"Use getEdges() to query for edges on a vertex");
return Iterables.getOnlyElement(query().type(label).titanEdges()).getVertex(Direction.IN);
}
@Override
public <T> T getProperty(PropertyKey key) {
FaunusPropertyKey type = (FaunusPropertyKey)key;
Iterator<TitanProperty> properties = query().type(type).properties().iterator();
if (type.getCardinality()==Cardinality.SINGLE) {
if (properties.hasNext()) return properties.next().getValue();
else return (T)null;
}
List result = Lists.newArrayList();
while (properties.hasNext()) result.add(properties.next().getValue());
return (T)result;
}
@Override
public <T> T getProperty(final String key) {
FaunusRelationType rt = getTypeManager().getRelationType(key);
if (rt==null) return null;
if (rt.isPropertyKey()) return getProperty((FaunusPropertyKey)rt);
else return (T)getProperty((FaunusEdgeLabel)rt);
}
@Override
public Set<String> getPropertyKeys() {
return Sets.newHashSet(Iterables.transform(getPropertyKeysDirect(),new Function<RelationType, String>() {
@Nullable
@Override
public String apply(@Nullable RelationType relationType) {
return relationType.getName();
}
}));
}
protected Iterable<RelationType> getPropertyKeysDirect() {
final Set<RelationType> result = Sets.newHashSet();
for (final TitanRelation r : query().relations()) {
if (r.isEdge() && (this instanceof FaunusVertex)) continue;
result.add(r.getType());
}
return result;
}
public void addAllProperties(final Iterable<FaunusRelation> properties) {
for (final FaunusRelation p : properties) addRelation(p);
}
public Collection<FaunusRelation> getPropertyCollection() {
return (Collection)Lists.newArrayList(
(this instanceof FaunusVertex)?query().properties():query().relations());
}
//##################################
// General Utility
//##################################
@Override
public boolean equals(final Object other) {
if (this==other) return true;
else if (other==null || !(other instanceof TitanElement)) return false;
TitanElement o = (TitanElement)other;
if (!hasId() || !o.hasId()) return o==this;
if (getLongId()!=o.getLongId()) return false;
return true;
}
@Override
public int hashCode() {
return ((Long) this.id).hashCode();
}
@Override
public int compareTo(FaunusElement o) {
return Longs.compare(id, o.getLongId());
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_FaunusElement.java
|
241 |
public interface OCacheLevelTwoLocator {
public OCache primaryCache(final String storageName);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_cache_OCacheLevelTwoLocator.java
|
263 |
@Service("blEmailService")
public class EmailServiceImpl implements EmailService {
@Resource(name = "blEmailTrackingManager")
protected EmailTrackingManager emailTrackingManager;
@Resource(name = "blServerInfo")
protected ServerInfo serverInfo;
protected EmailServiceProducer emailServiceProducer;
@Resource(name = "blMessageCreator")
protected MessageCreator messageCreator;
@Resource(name = "blEmailReportingDao")
protected EmailReportingDao emailReportingDao;
public boolean sendTemplateEmail(EmailTarget emailTarget, EmailInfo emailInfo, HashMap<String, Object> props) {
if (props == null) {
props = new HashMap<String, Object>();
}
if (emailInfo == null) {
emailInfo = new EmailInfo();
}
props.put(EmailPropertyType.INFO.getType(), emailInfo);
props.put(EmailPropertyType.USER.getType(), emailTarget);
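// create the tracking record first so its id can travel with the message properties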
Long emailId = emailTrackingManager.createTrackedEmail(emailTarget.getEmailAddress(), emailInfo.getEmailType(), null);
props.put("emailTrackingId", emailId);
return sendBasicEmail(emailInfo, emailTarget, props);
}
public boolean sendTemplateEmail(String emailAddress, EmailInfo emailInfo, HashMap<String, Object> props) {
if (!(emailInfo instanceof NullEmailInfo)) {
EmailTarget emailTarget = emailReportingDao.createTarget();
emailTarget.setEmailAddress(emailAddress);
return sendTemplateEmail(emailTarget, emailInfo, props);
} else {
return true;
}
}
public boolean sendBasicEmail(EmailInfo emailInfo, EmailTarget emailTarget, HashMap<String, Object> props) {
if (props == null) {
props = new HashMap<String, Object>();
}
if (emailInfo == null) {
emailInfo = new EmailInfo();
}
props.put(EmailPropertyType.INFO.getType(), emailInfo);
props.put(EmailPropertyType.USER.getType(), emailTarget);
if (Boolean.parseBoolean(emailInfo.getSendEmailReliableAsync())) {
if (emailServiceProducer == null) {
throw new EmailException("The property sendEmailReliableAsync on EmailInfo is true, but the EmailService does not have an instance of JMSEmailServiceProducer set.");
}
emailServiceProducer.send(props);
} else {
messageCreator.sendMessage(props);
}
return true;
}
/**
* @return the emailTrackingManager
*/
public EmailTrackingManager getEmailTrackingManager() {
return emailTrackingManager;
}
/**
* @param emailTrackingManager the emailTrackingManager to set
*/
public void setEmailTrackingManager(EmailTrackingManager emailTrackingManager) {
this.emailTrackingManager = emailTrackingManager;
}
/**
* @return the serverInfo
*/
public ServerInfo getServerInfo() {
return serverInfo;
}
/**
* @param serverInfo the serverInfo to set
*/
public void setServerInfo(ServerInfo serverInfo) {
this.serverInfo = serverInfo;
}
/**
* @return the emailServiceProducer
*/
public EmailServiceProducer getEmailServiceProducer() {
return emailServiceProducer;
}
/**
* @param emailServiceProducer the emailServiceProducer to set
*/
public void setEmailServiceProducer(EmailServiceProducer emailServiceProducer) {
this.emailServiceProducer = emailServiceProducer;
}
/**
* @return the messageCreator
*/
public MessageCreator getMessageCreator() {
return messageCreator;
}
/**
* @param messageCreator the messageCreator to set
*/
public void setMessageCreator(MessageCreator messageCreator) {
this.messageCreator = messageCreator;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_email_service_EmailServiceImpl.java
|
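The interesting branch in EmailServiceImpl.sendBasicEmail above is the dispatch decision: a string flag parsed with Boolean.parseBoolean selects between a reliable async JMS producer and a direct message creator, failing fast when async is requested but no producer is wired in. The sketch below mirrors only that branch; Sender and DispatchSketch are hypothetical stand-ins, not Broadleaf types.
import java.util.HashMap;
import java.util.Map;

public class DispatchSketch {
    interface Sender { void send(Map<String, Object> props); }
    private final Sender asyncProducer; // may be null when no producer is configured
    private final Sender directCreator;
    DispatchSketch(Sender asyncProducer, Sender directCreator) {
        this.asyncProducer = asyncProducer;
        this.directCreator = directCreator;
    }
    // Mirrors sendBasicEmail: a string flag chooses reliable-async over direct dispatch.
    boolean send(String sendReliableAsync, Map<String, Object> props) {
        if (Boolean.parseBoolean(sendReliableAsync)) {
            if (asyncProducer == null) {
                throw new IllegalStateException("async send requested but no producer configured");
            }
            asyncProducer.send(props);
        } else {
            directCreator.send(props);
        }
        return true;
    }
    public static void main(String[] args) {
        DispatchSketch sketch = new DispatchSketch(null, props -> System.out.println("direct: " + props));
        sketch.send("false", new HashMap<>());
    }
}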
2,349 |
public class StartProcessingJobOperation<K>
extends AbstractOperation
implements IdentifiedDataSerializable {
private String name;
private Collection<K> keys;
private String jobId;
private KeyPredicate<K> predicate;
public StartProcessingJobOperation() {
}
public StartProcessingJobOperation(String name, String jobId, Collection<K> keys, KeyPredicate<K> predicate) {
this.name = name;
this.keys = keys;
this.jobId = jobId;
this.predicate = predicate;
}
@Override
public boolean returnsResponse() {
return false;
}
@Override
public String getServiceName() {
return MapReduceService.SERVICE_NAME;
}
@Override
public void run()
throws Exception {
MapReduceService mapReduceService = getService();
JobSupervisor supervisor = mapReduceService.getJobSupervisor(name, jobId);
if (supervisor == null) {
if (mapReduceService.unregisterJobSupervisorCancellation(name, jobId)) {
// Supervisor was cancelled prior to creation
AbstractJobTracker jobTracker = (AbstractJobTracker) mapReduceService.getJobTracker(name);
TrackableJobFuture future = jobTracker.unregisterTrackableJob(jobId);
if (future != null) {
Exception exception = new CancellationException("Operation was cancelled by the user");
future.setResult(exception);
}
}
return;
}
MappingPhase mappingPhase = new KeyValueSourceMappingPhase(keys, predicate);
supervisor.startTasks(mappingPhase);
}
@Override
public void writeInternal(ObjectDataOutput out)
throws IOException {
out.writeUTF(name);
out.writeUTF(jobId);
out.writeInt(keys == null ? 0 : keys.size());
if (keys != null) {
for (Object key : keys) {
out.writeObject(key);
}
}
out.writeObject(predicate);
}
@Override
public void readInternal(ObjectDataInput in)
throws IOException {
name = in.readUTF();
jobId = in.readUTF();
int size = in.readInt();
keys = new ArrayList<K>();
for (int i = 0; i < size; i++) {
keys.add((K) in.readObject());
}
predicate = in.readObject();
}
@Override
public int getFactoryId() {
return MapReduceDataSerializerHook.F_ID;
}
@Override
public int getId() {
return MapReduceDataSerializerHook.START_PROCESSING_OPERATION;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_operation_StartProcessingJobOperation.java
|
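StartProcessingJobOperation's wire format above encodes a possibly-null key collection as a size prefix followed by the elements, so a null collection round-trips as an empty list rather than null. A self-contained sketch of that codec, using plain DataOutput/DataInput streams instead of Hazelcast's ObjectDataOutput:
import java.io.*;
import java.util.*;

public class NullableListCodec {
    // Mirrors writeInternal: a null list is written as size 0, so readers never see null.
    static void writeKeys(DataOutput out, List<String> keys) throws IOException {
        out.writeInt(keys == null ? 0 : keys.size());
        if (keys != null) {
            for (String k : keys) out.writeUTF(k);
        }
    }
    // Mirrors readInternal: always materializes a list, empty when nothing was written.
    static List<String> readKeys(DataInput in) throws IOException {
        int size = in.readInt();
        List<String> keys = new ArrayList<>();
        for (int i = 0; i < size; i++) keys.add(in.readUTF());
        return keys;
    }
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        writeKeys(new DataOutputStream(bos), Arrays.asList("a", "b"));
        List<String> back = readKeys(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(back); // [a, b]
    }
}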
364 |
public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder> {
/**
* Constructs unregister repository request builder
*
* @param clusterAdminClient cluster admin client
*/
public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) {
super((InternalClusterAdminClient) clusterAdminClient, new DeleteRepositoryRequest());
}
/**
* Constructs unregister repository request builder with specified repository name
*
     * @param clusterAdminClient cluster admin client
*/
public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) {
super((InternalClusterAdminClient) clusterAdminClient, new DeleteRepositoryRequest(name));
}
/**
* Sets the repository name
*
* @param name the repository name
*/
public DeleteRepositoryRequestBuilder setName(String name) {
request.name(name);
return this;
}
@Override
protected void doExecute(ActionListener<DeleteRepositoryResponse> listener) {
((ClusterAdminClient) client).deleteRepository(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_delete_DeleteRepositoryRequestBuilder.java
|
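A plausible caller of the builder above, assuming an Elasticsearch 1.x node reachable at localhost:9300; the repository name "my_backup" is illustrative, not from the source.
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.transport.InetSocketTransportAddress;

public class DeleteRepoExample {
    public static void main(String[] args) {
        // Assumes a reachable cluster; TransportClient is the 1.x remote client.
        Client client = new TransportClient()
                .addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
        try {
            DeleteRepositoryResponse response = client.admin().cluster()
                    .prepareDeleteRepository("my_backup") // builds via the request builder above
                    .execute().actionGet();
            System.out.println("acknowledged: " + response.isAcknowledged());
        } finally {
            client.close();
        }
    }
}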
20 |
return Iterables.transform(Iterables.filter(set, new Predicate<ByteEntry>() {
@Override
public boolean apply(@Nullable ByteEntry entry) {
return !CHECK_VALUE || entry.value.getInt(0) == value;
}
}), new Function<ByteEntry, Vertex>() {
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java
|
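The fragment above composes Guava's Iterables.filter and Iterables.transform lazily: elements are tested and converted only as the result is iterated. A self-contained sketch of the same composition on plain integers:
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.util.Arrays;
import java.util.List;

public class FilterTransformSketch {
    public static void main(String[] args) {
        List<Integer> nums = Arrays.asList(1, 2, 3, 4);
        // Lazily keep even numbers, then render each as a string; work happens on iteration.
        Iterable<String> evens = Iterables.transform(
                Iterables.filter(nums, new Predicate<Integer>() {
                    @Override
                    public boolean apply(Integer n) {
                        return n % 2 == 0;
                    }
                }),
                new Function<Integer, String>() {
                    @Override
                    public String apply(Integer n) {
                        return "v" + n;
                    }
                });
        System.out.println(Lists.newArrayList(evens)); // [v2, v4]
    }
}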