Unnamed: 0 (int64, 0–6.45k) | func (string, lengths 37–161k) | target (class label, 2 classes) | project (string, lengths 33–167) |
---|---|---|---|
2,613 |
private static class DefaultStringCreator implements UTFEncoderDecoder.StringCreator {
@Override
public String buildString(char[] chars) {
return new String(chars);
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_nio_UTFEncoderDecoder.java
|
249 |
service.submitToMembers(runnable, collection, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
responseLatch.countDown();
}
public void onComplete(Map<Member, Object> values) {
completeLatch.countDown();
}
});
| 0 (true)
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
1,425 |
clusterService.submitStateUpdateTask("update-mapping [" + index + "][" + type + "] / node [" + nodeId + "], order [" + order + "]", Priority.HIGH, new ClusterStateUpdateTask() {
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public ClusterState execute(final ClusterState currentState) throws Exception {
return executeRefreshOrUpdate(currentState, insertOrder);
}
});
| 0 (true)
|
src_main_java_org_elasticsearch_cluster_metadata_MetaDataMappingService.java
|
1,507 |
public class ElectReplicaAsPrimaryDuringRelocationTests extends ElasticsearchAllocationTestCase {
private final ESLogger logger = Loggers.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class);
@Test
public void testElectReplicaAsPrimaryDuringRelocation() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("Start the primary shards");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("Start the replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(2));
assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
logger.info("Start another node and perform rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("find the replica shard that gets relocated");
IndexShardRoutingTable indexShardRoutingTable = null;
if (routingTable.index("test").shard(0).replicaShards().get(0).relocating()) {
indexShardRoutingTable = routingTable.index("test").shard(0);
} else if (routingTable.index("test").shard(1).replicaShards().get(0).relocating()) {
indexShardRoutingTable = routingTable.index("test").shard(1);
}
// we might have primary relocating, and the test is only for replicas, so only test in the case of replica allocation
if (indexShardRoutingTable != null) {
logger.info("kill the node [{}] of the primary shard for the relocating replica", indexShardRoutingTable.primaryShard().currentNodeId());
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("make sure all the primary shards are active");
assertThat(routingTable.index("test").shard(0).primaryShard().active(), equalTo(true));
assertThat(routingTable.index("test").shard(1).primaryShard().active(), equalTo(true));
}
}
}
| 0 (true)
|
src_test_java_org_elasticsearch_cluster_routing_allocation_ElectReplicaAsPrimaryDuringRelocationTests.java
|
832 |
public class SearchResponse extends ActionResponse implements ToXContent {
private InternalSearchResponse internalResponse;
private String scrollId;
private int totalShards;
private int successfulShards;
private ShardSearchFailure[] shardFailures;
private long tookInMillis;
public SearchResponse() {
}
public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards, long tookInMillis, ShardSearchFailure[] shardFailures) {
this.internalResponse = internalResponse;
this.scrollId = scrollId;
this.totalShards = totalShards;
this.successfulShards = successfulShards;
this.tookInMillis = tookInMillis;
this.shardFailures = shardFailures;
}
public RestStatus status() {
if (shardFailures.length == 0) {
if (successfulShards == 0 && totalShards > 0) {
return RestStatus.SERVICE_UNAVAILABLE;
}
return RestStatus.OK;
}
// if total failure, bubble up the status code to the response level
if (successfulShards == 0 && totalShards > 0) {
RestStatus status = RestStatus.OK;
for (int i = 0; i < shardFailures.length; i++) {
RestStatus shardStatus = shardFailures[i].status();
if (shardStatus.getStatus() >= status.getStatus()) {
status = shardFailures[i].status();
}
}
return status;
}
return RestStatus.OK;
}
/**
* The search hits.
*/
public SearchHits getHits() {
return internalResponse.hits();
}
/**
* The search facets.
*/
public Facets getFacets() {
return internalResponse.facets();
}
public Aggregations getAggregations() {
return internalResponse.aggregations();
}
public Suggest getSuggest() {
return internalResponse.suggest();
}
/**
* Has the search operation timed out.
*/
public boolean isTimedOut() {
return internalResponse.timedOut();
}
/**
* How long the search took.
*/
public TimeValue getTook() {
return new TimeValue(tookInMillis);
}
/**
* How long the search took in milliseconds.
*/
public long getTookInMillis() {
return tookInMillis;
}
/**
* The total number of shards the search was executed on.
*/
public int getTotalShards() {
return totalShards;
}
/**
* The successful number of shards the search was executed on.
*/
public int getSuccessfulShards() {
return successfulShards;
}
/**
* The failed number of shards the search was executed on.
*/
public int getFailedShards() {
// we don't return totalShards - successfulShards, we don't count "no shards available" as a failed shard, just don't
// count it in the successful counter
return shardFailures.length;
}
/**
* The failures that occurred during the search.
*/
public ShardSearchFailure[] getShardFailures() {
return this.shardFailures;
}
/**
* If scrolling was enabled ({@link SearchRequest#scroll(org.elasticsearch.search.Scroll)}, the
* scroll id that can be used to continue scrolling.
*/
public String getScrollId() {
return scrollId;
}
static final class Fields {
static final XContentBuilderString _SCROLL_ID = new XContentBuilderString("_scroll_id");
static final XContentBuilderString _SHARDS = new XContentBuilderString("_shards");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString SUCCESSFUL = new XContentBuilderString("successful");
static final XContentBuilderString FAILED = new XContentBuilderString("failed");
static final XContentBuilderString FAILURES = new XContentBuilderString("failures");
static final XContentBuilderString STATUS = new XContentBuilderString("status");
static final XContentBuilderString INDEX = new XContentBuilderString("index");
static final XContentBuilderString SHARD = new XContentBuilderString("shard");
static final XContentBuilderString REASON = new XContentBuilderString("reason");
static final XContentBuilderString TOOK = new XContentBuilderString("took");
static final XContentBuilderString TIMED_OUT = new XContentBuilderString("timed_out");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (scrollId != null) {
builder.field(Fields._SCROLL_ID, scrollId);
}
builder.field(Fields.TOOK, tookInMillis);
builder.field(Fields.TIMED_OUT, isTimedOut());
builder.startObject(Fields._SHARDS);
builder.field(Fields.TOTAL, getTotalShards());
builder.field(Fields.SUCCESSFUL, getSuccessfulShards());
builder.field(Fields.FAILED, getFailedShards());
if (shardFailures.length > 0) {
builder.startArray(Fields.FAILURES);
for (ShardSearchFailure shardFailure : shardFailures) {
builder.startObject();
if (shardFailure.shard() != null) {
builder.field(Fields.INDEX, shardFailure.shard().index());
builder.field(Fields.SHARD, shardFailure.shard().shardId());
}
builder.field(Fields.STATUS, shardFailure.status().getStatus());
builder.field(Fields.REASON, shardFailure.reason());
builder.endObject();
}
builder.endArray();
}
builder.endObject();
internalResponse.toXContent(builder, params);
return builder;
}
public static SearchResponse readSearchResponse(StreamInput in) throws IOException {
SearchResponse response = new SearchResponse();
response.readFrom(in);
return response;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
internalResponse = readInternalSearchResponse(in);
totalShards = in.readVInt();
successfulShards = in.readVInt();
int size = in.readVInt();
if (size == 0) {
shardFailures = ShardSearchFailure.EMPTY_ARRAY;
} else {
shardFailures = new ShardSearchFailure[size];
for (int i = 0; i < shardFailures.length; i++) {
shardFailures[i] = readShardSearchFailure(in);
}
}
scrollId = in.readOptionalString();
tookInMillis = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
internalResponse.writeTo(out);
out.writeVInt(totalShards);
out.writeVInt(successfulShards);
out.writeVInt(shardFailures.length);
for (ShardSearchFailure shardSearchFailure : shardFailures) {
shardSearchFailure.writeTo(out);
}
out.writeOptionalString(scrollId);
out.writeVLong(tookInMillis);
}
@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_action_search_SearchResponse.java
|
427 |
public class ClientMultiMapProxy<K, V> extends ClientProxy implements MultiMap<K, V> {
private final String name;
public ClientMultiMapProxy(String instanceName, String serviceName, String name) {
super(instanceName, serviceName, name);
this.name = name;
}
public boolean put(K key, V value) {
Data keyData = toData(key);
Data valueData = toData(value);
PutRequest request = new PutRequest(name, keyData, valueData, -1, ThreadUtil.getThreadId());
Boolean result = invoke(request, keyData);
return result;
}
public Collection<V> get(K key) {
Data keyData = toData(key);
GetAllRequest request = new GetAllRequest(name, keyData);
PortableCollection result = invoke(request, keyData);
return toObjectCollection(result, true);
}
public boolean remove(Object key, Object value) {
Data keyData = toData(key);
Data valueData = toData(value);
RemoveRequest request = new RemoveRequest(name, keyData, valueData, ThreadUtil.getThreadId());
Boolean result = invoke(request, keyData);
return result;
}
public Collection<V> remove(Object key) {
Data keyData = toData(key);
RemoveAllRequest request = new RemoveAllRequest(name, keyData, ThreadUtil.getThreadId());
PortableCollection result = invoke(request, keyData);
return toObjectCollection(result, true);
}
public Set<K> localKeySet() {
throw new UnsupportedOperationException("Locality for client is ambiguous");
}
public Set<K> keySet() {
KeySetRequest request = new KeySetRequest(name);
PortableCollection result = invoke(request);
return (Set) toObjectCollection(result, false);
}
public Collection<V> values() {
ValuesRequest request = new ValuesRequest(name);
PortableCollection result = invoke(request);
return toObjectCollection(result, true);
}
public Set<Map.Entry<K, V>> entrySet() {
EntrySetRequest request = new EntrySetRequest(name);
PortableEntrySetResponse result = invoke(request);
Set<Map.Entry> dataEntrySet = result.getEntrySet();
Set<Map.Entry<K, V>> entrySet = new HashSet<Map.Entry<K, V>>(dataEntrySet.size());
for (Map.Entry entry : dataEntrySet) {
Object key = toObject((Data) entry.getKey());
Object val = toObject((Data) entry.getValue());
entrySet.add(new AbstractMap.SimpleEntry(key, val));
}
return entrySet;
}
public boolean containsKey(K key) {
Data keyData = toData(key);
ContainsEntryRequest request = new ContainsEntryRequest(name, keyData, null);
Boolean result = invoke(request, keyData);
return result;
}
public boolean containsValue(Object value) {
Data valueData = toData(value);
ContainsEntryRequest request = new ContainsEntryRequest(name, null, valueData);
Boolean result = invoke(request);
return result;
}
public boolean containsEntry(K key, V value) {
Data keyData = toData(key);
Data valueData = toData(value);
ContainsEntryRequest request = new ContainsEntryRequest(name, keyData, valueData);
Boolean result = invoke(request, keyData);
return result;
}
public int size() {
SizeRequest request = new SizeRequest(name);
Integer result = invoke(request);
return result;
}
public void clear() {
ClearRequest request = new ClearRequest(name);
invoke(request);
}
public int valueCount(K key) {
Data keyData = toData(key);
CountRequest request = new CountRequest(name, keyData);
Integer result = invoke(request, keyData);
return result;
}
public String addLocalEntryListener(EntryListener<K, V> listener) {
throw new UnsupportedOperationException("Locality for client is ambiguous");
}
public String addEntryListener(EntryListener<K, V> listener, boolean includeValue) {
isNotNull(listener, "listener");
AddEntryListenerRequest request = new AddEntryListenerRequest(name, null, includeValue);
EventHandler<PortableEntryEvent> handler = createHandler(listener, includeValue);
return listen(request, handler);
}
public boolean removeEntryListener(String registrationId) {
final RemoveEntryListenerRequest request = new RemoveEntryListenerRequest(name, registrationId);
return stopListening(request, registrationId);
}
public String addEntryListener(EntryListener<K, V> listener, K key, boolean includeValue) {
final Data keyData = toData(key);
AddEntryListenerRequest request = new AddEntryListenerRequest(name, keyData, includeValue);
EventHandler<PortableEntryEvent> handler = createHandler(listener, includeValue);
return listen(request, keyData, handler);
}
public void lock(K key) {
final Data keyData = toData(key);
MultiMapLockRequest request = new MultiMapLockRequest(keyData, ThreadUtil.getThreadId(), name);
invoke(request, keyData);
}
public void lock(K key, long leaseTime, TimeUnit timeUnit) {
shouldBePositive(leaseTime, "leaseTime");
final Data keyData = toData(key);
MultiMapLockRequest request = new MultiMapLockRequest(keyData, ThreadUtil.getThreadId(),
getTimeInMillis(leaseTime, timeUnit), -1, name);
invoke(request, keyData);
}
public boolean isLocked(K key) {
final Data keyData = toData(key);
final MultiMapIsLockedRequest request = new MultiMapIsLockedRequest(keyData, name);
final Boolean result = invoke(request, keyData);
return result;
}
public boolean tryLock(K key) {
try {
return tryLock(key, 0, null);
} catch (InterruptedException e) {
return false;
}
}
public boolean tryLock(K key, long time, TimeUnit timeunit) throws InterruptedException {
final Data keyData = toData(key);
MultiMapLockRequest request = new MultiMapLockRequest(keyData, ThreadUtil.getThreadId(),
Long.MAX_VALUE, getTimeInMillis(time, timeunit), name);
Boolean result = invoke(request, keyData);
return result;
}
public void unlock(K key) {
final Data keyData = toData(key);
MultiMapUnlockRequest request = new MultiMapUnlockRequest(keyData, ThreadUtil.getThreadId(), name);
invoke(request, keyData);
}
public void forceUnlock(K key) {
final Data keyData = toData(key);
MultiMapUnlockRequest request = new MultiMapUnlockRequest(keyData, ThreadUtil.getThreadId(), true, name);
invoke(request, keyData);
}
public LocalMultiMapStats getLocalMultiMapStats() {
throw new UnsupportedOperationException("Locality is ambiguous for client!!!");
}
protected void onDestroy() {
}
private Collection toObjectCollection(PortableCollection result, boolean list) {
Collection<Data> coll = result.getCollection();
Collection collection;
if (list) {
collection = new ArrayList(coll == null ? 0 : coll.size());
} else {
collection = new HashSet(coll == null ? 0 : coll.size());
}
if (coll == null) {
return collection;
}
for (Data data : coll) {
collection.add(toObject(data));
}
return collection;
}
protected long getTimeInMillis(final long time, final TimeUnit timeunit) {
return timeunit != null ? timeunit.toMillis(time) : time;
}
private EventHandler<PortableEntryEvent> createHandler(final EntryListener<K, V> listener, final boolean includeValue) {
return new EventHandler<PortableEntryEvent>() {
public void handle(PortableEntryEvent event) {
V value = null;
V oldValue = null;
if (includeValue) {
value = (V) toObject(event.getValue());
oldValue = (V) toObject(event.getOldValue());
}
K key = (K) toObject(event.getKey());
Member member = getContext().getClusterService().getMember(event.getUuid());
EntryEvent<K, V> entryEvent = new EntryEvent<K, V>(name, member,
event.getEventType().getType(), key, oldValue, value);
switch (event.getEventType()) {
case ADDED:
listener.entryAdded(entryEvent);
break;
case REMOVED:
listener.entryRemoved(entryEvent);
break;
case UPDATED:
listener.entryUpdated(entryEvent);
break;
case EVICTED:
listener.entryEvicted(entryEvent);
break;
default:
throw new IllegalArgumentException("Not a known event type " + event.getEventType());
}
}
@Override
public void onListenerRegister() {
}
};
}
@Override
public String toString() {
return "MultiMap{" + "name='" + getName() + '\'' + '}';
}
}
| 1 (no label)
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientMultiMapProxy.java
|
1,095 |
public abstract class AbstractOrderItemRequest {
protected Sku sku;
protected Category category;
protected Product product;
protected Order order;
protected int quantity;
protected Money salePriceOverride;
protected Money retailPriceOverride;
protected PersonalMessage personalMessage;
protected Map<String,String> itemAttributes = new HashMap<String,String>();
public Sku getSku() {
return sku;
}
public void setSku(Sku sku) {
this.sku = sku;
}
public Category getCategory() {
return category;
}
public void setCategory(Category category) {
this.category = category;
}
public Product getProduct() {
return product;
}
public void setProduct(Product product) {
this.product = product;
}
public void setOrder(Order order) {
this.order = order;
}
public Order getOrder() {
return order;
}
public int getQuantity() {
return quantity;
}
public void setQuantity(int quantity) {
this.quantity = quantity;
}
public Map<String, String> getItemAttributes() {
return itemAttributes;
}
public void setItemAttributes(Map<String, String> itemAttributes) {
this.itemAttributes = itemAttributes;
}
public Money getSalePriceOverride() {
return salePriceOverride;
}
public void setSalePriceOverride(Money salePriceOverride) {
this.salePriceOverride = salePriceOverride;
}
public Money getRetailPriceOverride() {
return retailPriceOverride;
}
public void setRetailPriceOverride(Money retailPriceOverride) {
this.retailPriceOverride = retailPriceOverride;
}
protected void copyProperties(AbstractOrderItemRequest newRequest) {
newRequest.setCategory(category);
newRequest.setItemAttributes(itemAttributes);
newRequest.setPersonalMessage(personalMessage);
newRequest.setProduct(product);
newRequest.setQuantity(quantity);
newRequest.setSku(sku);
newRequest.setOrder(order);
newRequest.setSalePriceOverride(salePriceOverride);
newRequest.setRetailPriceOverride(retailPriceOverride);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof AbstractOrderItemRequest)) return false;
AbstractOrderItemRequest that = (AbstractOrderItemRequest) o;
if (quantity != that.quantity) return false;
if (category != null ? !category.equals(that.category) : that.category != null) return false;
if (product != null ? !product.equals(that.product) : that.product != null) return false;
if (salePriceOverride != null ? !salePriceOverride.equals(that.salePriceOverride) : that.salePriceOverride != null)
return false;
if (sku != null ? !sku.equals(that.sku) : that.sku != null) return false;
if (order != null ? !order.equals(that.order) : that.order != null) return false;
return true;
}
@Override
public int hashCode() {
int result = sku != null ? sku.hashCode() : 0;
result = 31 * result + (category != null ? category.hashCode() : 0);
result = 31 * result + (product != null ? product.hashCode() : 0);
result = 31 * result + (order != null ? order.hashCode() : 0);
result = 31 * result + quantity;
result = 31 * result + (salePriceOverride != null ? salePriceOverride.hashCode() : 0);
return result;
}
public PersonalMessage getPersonalMessage() {
return personalMessage;
}
public void setPersonalMessage(PersonalMessage personalMessage) {
this.personalMessage = personalMessage;
}
}
| 1 (no label)
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_call_AbstractOrderItemRequest.java
|
361 |
@Deprecated
public class OGraphDatabase extends ODatabaseDocumentTx {
private final boolean preferSBTreeSet = OGlobalConfiguration.PREFER_SBTREE_SET.getValueAsBoolean();
public enum LOCK_MODE {
NO_LOCKING, DATABASE_LEVEL_LOCKING, RECORD_LEVEL_LOCKING
}
public enum DIRECTION {
BOTH, IN, OUT
}
public static final String TYPE = "graph";
public static final String VERTEX_CLASS_NAME = "OGraphVertex";
public static final String VERTEX_ALIAS = "V";
public static final String VERTEX_FIELD_IN = "in_";
public static final String VERTEX_FIELD_OUT = "out_";
public static final String VERTEX_FIELD_IN_OLD = "in";
public static final String VERTEX_FIELD_OUT_OLD = "out";
public static final String EDGE_CLASS_NAME = "OGraphEdge";
public static final String EDGE_ALIAS = "E";
public static final String EDGE_FIELD_IN = "in";
public static final String EDGE_FIELD_OUT = "out";
public static final String LABEL = "label";
private String outV = VERTEX_FIELD_OUT;
private String inV = VERTEX_FIELD_IN;
private boolean useCustomTypes = true;
private boolean safeMode = false;
private LOCK_MODE lockMode = LOCK_MODE.NO_LOCKING;
private boolean retroCompatibility = false;
protected OClass vertexBaseClass;
protected OClass edgeBaseClass;
public OGraphDatabase(final String iURL) {
super(iURL);
}
public OGraphDatabase(final ODatabaseRecordTx iSource) {
super(iSource);
checkForGraphSchema();
}
@Override
@SuppressWarnings("unchecked")
public <THISDB extends ODatabase> THISDB open(final String iUserName, final String iUserPassword) {
super.open(iUserName, iUserPassword);
checkForGraphSchema();
return (THISDB) this;
}
@Override
@SuppressWarnings("unchecked")
public <THISDB extends ODatabase> THISDB create() {
super.create();
checkForGraphSchema();
return (THISDB) this;
}
@Override
public void close() {
super.close();
vertexBaseClass = null;
edgeBaseClass = null;
}
public long countVertexes() {
return countClass(VERTEX_ALIAS);
}
public long countEdges() {
return countClass(EDGE_ALIAS);
}
public Iterable<ODocument> browseVertices() {
return browseElements(VERTEX_ALIAS, true);
}
public Iterable<ODocument> browseVertices(final boolean iPolymorphic) {
return browseElements(VERTEX_ALIAS, iPolymorphic);
}
public Iterable<ODocument> browseEdges() {
return browseElements(EDGE_ALIAS, true);
}
public Iterable<ODocument> browseEdges(final boolean iPolymorphic) {
return browseElements(EDGE_ALIAS, iPolymorphic);
}
public Iterable<ODocument> browseElements(final String iClass, final boolean iPolymorphic) {
return new ORecordIteratorClass<ODocument>(this, (ODatabaseRecordAbstract) getUnderlying(), iClass, iPolymorphic, true, false);
}
public ODocument createVertex() {
return createVertex(null);
}
public ODocument createVertex(final String iClassName) {
return createVertex(iClassName, (Object[]) null);
}
@SuppressWarnings("unchecked")
public ODocument createVertex(final String iClassName, final Object... iFields) {
final OClass cls = checkVertexClass(iClassName);
final ODocument vertex = new ODocument(cls).setOrdered(true);
if (iFields != null)
// SET THE FIELDS
if (iFields != null)
if (iFields.length == 1) {
Object f = iFields[0];
if (f instanceof Map<?, ?>)
vertex.fields((Map<String, Object>) f);
else
throw new IllegalArgumentException(
"Invalid fields: expecting a pairs of fields as String,Object or a single Map<String,Object>, but found: " + f);
} else
// SET THE FIELDS
for (int i = 0; i < iFields.length; i += 2)
vertex.field(iFields[i].toString(), iFields[i + 1]);
return vertex;
}
public ODocument createEdge(final ORID iSourceVertexRid, final ORID iDestVertexRid) {
return createEdge(iSourceVertexRid, iDestVertexRid, null);
}
public ODocument createEdge(final ORID iSourceVertexRid, final ORID iDestVertexRid, final String iClassName) {
final ODocument sourceVertex = load(iSourceVertexRid);
if (sourceVertex == null)
throw new IllegalArgumentException("Source vertex '" + iSourceVertexRid + "' does not exist");
final ODocument destVertex = load(iDestVertexRid);
if (destVertex == null)
throw new IllegalArgumentException("Source vertex '" + iDestVertexRid + "' does not exist");
return createEdge(sourceVertex, destVertex, iClassName);
}
public ODocument createEdge(final ODocument iSourceVertex, final ODocument iDestVertex) {
return createEdge(iSourceVertex, iDestVertex, null);
}
public ODocument createEdge(final ODocument iOutVertex, final ODocument iInVertex, final String iClassName) {
return createEdge(iOutVertex, iInVertex, iClassName, (Object[]) null);
}
@SuppressWarnings("unchecked")
public ODocument createEdge(final ODocument iOutVertex, final ODocument iInVertex, final String iClassName, Object... iFields) {
if (iOutVertex == null)
throw new IllegalArgumentException("iOutVertex is null");
if (iInVertex == null)
throw new IllegalArgumentException("iInVertex is null");
final OClass cls = checkEdgeClass(iClassName);
final boolean safeMode = beginBlock();
try {
final ODocument edge = new ODocument(cls).setOrdered(true);
edge.field(EDGE_FIELD_OUT, iOutVertex);
edge.field(EDGE_FIELD_IN, iInVertex);
if (iFields != null)
if (iFields.length == 1) {
Object f = iFields[0];
if (f instanceof Map<?, ?>)
edge.fields((Map<String, Object>) f);
else
throw new IllegalArgumentException(
"Invalid fields: expecting a pairs of fields as String,Object or a single Map<String,Object>, but found: " + f);
} else
// SET THE FIELDS
for (int i = 0; i < iFields.length; i += 2)
edge.field(iFields[i].toString(), iFields[i + 1]);
// OUT FIELD
updateVertexLinks(iOutVertex, edge, outV);
// IN FIELD
updateVertexLinks(iInVertex, edge, inV);
edge.setDirty();
if (safeMode) {
save(edge);
commitBlock(safeMode);
}
return edge;
} catch (RuntimeException e) {
rollbackBlock(safeMode);
throw e;
}
}
private void updateVertexLinks(ODocument iVertex, ODocument edge, String vertexField) {
acquireWriteLock(iVertex);
try {
final Object field = iVertex.field(vertexField);
final Set<OIdentifiable> links;
if (field instanceof OMVRBTreeRIDSet || field instanceof OSBTreeRIDSet) {
links = (Set<OIdentifiable>) field;
} else if (field instanceof Collection<?>) {
if (preferSBTreeSet)
links = new OSBTreeRIDSet(iVertex, (Collection<OIdentifiable>) field);
else
links = new OMVRBTreeRIDSet(iVertex, (Collection<OIdentifiable>) field);
iVertex.field(vertexField, links);
} else {
links = createRIDSet(iVertex);
iVertex.field(vertexField, links);
}
links.add(edge);
} finally {
releaseWriteLock(iVertex);
}
}
@SuppressWarnings("unchecked")
public boolean removeEdge(final OIdentifiable iEdge) {
if (iEdge == null)
return false;
final ODocument edge = iEdge.getRecord();
if (edge == null)
return false;
final boolean safeMode = beginBlock();
try {
// OUT VERTEX
final ODocument outVertex = edge.field(EDGE_FIELD_OUT);
acquireWriteLock(outVertex);
try {
if (outVertex != null) {
final Set<OIdentifiable> out = getEdgeSet(outVertex, outV);
if (out != null)
out.remove(edge);
save(outVertex);
}
} finally {
releaseWriteLock(outVertex);
}
// IN VERTEX
final ODocument inVertex = edge.field(EDGE_FIELD_IN);
acquireWriteLock(inVertex);
try {
if (inVertex != null) {
final Set<OIdentifiable> in = getEdgeSet(inVertex, inV);
if (in != null)
in.remove(edge);
save(inVertex);
}
} finally {
releaseWriteLock(inVertex);
}
delete(edge);
commitBlock(safeMode);
} catch (RuntimeException e) {
rollbackBlock(safeMode);
throw e;
}
return true;
}
public boolean removeVertex(final OIdentifiable iVertex) {
if (iVertex == null)
return false;
final ODocument vertex = (ODocument) iVertex.getRecord();
if (vertex == null)
return false;
final boolean safeMode = beginBlock();
try {
ODocument otherVertex;
Set<OIdentifiable> otherEdges;
// REMOVE OUT EDGES
acquireWriteLock(vertex);
try {
Set<OIdentifiable> edges = getEdgeSet(vertex, outV);
if (edges != null) {
for (OIdentifiable e : edges) {
if (e != null) {
final ODocument edge = e.getRecord();
if (edge != null) {
otherVertex = edge.field(EDGE_FIELD_IN);
if (otherVertex != null) {
otherEdges = getEdgeSet(otherVertex, inV);
if (otherEdges != null && otherEdges.remove(edge))
save(otherVertex);
}
delete(edge);
}
}
}
}
// REMOVE IN EDGES
edges = getEdgeSet(vertex, inV);
if (edges != null) {
for (OIdentifiable e : edges) {
if (e != null) {
if (e != null) {
final ODocument edge = e.getRecord();
otherVertex = edge.field(EDGE_FIELD_OUT);
if (otherVertex != null) {
otherEdges = getEdgeSet(otherVertex, outV);
if (otherEdges != null && otherEdges.remove(edge))
save(otherVertex);
}
delete(edge);
}
}
}
}
// DELETE VERTEX AS DOCUMENT
delete(vertex);
} finally {
releaseWriteLock(vertex);
}
commitBlock(safeMode);
return true;
} catch (RuntimeException e) {
rollbackBlock(safeMode);
throw e;
}
}
/**
* Returns all the edges between the vertexes iVertex1 and iVertex2.
*
* @param iVertex1
* First Vertex
* @param iVertex2
* Second Vertex
* @return The Set with the common Edges between the two vertexes. If edges aren't found the set is empty
*/
public Set<OIdentifiable> getEdgesBetweenVertexes(final OIdentifiable iVertex1, final OIdentifiable iVertex2) {
return getEdgesBetweenVertexes(iVertex1, iVertex2, null, null);
}
/**
* Returns all the edges between the vertexes iVertex1 and iVertex2 with label between the array of labels passed as iLabels.
*
* @param iVertex1
* First Vertex
* @param iVertex2
* Second Vertex
* @param iLabels
* Array of strings with the labels to get as filter
* @return The Set with the common Edges between the two vertexes. If edges aren't found the set is empty
*/
public Set<OIdentifiable> getEdgesBetweenVertexes(final OIdentifiable iVertex1, final OIdentifiable iVertex2,
final String[] iLabels) {
return getEdgesBetweenVertexes(iVertex1, iVertex2, iLabels, null);
}
/**
* Returns all the edges between the vertexes iVertex1 and iVertex2 with label between the array of labels passed as iLabels and
* with class between the array of class names passed as iClassNames.
*
* @param iVertex1
* First Vertex
* @param iVertex2
* Second Vertex
* @param iLabels
* Array of strings with the labels to get as filter
* @param iClassNames
* Array of strings with the name of the classes to get as filter
* @return The Set with the common Edges between the two vertexes. If edges aren't found the set is empty
*/
public Set<OIdentifiable> getEdgesBetweenVertexes(final OIdentifiable iVertex1, final OIdentifiable iVertex2,
final String[] iLabels, final String[] iClassNames) {
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
if (iVertex1 != null && iVertex2 != null) {
acquireReadLock(iVertex1);
try {
// CHECK OUT EDGES
for (OIdentifiable e : getOutEdges(iVertex1)) {
final ODocument edge = (ODocument) e.getRecord();
if (checkEdge(edge, iLabels, iClassNames)) {
final OIdentifiable in = edge.<ODocument> field("in");
if (in != null && in.equals(iVertex2))
result.add(edge);
}
}
// CHECK IN EDGES
for (OIdentifiable e : getInEdges(iVertex1)) {
final ODocument edge = (ODocument) e.getRecord();
if (checkEdge(edge, iLabels, iClassNames)) {
final OIdentifiable out = edge.<ODocument> field("out");
if (out != null && out.equals(iVertex2))
result.add(edge);
}
}
} finally {
releaseReadLock(iVertex1);
}
}
return result;
}
public Set<OIdentifiable> getOutEdges(final OIdentifiable iVertex) {
return getOutEdges(iVertex, null);
}
/**
* Retrieves the outgoing edges of vertex iVertex having label equals to iLabel.
*
* @param iVertex
* Target vertex
* @param iLabel
* Label to search
* @return
*/
public Set<OIdentifiable> getOutEdges(final OIdentifiable iVertex, final String iLabel) {
if (iVertex == null)
return null;
final ODocument vertex = iVertex.getRecord();
checkVertexClass(vertex);
Set<OIdentifiable> result = null;
acquireReadLock(iVertex);
try {
final Set<OIdentifiable> set = getEdgeSet(vertex, outV);
if (iLabel == null)
// RETURN THE ENTIRE COLLECTION
if (set != null)
return Collections.unmodifiableSet(set);
else
return Collections.emptySet();
// FILTER BY LABEL
result = new HashSet<OIdentifiable>();
if (set != null)
for (OIdentifiable item : set) {
if (iLabel == null || iLabel.equals(((ODocument) item).field(LABEL)))
result.add(item);
}
} finally {
releaseReadLock(iVertex);
}
return result;
}
@SuppressWarnings("unchecked")
protected Set<OIdentifiable> getEdgeSet(final ODocument iVertex, final String iFieldName) {
final Object value = iVertex.field(iFieldName);
if (value != null && (value instanceof OMVRBTreeRIDSet || value instanceof OSBTreeRIDSet))
return (Set<OIdentifiable>) value;
final Set<OIdentifiable> set = createRIDSet(iVertex);
if (OMultiValue.isMultiValue(value))
// AUTOCONVERT FROM COLLECTION
set.addAll((Collection<? extends OIdentifiable>) value);
else
// AUTOCONVERT FROM SINGLE VALUE
set.add((OIdentifiable) value);
return set;
}
private Set<OIdentifiable> createRIDSet(ODocument iVertex) {
if (preferSBTreeSet)
return new OSBTreeRIDSet(iVertex);
else
return new OMVRBTreeRIDSet(iVertex);
}
/**
* Retrieves the outgoing edges of vertex iVertex having the requested properties iProperties set to the passed values
*
* @param iVertex
* Target vertex
* @param iProperties
* Map where keys are property names and values the expected values
* @return
*/
public Set<OIdentifiable> getOutEdgesHavingProperties(final OIdentifiable iVertex, final Map<String, Object> iProperties) {
if (iVertex == null)
return null;
final ODocument vertex = iVertex.getRecord();
checkVertexClass(vertex);
return filterEdgesByProperties(getEdgeSet(vertex, outV), iProperties);
}
/**
* Retrieves the outgoing edges of vertex iVertex having the requested properties iProperties
*
* @param iVertex
* Target vertex
* @param iProperties
* Map where keys are property names and values the expected values
* @return
*/
public Set<OIdentifiable> getOutEdgesHavingProperties(final OIdentifiable iVertex, Iterable<String> iProperties) {
if (iVertex == null)
return null;
final ODocument vertex = iVertex.getRecord();
checkVertexClass(vertex);
return filterEdgesByProperties(getEdgeSet(vertex, outV), iProperties);
}
public Set<OIdentifiable> getInEdges(final OIdentifiable iVertex) {
return getInEdges(iVertex, null);
}
public Set<OIdentifiable> getInEdges(final OIdentifiable iVertex, final String iLabel) {
if (iVertex == null)
return null;
final ODocument vertex = iVertex.getRecord();
checkVertexClass(vertex);
Set<OIdentifiable> result = null;
acquireReadLock(iVertex);
try {
final Set<OIdentifiable> set = getEdgeSet(vertex, inV);
if (iLabel == null)
// RETURN THE ENTIRE COLLECTION
if (set != null)
return Collections.unmodifiableSet(set);
else
return Collections.emptySet();
// FILTER BY LABEL
result = new HashSet<OIdentifiable>();
if (set != null)
for (OIdentifiable item : set) {
if (iLabel == null || iLabel.equals(((ODocument) item).field(LABEL)))
result.add(item);
}
} finally {
releaseReadLock(iVertex);
}
return result;
}
/**
* Retrieves the incoming edges of vertex iVertex having the requested properties iProperties
*
* @param iVertex
* Target vertex
* @param iProperties
* Map where keys are property names and values the expected values
* @return
*/
public Set<OIdentifiable> getInEdgesHavingProperties(final OIdentifiable iVertex, Iterable<String> iProperties) {
if (iVertex == null)
return null;
final ODocument vertex = iVertex.getRecord();
checkVertexClass(vertex);
return filterEdgesByProperties(getEdgeSet(vertex, inV), iProperties);
}
/**
* Retrieves the incoming edges of vertex iVertex having the requested properties iProperties set to the passed values
*
* @param iVertex
* Target vertex
* @param iProperties
* Map where keys are property names and values the expected values
* @return
*/
public Set<OIdentifiable> getInEdgesHavingProperties(final ODocument iVertex, final Map<String, Object> iProperties) {
if (iVertex == null)
return null;
checkVertexClass(iVertex);
return filterEdgesByProperties(getEdgeSet(iVertex, inV), iProperties);
}
public ODocument getInVertex(final OIdentifiable iEdge) {
if (iEdge == null)
return null;
final ODocument e = (ODocument) iEdge.getRecord();
checkEdgeClass(e);
OIdentifiable v = e.field(EDGE_FIELD_IN);
if (v != null && v instanceof ORID) {
// REPLACE WITH THE DOCUMENT
v = v.getRecord();
final boolean wasDirty = e.isDirty();
e.field(EDGE_FIELD_IN, v);
if (!wasDirty)
e.unsetDirty();
}
return (ODocument) v;
}
public ODocument getOutVertex(final OIdentifiable iEdge) {
if (iEdge == null)
return null;
final ODocument e = (ODocument) iEdge.getRecord();
checkEdgeClass(e);
OIdentifiable v = e.field(EDGE_FIELD_OUT);
if (v != null && v instanceof ORID) {
// REPLACE WITH THE DOCUMENT
v = v.getRecord();
final boolean wasDirty = e.isDirty();
e.field(EDGE_FIELD_OUT, v);
if (!wasDirty)
e.unsetDirty();
}
return (ODocument) v;
}
public Set<OIdentifiable> filterEdgesByProperties(final Set<OIdentifiable> iEdges, final Iterable<String> iPropertyNames) {
acquireReadLock(null);
try {
if (iPropertyNames == null)
// RETURN THE ENTIRE COLLECTION
if (iEdges != null)
return Collections.unmodifiableSet(iEdges);
else
return Collections.emptySet();
// FILTER BY PROPERTY VALUES
final Set<OIdentifiable> result = new HashSet<OIdentifiable>();
if (iEdges != null)
for (OIdentifiable item : iEdges) {
final ODocument doc = (ODocument) item;
for (String propName : iPropertyNames) {
if (doc.containsField(propName))
// FOUND: ADD IT
result.add(item);
}
}
return result;
} finally {
releaseReadLock(null);
}
}
public Set<OIdentifiable> filterEdgesByProperties(final Set<OIdentifiable> iEdges, final Map<String, Object> iProperties) {
acquireReadLock(null);
try {
if (iProperties == null)
// RETURN THE ENTIRE COLLECTION
if (iEdges != null)
return Collections.unmodifiableSet(iEdges);
else
return Collections.emptySet();
// FILTER BY PROPERTY VALUES
final OMVRBTreeRIDSet result;
result = new OMVRBTreeRIDSet();
if (iEdges != null)
for (OIdentifiable item : iEdges) {
final ODocument doc = (ODocument) item;
for (Entry<String, Object> prop : iProperties.entrySet()) {
if (prop.getKey() != null && doc.containsField(prop.getKey())) {
if (prop.getValue() == null) {
if (doc.field(prop.getKey()) == null)
// BOTH NULL: ADD IT
result.add(item);
} else if (prop.getValue().equals(doc.field(prop.getKey())))
// SAME VALUE: ADD IT
result.add(item);
}
}
}
return result;
} finally {
releaseReadLock(null);
}
}
public ODocument getRoot(final String iName) {
return getDictionary().get(iName);
}
public ODocument getRoot(final String iName, final String iFetchPlan) {
return getDictionary().get(iName, iFetchPlan);
}
public OGraphDatabase setRoot(final String iName, final ODocument iNode) {
if (iNode == null)
getDictionary().remove(iName);
else
getDictionary().put(iName, iNode);
return this;
}
public OClass createVertexType(final String iClassName) {
return getMetadata().getSchema().createClass(iClassName, vertexBaseClass);
}
public OClass createVertexType(final String iClassName, final String iSuperClassName) {
return getMetadata().getSchema().createClass(iClassName, checkVertexClass(iSuperClassName));
}
public OClass createVertexType(final String iClassName, final OClass iSuperClass) {
checkVertexClass(iSuperClass);
return getMetadata().getSchema().createClass(iClassName, iSuperClass);
}
public OClass getVertexType(final String iClassName) {
return getMetadata().getSchema().getClass(iClassName);
}
public OClass createEdgeType(final String iClassName) {
return getMetadata().getSchema().createClass(iClassName, edgeBaseClass);
}
public OClass createEdgeType(final String iClassName, final String iSuperClassName) {
return getMetadata().getSchema().createClass(iClassName, checkEdgeClass(iSuperClassName));
}
public OClass createEdgeType(final String iClassName, final OClass iSuperClass) {
checkEdgeClass(iSuperClass);
return getMetadata().getSchema().createClass(iClassName, iSuperClass);
}
public OClass getEdgeType(final String iClassName) {
return getMetadata().getSchema().getClass(iClassName);
}
public boolean isSafeMode() {
return safeMode;
}
public void setSafeMode(boolean safeMode) {
this.safeMode = safeMode;
}
public OClass getVertexBaseClass() {
return vertexBaseClass;
}
public OClass getEdgeBaseClass() {
return edgeBaseClass;
}
public void checkVertexClass(final ODocument iVertex) {
// FORCE EARLY UNMARSHALLING
iVertex.deserializeFields();
if (useCustomTypes && !iVertex.getSchemaClass().isSubClassOf(vertexBaseClass))
throw new IllegalArgumentException("The document received is not a vertex. Found class '" + iVertex.getSchemaClass() + "'");
}
public OClass checkVertexClass(final String iVertexTypeName) {
if (iVertexTypeName == null || !useCustomTypes)
return getVertexBaseClass();
final OClass cls = getMetadata().getSchema().getClass(iVertexTypeName);
if (cls == null)
throw new IllegalArgumentException("The class '" + iVertexTypeName + "' was not found");
if (!cls.isSubClassOf(vertexBaseClass))
throw new IllegalArgumentException("The class '" + iVertexTypeName + "' does not extend the vertex type");
return cls;
}
public void checkVertexClass(final OClass iVertexType) {
if (useCustomTypes && iVertexType != null) {
if (!iVertexType.isSubClassOf(vertexBaseClass))
throw new IllegalArgumentException("The class '" + iVertexType + "' does not extend the vertex type");
}
}
public void checkEdgeClass(final ODocument iEdge) {
// FORCE EARLY UNMARSHALLING
iEdge.deserializeFields();
if (useCustomTypes && !iEdge.getSchemaClass().isSubClassOf(edgeBaseClass))
throw new IllegalArgumentException("The document received is not an edge. Found class '" + iEdge.getSchemaClass() + "'");
}
public OClass checkEdgeClass(final String iEdgeTypeName) {
if (iEdgeTypeName == null || !useCustomTypes)
return getEdgeBaseClass();
final OClass cls = getMetadata().getSchema().getClass(iEdgeTypeName);
if (cls == null)
throw new IllegalArgumentException("The class '" + iEdgeTypeName + "' was not found");
if (!cls.isSubClassOf(edgeBaseClass))
throw new IllegalArgumentException("The class '" + iEdgeTypeName + "' does not extend the edge type");
return cls;
}
public void checkEdgeClass(final OClass iEdgeType) {
if (useCustomTypes && iEdgeType != null) {
if (!iEdgeType.isSubClassOf(edgeBaseClass))
throw new IllegalArgumentException("The class '" + iEdgeType + "' does not extend the edge type");
}
}
public boolean isUseCustomTypes() {
return useCustomTypes;
}
public void setUseCustomTypes(boolean useCustomTypes) {
this.useCustomTypes = useCustomTypes;
}
/**
* Returns true if the document is a vertex (its class is OGraphVertex or any subclasses)
*
* @param iRecord
* Document to analyze.
* @return true if the document is a vertex (its class is OGraphVertex or any subclasses)
*/
public boolean isVertex(final ODocument iRecord) {
return iRecord != null ? iRecord.getSchemaClass().isSubClassOf(vertexBaseClass) : false;
}
/**
* Returns true if the document is an edge (its class is OGraphEdge or any subclasses)
*
* @param iRecord
* Document to analyze.
* @return true if the document is a edge (its class is OGraphEdge or any subclasses)
*/
public boolean isEdge(final ODocument iRecord) {
return iRecord != null ? iRecord.getSchemaClass().isSubClassOf(edgeBaseClass) : false;
}
/**
* Locks the record in exclusive mode to avoid concurrent access.
*
* @param iRecord
* Record to lock
* @return The current instance as fluent interface to allow calls in chain.
*/
public OGraphDatabase acquireWriteLock(final OIdentifiable iRecord) {
switch (lockMode) {
case DATABASE_LEVEL_LOCKING:
((OStorage) getStorage()).getLock().acquireExclusiveLock();
break;
case RECORD_LEVEL_LOCKING:
((OStorageEmbedded) getStorage()).acquireWriteLock(iRecord.getIdentity());
break;
case NO_LOCKING:
break;
}
return this;
}
/**
* Releases the exclusive lock against a record previously acquired by current thread.
*
* @param iRecord
* Record to unlock
* @return The current instance as fluent interface to allow calls in chain.
*/
public OGraphDatabase releaseWriteLock(final OIdentifiable iRecord) {
switch (lockMode) {
case DATABASE_LEVEL_LOCKING:
((OStorage) getStorage()).getLock().releaseExclusiveLock();
break;
case RECORD_LEVEL_LOCKING:
((OStorageEmbedded) getStorage()).releaseWriteLock(iRecord.getIdentity());
break;
case NO_LOCKING:
break;
}
return this;
}
/**
* Locks the record in shared mode to avoid concurrent writes.
*
* @param iRecord
* Record to lock
* @return The current instance as fluent interface to allow calls in chain.
*/
public OGraphDatabase acquireReadLock(final OIdentifiable iRecord) {
switch (lockMode) {
case DATABASE_LEVEL_LOCKING:
((OStorage) getStorage()).getLock().acquireSharedLock();
break;
case RECORD_LEVEL_LOCKING:
((OStorageEmbedded) getStorage()).acquireReadLock(iRecord.getIdentity());
break;
case NO_LOCKING:
break;
}
return this;
}
/**
* Releases the shared lock against a record previously acquired by current thread.
*
* @param iRecord
* Record to unlock
* @return The current instance as fluent interface to allow calls in chain.
*/
public OGraphDatabase releaseReadLock(final OIdentifiable iRecord) {
switch (lockMode) {
case DATABASE_LEVEL_LOCKING:
((OStorage) getStorage()).getLock().releaseSharedLock();
break;
case RECORD_LEVEL_LOCKING:
((OStorageEmbedded) getStorage()).releaseReadLock(iRecord.getIdentity());
break;
case NO_LOCKING:
break;
}
return this;
}
@Override
public String getType() {
return TYPE;
}
public void checkForGraphSchema() {
getMetadata().getSchema().getOrCreateClass(OMVRBTreeRIDProvider.PERSISTENT_CLASS_NAME);
vertexBaseClass = getMetadata().getSchema().getClass(VERTEX_ALIAS);
edgeBaseClass = getMetadata().getSchema().getClass(EDGE_ALIAS);
if (vertexBaseClass == null) {
// CREATE THE META MODEL USING THE ORIENT SCHEMA
vertexBaseClass = getMetadata().getSchema().createClass(VERTEX_ALIAS);
vertexBaseClass.setOverSize(2);
}
if (edgeBaseClass == null) {
edgeBaseClass = getMetadata().getSchema().createClass(EDGE_ALIAS);
edgeBaseClass.setShortName(EDGE_ALIAS);
}
}
protected boolean beginBlock() {
if (safeMode && !(getTransaction() instanceof OTransactionNoTx)) {
begin();
return true;
}
return false;
}
protected void commitBlock(final boolean iOpenTxInSafeMode) {
if (iOpenTxInSafeMode)
commit();
}
protected void rollbackBlock(final boolean iOpenTxInSafeMode) {
if (iOpenTxInSafeMode)
rollback();
}
protected boolean checkEdge(final ODocument iEdge, final String[] iLabels, final String[] iClassNames) {
boolean good = true;
if (iClassNames != null) {
// CHECK AGAINST CLASS NAMES
good = false;
for (String c : iClassNames) {
if (c.equals(iEdge.getClassName())) {
good = true;
break;
}
}
}
if (good && iLabels != null) {
// CHECK AGAINST LABELS
good = false;
for (String c : iLabels) {
if (c.equals(iEdge.field(LABEL))) {
good = true;
break;
}
}
}
return good;
}
public LOCK_MODE getLockMode() {
return lockMode;
}
public void setLockMode(final LOCK_MODE lockMode) {
if (lockMode == LOCK_MODE.RECORD_LEVEL_LOCKING && !(getStorage() instanceof OStorageEmbedded))
// REMOTE LOCKING IS NOT YET SUPPORTED
throw new IllegalArgumentException("Record level locking is not supported for remote connections");
this.lockMode = lockMode;
}
public boolean isRetroCompatibility() {
return retroCompatibility;
}
public void setRetroCompatibility(final boolean retroCompatibility) {
this.retroCompatibility = retroCompatibility;
if (retroCompatibility) {
inV = VERTEX_FIELD_IN_OLD;
outV = VERTEX_FIELD_OUT_OLD;
} else {
inV = VERTEX_FIELD_IN;
outV = VERTEX_FIELD_OUT;
}
}
}
| 0 (true)
|
core_src_main_java_com_orientechnologies_orient_core_db_graph_OGraphDatabase.java
|
1,809 |
public class MapContainer {
private volatile MapConfig mapConfig;
private final String name;
private final RecordFactory recordFactory;
private final MapService mapService;
private MapStoreWrapper storeWrapper;
private final List<MapInterceptor> interceptors;
private final Map<String, MapInterceptor> interceptorMap;
private final IndexService indexService = new IndexService();
private final boolean nearCacheEnabled;
private final ReachabilityHandlerChain reachabilityHandlerChain;
private final SizeEstimator nearCacheSizeEstimator;
private final Map<Data, Object> initialKeys = new ConcurrentHashMap<Data, Object>();
private final PartitioningStrategy partitioningStrategy;
private WriteBehindManager writeBehindQueueManager;
private WanReplicationPublisher wanReplicationPublisher;
private MapMergePolicy wanMergePolicy;
private final boolean evictionEnabled;
public MapContainer(final String name, final MapConfig mapConfig, final MapService mapService) {
this.name = name;
this.mapConfig = mapConfig;
this.mapService = mapService;
this.partitioningStrategy = createPartitioningStrategy();
this.reachabilityHandlerChain = ReachabilityHandlers.newHandlerChain(MapContainer.this);
final NodeEngine nodeEngine = mapService.getNodeEngine();
recordFactory = createRecordFactory(nodeEngine);
initMapStoreOperations(nodeEngine);
initWanReplication(nodeEngine);
interceptors = new CopyOnWriteArrayList<MapInterceptor>();
interceptorMap = new ConcurrentHashMap<String, MapInterceptor>();
nearCacheEnabled = mapConfig.getNearCacheConfig() != null;
nearCacheSizeEstimator = SizeEstimators.createNearCacheSizeEstimator();
evictionEnabled = !MapConfig.EvictionPolicy.NONE.equals(mapConfig.getEvictionPolicy());
mapService.getNodeEngine().getExecutionService()
.scheduleAtFixedRate(new ClearExpiredRecordsTask(), 5, 5, TimeUnit.SECONDS);
}
/**
* Periodically clears expired entries (ttl and idle).
*/
private class ClearExpiredRecordsTask implements Runnable {
public void run() {
final MapService mapService = MapContainer.this.mapService;
final NodeEngine nodeEngine = mapService.getNodeEngine();
for (int partitionId = 0; partitionId < nodeEngine.getPartitionService().getPartitionCount(); partitionId++) {
InternalPartition partition = nodeEngine.getPartitionService().getPartition(partitionId);
if (partition.isOwnerOrBackup(nodeEngine.getThisAddress())) {
// check if record store has already been initialized or not.
final PartitionContainer partitionContainer = mapService.getPartitionContainer(partitionId);
if (isUninitializedRecordStore(partitionContainer)) {
continue;
}
final Operation expirationOperation = createExpirationOperation(partitionId);
OperationService operationService = mapService.getNodeEngine().getOperationService();
operationService.executeOperation(expirationOperation);
}
}
}
private boolean isUninitializedRecordStore(PartitionContainer partitionContainer) {
final RecordStore recordStore = partitionContainer.getExistingRecordStore(name);
return recordStore == null;
}
}
private Operation createExpirationOperation(int partitionId) {
final MapService mapService = this.mapService;
final ClearExpiredOperation clearExpiredOperation = new ClearExpiredOperation(name);
clearExpiredOperation
.setNodeEngine(mapService.getNodeEngine())
.setCallerUuid(mapService.getNodeEngine().getLocalMember().getUuid())
.setPartitionId(partitionId)
.setValidateTarget(false)
.setService(mapService);
return clearExpiredOperation;
}
public boolean isEvictionEnabled() {
return evictionEnabled;
}
private void initMapStoreOperations(NodeEngine nodeEngine) {
if (!isMapStoreEnabled()) {
this.writeBehindQueueManager = WriteBehindManagers.emptyWriteBehindManager();
return;
}
storeWrapper = createMapStoreWrapper(mapConfig.getMapStoreConfig(), nodeEngine);
if (storeWrapper != null) {
initMapStore(storeWrapper.getImpl(), mapConfig.getMapStoreConfig(), nodeEngine);
}
if (!isWriteBehindMapStoreEnabled()) {
this.writeBehindQueueManager = WriteBehindManagers.emptyWriteBehindManager();
return;
}
initWriteBehindMapStore();
}
private void initWriteBehindMapStore() {
if (!isWriteBehindMapStoreEnabled()) {
return;
}
this.writeBehindQueueManager
= WriteBehindManagers.createWriteBehindManager(name, mapService, storeWrapper);
// listener for delete operations.
this.writeBehindQueueManager.addStoreListener(new StoreListener<DelayedEntry>() {
@Override
public void beforeStore(StoreEvent<DelayedEntry> storeEvent) {
}
@Override
public void afterStore(StoreEvent<DelayedEntry> storeEvent) {
final DelayedEntry delayedEntry = storeEvent.getSource();
final Object value = delayedEntry.getValue();
// only process store delete operations.
if (value != null) {
return;
}
final Data key = (Data) storeEvent.getSource().getKey();
final int partitionId = mapService.getNodeEngine().getPartitionService().getPartitionId(key);
final PartitionContainer partitionContainer = mapService.getPartitionContainer(partitionId);
final RecordStore recordStore = partitionContainer.getExistingRecordStore(name);
if (recordStore != null) {
recordStore.removeFromWriteBehindWaitingDeletions(key);
}
}
});
this.writeBehindQueueManager.start();
}
private RecordFactory createRecordFactory(NodeEngine nodeEngine) {
RecordFactory recordFactory;
switch (mapConfig.getInMemoryFormat()) {
case BINARY:
recordFactory = new DataRecordFactory(mapConfig, nodeEngine.getSerializationService(), partitioningStrategy);
break;
case OBJECT:
recordFactory = new ObjectRecordFactory(mapConfig, nodeEngine.getSerializationService());
break;
case OFFHEAP:
recordFactory = new OffHeapRecordFactory(mapConfig, nodeEngine.getOffHeapStorage(),
nodeEngine.getSerializationService(), partitioningStrategy);
break;
default:
throw new IllegalArgumentException("Invalid storage format: " + mapConfig.getInMemoryFormat());
}
return recordFactory;
}
public boolean isMapStoreEnabled() {
final MapStoreConfig mapStoreConfig = mapConfig.getMapStoreConfig();
if (mapStoreConfig == null || !mapStoreConfig.isEnabled()) {
return false;
}
return true;
}
private MapStoreWrapper createMapStoreWrapper(MapStoreConfig mapStoreConfig, NodeEngine nodeEngine) {
Object store;
MapStoreWrapper storeWrapper;
try {
MapStoreFactory factory = (MapStoreFactory) mapStoreConfig.getFactoryImplementation();
if (factory == null) {
String factoryClassName = mapStoreConfig.getFactoryClassName();
if (factoryClassName != null && !"".equals(factoryClassName)) {
factory = ClassLoaderUtil.newInstance(nodeEngine.getConfigClassLoader(), factoryClassName);
}
}
store = (factory == null ? mapStoreConfig.getImplementation()
: factory.newMapStore(name, mapStoreConfig.getProperties()));
if (store == null) {
String mapStoreClassName = mapStoreConfig.getClassName();
store = ClassLoaderUtil.newInstance(nodeEngine.getConfigClassLoader(), mapStoreClassName);
}
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
storeWrapper = new MapStoreWrapper(store, name, mapStoreConfig.isEnabled());
return storeWrapper;
}
private void initMapStore(Object store, MapStoreConfig mapStoreConfig, NodeEngine nodeEngine) {
if (store instanceof MapLoaderLifecycleSupport) {
((MapLoaderLifecycleSupport) store).init(nodeEngine.getHazelcastInstance(),
mapStoreConfig.getProperties(), name);
}
loadInitialKeys();
}
public void initWanReplication(NodeEngine nodeEngine) {
WanReplicationRef wanReplicationRef = mapConfig.getWanReplicationRef();
if (wanReplicationRef == null) {
return;
}
String wanReplicationRefName = wanReplicationRef.getName();
WanReplicationService wanReplicationService = nodeEngine.getWanReplicationService();
wanReplicationPublisher = wanReplicationService.getWanReplicationPublisher(wanReplicationRefName);
wanMergePolicy = mapService.getMergePolicy(wanReplicationRef.getMergePolicy());
}
private PartitioningStrategy createPartitioningStrategy() {
PartitioningStrategy strategy = null;
PartitioningStrategyConfig partitioningStrategyConfig = mapConfig.getPartitioningStrategyConfig();
if (partitioningStrategyConfig != null) {
strategy = partitioningStrategyConfig.getPartitioningStrategy();
if (strategy == null && partitioningStrategyConfig.getPartitioningStrategyClass() != null) {
try {
strategy = ClassLoaderUtil.newInstance(mapService.getNodeEngine().getConfigClassLoader(),
partitioningStrategyConfig.getPartitioningStrategyClass());
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
}
return strategy;
}
public void loadInitialKeys() {
initialKeys.clear();
Set keys = storeWrapper.loadAllKeys();
if (keys == null || keys.isEmpty()) {
return;
}
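// Convert each loader-provided key to its serialized Data form, honouring the map's partitioning strategy.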
for (Object key : keys) {
Data dataKey = mapService.toData(key, partitioningStrategy);
initialKeys.put(dataKey, key);
}
// remove the keys that remain for more than 20 minutes.
mapService.getNodeEngine().getExecutionService().schedule(new Runnable() {
@Override
public void run() {
initialKeys.clear();
}
}, 20, TimeUnit.MINUTES);
}
public void shutDownMapStoreScheduledExecutor() {
writeBehindQueueManager.stop();
}
public Map<Data, Object> getInitialKeys() {
return initialKeys;
}
public IndexService getIndexService() {
return indexService;
}
public WanReplicationPublisher getWanReplicationPublisher() {
return wanReplicationPublisher;
}
public MapMergePolicy getWanMergePolicy() {
return wanMergePolicy;
}
public boolean isWriteBehindMapStoreEnabled() {
final MapStoreConfig mapStoreConfig = this.getMapConfig().getMapStoreConfig();
return mapStoreConfig != null && mapStoreConfig.isEnabled()
&& mapStoreConfig.getWriteDelaySeconds() > 0;
}
public String addInterceptor(MapInterceptor interceptor) {
String id = UuidUtil.buildRandomUuidString();
interceptorMap.put(id, interceptor);
interceptors.add(interceptor);
return id;
}
public void addInterceptor(String id, MapInterceptor interceptor) {
interceptorMap.put(id, interceptor);
interceptors.add(interceptor);
}
public List<MapInterceptor> getInterceptors() {
return interceptors;
}
public Map<String, MapInterceptor> getInterceptorMap() {
return interceptorMap;
}
public void removeInterceptor(String id) {
MapInterceptor interceptor = interceptorMap.remove(id);
interceptors.remove(interceptor);
}
public String getName() {
return name;
}
public boolean isNearCacheEnabled() {
return nearCacheEnabled;
}
public int getTotalBackupCount() {
return getBackupCount() + getAsyncBackupCount();
}
public int getBackupCount() {
return mapConfig.getBackupCount();
}
public long getWriteDelayMillis() {
return TimeUnit.SECONDS.toMillis(mapConfig.getMapStoreConfig().getWriteDelaySeconds());
}
public int getAsyncBackupCount() {
return mapConfig.getAsyncBackupCount();
}
public void setMapConfig(MapConfig mapConfig) {
this.mapConfig = mapConfig;
}
public MapConfig getMapConfig() {
return mapConfig;
}
public MapStoreWrapper getStore() {
return storeWrapper != null && storeWrapper.isEnabled() ? storeWrapper : null;
}
public PartitioningStrategy getPartitioningStrategy() {
return partitioningStrategy;
}
public SizeEstimator getNearCacheSizeEstimator() {
return nearCacheSizeEstimator;
}
public RecordFactory getRecordFactory() {
return recordFactory;
}
public MapService getMapService() {
return mapService;
}
public ReachabilityHandlerChain getReachabilityHandlerChain() {
return reachabilityHandlerChain;
}
public WriteBehindManager getWriteBehindManager() {
return writeBehindQueueManager;
}
public MapStoreWrapper getStoreWrapper() {
return storeWrapper;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_MapContainer.java
|
2,092 |
public class PutIfAbsentOperation extends BasePutOperation {
private boolean successful;
public PutIfAbsentOperation(String name, Data dataKey, Data value, long ttl) {
super(name, dataKey, value, ttl);
}
public PutIfAbsentOperation() {
}
public void run() {
dataOldValue = mapService.toData(recordStore.putIfAbsent(dataKey, dataValue, ttl));
successful = dataOldValue == null;
}
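// After-run processing and backups only happen when the key was absent and the put actually took place.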
public void afterRun() {
if (successful)
super.afterRun();
}
@Override
public Object getResponse() {
return dataOldValue;
}
public boolean shouldBackup() {
return successful;
}
@Override
public String toString() {
return "PutIfAbsentOperation{" + name + "}";
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_PutIfAbsentOperation.java
|
5,826 |
public class PostingsHighlighter implements Highlighter {
private static final String CACHE_KEY = "highlight-postings";
@Override
public String[] names() {
return new String[]{"postings", "postings-highlighter"};
}
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
FieldMapper<?> fieldMapper = highlighterContext.mapper;
SearchContextHighlight.Field field = highlighterContext.field;
if (fieldMapper.fieldType().indexOptions() != FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
throw new ElasticsearchIllegalArgumentException("the field [" + field.field() + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter");
}
SearchContext context = highlighterContext.context;
FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
if (!hitContext.cache().containsKey(CACHE_KEY)) {
//get the non rewritten query and rewrite it
Query query;
try {
query = rewrite(highlighterContext, hitContext.topLevelReader());
} catch (IOException e) {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
SortedSet<Term> queryTerms = extractTerms(query);
hitContext.cache().put(CACHE_KEY, new HighlighterEntry(queryTerms));
}
HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper);
if (mapperHighlighterEntry == null) {
Encoder encoder = field.encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.preTags()[0], field.postTags()[0], encoder);
BytesRef[] filteredQueryTerms = filterTerms(highlighterEntry.queryTerms, fieldMapper.names().indexName(), field.requireFieldMatch());
mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter, filteredQueryTerms);
}
//we merge back multiple values into a single value using the paragraph separator, unless we have to highlight every single value separately (number_of_fragments=0).
boolean mergeValues = field.numberOfFragments() != 0;
List<Snippet> snippets = new ArrayList<Snippet>();
int numberOfFragments;
try {
//we manually load the field values (from source if needed)
List<Object> textsToHighlight = HighlightUtils.loadFieldValues(fieldMapper, context, hitContext, field.forceSource());
CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(mapperHighlighterEntry.passageFormatter, textsToHighlight, mergeValues, Integer.MAX_VALUE-1, field.noMatchSize());
if (field.numberOfFragments() == 0) {
highlighter.setBreakIterator(new WholeBreakIterator());
numberOfFragments = 1; //1 per value since we highlight per value
} else {
numberOfFragments = field.numberOfFragments();
}
//we highlight each value separately by calling the highlight method multiple times, but only if we need to get back a snippet per value (whole value)
int values = mergeValues ? 1 : textsToHighlight.size();
for (int i = 0; i < values; i++) {
Snippet[] fieldSnippets = highlighter.highlightDoc(fieldMapper.names().indexName(), mapperHighlighterEntry.filteredQueryTerms, hitContext.searcher(), hitContext.docId(), numberOfFragments);
if (fieldSnippets != null) {
for (Snippet fieldSnippet : fieldSnippets) {
if (Strings.hasText(fieldSnippet.getText())) {
snippets.add(fieldSnippet);
}
}
}
}
} catch(IOException e) {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
snippets = filterSnippets(snippets, field.numberOfFragments());
if (field.scoreOrdered()) {
//let's sort the snippets by score if needed
CollectionUtil.introSort(snippets, new Comparator<Snippet>() {
public int compare(Snippet o1, Snippet o2) {
return (int) Math.signum(o2.getScore() - o1.getScore());
}
});
}
String[] fragments = new String[snippets.size()];
for (int i = 0; i < fragments.length; i++) {
fragments[i] = snippets.get(i).getText();
}
if (fragments.length > 0) {
return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
}
return null;
}
private static Query rewrite(HighlighterContext highlighterContext, IndexReader reader) throws IOException {
//rewrite is expensive: if the query was already rewritten we try not to rewrite
boolean mustRewrite = !highlighterContext.query.queryRewritten();
Query original = highlighterContext.query.originalQuery();
MultiTermQuery originalMultiTermQuery = null;
MultiTermQuery.RewriteMethod originalRewriteMethod = null;
if (original instanceof MultiTermQuery) {
originalMultiTermQuery = (MultiTermQuery) original;
if (!allowsForTermExtraction(originalMultiTermQuery.getRewriteMethod())) {
originalRewriteMethod = originalMultiTermQuery.getRewriteMethod();
originalMultiTermQuery.setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(50));
//we need to rewrite anyway if it is a multi term query which was rewritten with the wrong rewrite method
mustRewrite = true;
}
}
if (!mustRewrite) {
//return the rewritten query
return highlighterContext.query.query();
}
Query query = original;
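// Rewrite repeatedly until a fixed point is reached, i.e. rewrite returns the same query instance.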
for (Query rewrittenQuery = query.rewrite(reader); rewrittenQuery != query;
rewrittenQuery = query.rewrite(reader)) {
query = rewrittenQuery;
}
if (originalMultiTermQuery != null) {
if (originalRewriteMethod != null) {
//set back the original rewrite method after the rewrite is done
originalMultiTermQuery.setRewriteMethod(originalRewriteMethod);
}
}
return query;
}
private static boolean allowsForTermExtraction(MultiTermQuery.RewriteMethod rewriteMethod) {
return rewriteMethod instanceof TopTermsRewrite || rewriteMethod instanceof ScoringRewrite;
}
private static SortedSet<Term> extractTerms(Query query) {
SortedSet<Term> queryTerms = new TreeSet<Term>();
query.extractTerms(queryTerms);
return queryTerms;
}
private static BytesRef[] filterTerms(SortedSet<Term> queryTerms, String field, boolean requireFieldMatch) {
SortedSet<Term> fieldTerms;
if (requireFieldMatch) {
Term floor = new Term(field, "");
Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
fieldTerms = queryTerms.subSet(floor, ceiling);
} else {
fieldTerms = queryTerms;
}
BytesRef[] terms = new BytesRef[fieldTerms.size()];
int termUpto = 0;
for(Term term : fieldTerms) {
terms[termUpto++] = term.bytes();
}
return terms;
}
private static List<Snippet> filterSnippets(List<Snippet> snippets, int numberOfFragments) {
//We need to filter the snippets as due to no_match_size we could have
//either highlighted snippets together with non highlighted ones
//We don't want to mix those up
List<Snippet> filteredSnippets = new ArrayList<Snippet>(snippets.size());
for (Snippet snippet : snippets) {
if (snippet.isHighlighted()) {
filteredSnippets.add(snippet);
}
}
//if there's at least one highlighted snippet, we return all the highlighted ones
//otherwise we return the first non highlighted one if available
if (filteredSnippets.size() == 0) {
if (snippets.size() > 0) {
Snippet snippet = snippets.get(0);
//if we did discrete per value highlighting using whole break iterator (as number_of_fragments was 0)
//we need to obtain the first sentence of the first value
if (numberOfFragments == 0) {
BreakIterator bi = BreakIterator.getSentenceInstance(Locale.ROOT);
String text = snippet.getText();
bi.setText(text);
int next = bi.next();
if (next != BreakIterator.DONE) {
String newText = text.substring(0, next).trim();
snippet = new Snippet(newText, snippet.getScore(), snippet.isHighlighted());
}
}
filteredSnippets.add(snippet);
}
}
return filteredSnippets;
}
private static class HighlighterEntry {
final SortedSet<Term> queryTerms;
Map<FieldMapper<?>, MapperHighlighterEntry> mappers = Maps.newHashMap();
private HighlighterEntry(SortedSet<Term> queryTerms) {
this.queryTerms = queryTerms;
}
}
private static class MapperHighlighterEntry {
final CustomPassageFormatter passageFormatter;
final BytesRef[] filteredQueryTerms;
private MapperHighlighterEntry(CustomPassageFormatter passageFormatter, BytesRef[] filteredQueryTerms) {
this.passageFormatter = passageFormatter;
this.filteredQueryTerms = filteredQueryTerms;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_highlight_PostingsHighlighter.java
|
147 |
public interface StructuredContentRule extends SimpleRule {
/**
* Gets the primary key.
*
* @return the primary key
*/
@Nullable
public Long getId();
/**
* Sets the primary key.
*
* @param id the new primary key
*/
public void setId(@Nullable Long id);
/**
* Builds a copy of this content rule. Used by the content management system when an
* item is edited.
*
* @return a copy of this rule
*/
@Nonnull
public StructuredContentRule cloneEntity();
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentRule.java
|
364 |
public static class TestKeyPredicate
implements KeyPredicate<Integer> {
@Override
public boolean evaluate(Integer key) {
return key == 50;
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
178 |
@SuppressWarnings("unchecked")
public class OArrays {
public static <T> T[] copyOf(final T[] iSource, final int iNewSize) {
return (T[]) copyOf(iSource, iNewSize, iSource.getClass());
}
public static <T, U> T[] copyOf(final U[] iSource, final int iNewSize, final Class<? extends T[]> iNewType) {
final T[] copy = ((Object) iNewType == (Object) Object[].class) ? (T[]) new Object[iNewSize] : (T[]) Array.newInstance(
iNewType.getComponentType(), iNewSize);
System.arraycopy(iSource, 0, copy, 0, Math.min(iSource.length, iNewSize));
return copy;
}
public static <S> S[] copyOfRange(final S[] iSource, final int iBegin, final int iEnd) {
return copyOfRange(iSource, iBegin, iEnd, (Class<S[]>) iSource.getClass());
}
public static <D, S> D[] copyOfRange(final S[] iSource, final int iBegin, final int iEnd, final Class<? extends D[]> iClass) {
final int newLength = iEnd - iBegin;
if (newLength < 0)
throw new IllegalArgumentException(iBegin + " > " + iEnd);
final D[] copy = ((Object) iClass == (Object) Object[].class) ? (D[]) new Object[newLength] : (D[]) Array.newInstance(
iClass.getComponentType(), newLength);
System.arraycopy(iSource, iBegin, copy, 0, Math.min(iSource.length - iBegin, newLength));
return copy;
}
public static byte[] copyOfRange(final byte[] iSource, final int iBegin, final int iEnd) {
final int newLength = iEnd - iBegin;
if (newLength < 0)
throw new IllegalArgumentException(iBegin + " > " + iEnd);
try {
final byte[] copy = new byte[newLength];
System.arraycopy(iSource, iBegin, copy, 0, Math.min(iSource.length - iBegin, newLength));
return copy;
} catch (OutOfMemoryError e) {
OLogManager.instance().error(null, "Error on copying buffer of size %d bytes", e, newLength);
throw e;
}
}
public static int[] copyOf(final int[] iSource, final int iNewSize) {
final int[] copy = new int[iNewSize];
System.arraycopy(iSource, 0, copy, 0, Math.min(iSource.length, iNewSize));
return copy;
}
/**
* Returns true if the array contains the value, otherwise false
*/
public static boolean contains(final int[] iArray, final int iToFind) {
if (iArray == null || iArray.length == 0)
return false;
for (int e : iArray)
if (e == iToFind)
return true;
return false;
}
/**
* Returns true if the array contains the value, otherwise false
*/
public static <T> boolean contains(final T[] iArray, final T iToFind) {
if (iArray == null || iArray.length == 0)
return false;
for (T e : iArray)
if (e != null && e.equals(iToFind))
return true;
return false;
}
public static int hash(final Object[] iArray) {
int hash = 0;
for (Object o : iArray) {
if (o != null)
hash += o.hashCode();
}
return hash;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_util_OArrays.java
|
670 |
constructors[COLLECTION_TXN_REMOVE] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new CollectionTxnRemoveOperation();
}
};
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java
|
491 |
class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
private boolean filterCache = false;
private boolean fieldDataCache = false;
private boolean idCache = false;
private boolean recycler;
private String[] fields = null;
private String[] filterKeys = null;
ShardClearIndicesCacheRequest() {
}
public ShardClearIndicesCacheRequest(String index, int shardId, ClearIndicesCacheRequest request) {
super(index, shardId, request);
filterCache = request.filterCache();
fieldDataCache = request.fieldDataCache();
idCache = request.idCache();
fields = request.fields();
filterKeys = request.filterKeys();
recycler = request.recycler();
}
public boolean filterCache() {
return filterCache;
}
public boolean fieldDataCache() {
return this.fieldDataCache;
}
public boolean idCache() {
return this.idCache;
}
public boolean recycler() {
return this.recycler;
}
public String[] fields() {
return this.fields;
}
public String[] filterKeys() {
return this.filterKeys;
}
public ShardClearIndicesCacheRequest waitForOperations(boolean waitForOperations) {
this.filterCache = waitForOperations;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
filterCache = in.readBoolean();
fieldDataCache = in.readBoolean();
idCache = in.readBoolean();
recycler = in.readBoolean();
fields = in.readStringArray();
filterKeys = in.readStringArray();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(filterCache);
out.writeBoolean(fieldDataCache);
out.writeBoolean(idCache);
out.writeBoolean(recycler);
out.writeStringArrayNullable(fields);
out.writeStringArrayNullable(filterKeys);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_cache_clear_ShardClearIndicesCacheRequest.java
|
384 |
public class OMultiValueChangeEvent<K, V> {
/**
* Operation that is performed on collection.
*/
public static enum OChangeType {
ADD, UPDATE, REMOVE
}
/**
* Operation that is performed on collection.
*/
private final OChangeType changeType;
/**
* Value that indicates position of item inside collection.
*/
private final K key;
/**
* New item value.
*/
private V value;
/**
* Previous item value.
*/
private V oldValue;
public OMultiValueChangeEvent(final OChangeType changeType, final K key, final V value) {
this.changeType = changeType;
this.key = key;
this.value = value;
}
public OMultiValueChangeEvent(final OChangeType changeType, final K key, final V value, final V oldValue) {
this.changeType = changeType;
this.key = key;
this.value = value;
this.oldValue = oldValue;
}
public K getKey() {
return key;
}
public V getValue() {
return value;
}
public OChangeType getChangeType() {
return changeType;
}
public V getOldValue() {
return oldValue;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final OMultiValueChangeEvent<?, ?> that = (OMultiValueChangeEvent<?, ?>) o;
if (changeType != that.changeType) {
return false;
}
if (!key.equals(that.key)) {
return false;
}
if (oldValue != null ? !oldValue.equals(that.oldValue) : that.oldValue != null) {
return false;
}
if (value != null ? !value.equals(that.value) : that.value != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = changeType.hashCode();
result = 31 * result + key.hashCode();
result = 31 * result + (value != null ? value.hashCode() : 0);
result = 31 * result + (oldValue != null ? oldValue.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "OMultiValueChangeEvent{" + "changeType=" + changeType + ", key=" + key + ", value=" + value + ", oldValue=" + oldValue
+ '}';
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_record_OMultiValueChangeEvent.java
|
270 |
public class ElasticsearchNullPointerException extends ElasticsearchException {
public ElasticsearchNullPointerException() {
super(null);
}
public ElasticsearchNullPointerException(String msg) {
super(msg);
}
public ElasticsearchNullPointerException(String msg, Throwable cause) {
super(msg, cause);
}
}
| 0true
|
src_main_java_org_elasticsearch_ElasticsearchNullPointerException.java
|
5,244 |
public class NestedAggregator extends SingleBucketAggregator implements ReaderContextAware {
private final Filter parentFilter;
private final Filter childFilter;
private Bits childDocs;
private FixedBitSet parentDocs;
public NestedAggregator(String name, AggregatorFactories factories, String nestedPath, AggregationContext aggregationContext, Aggregator parent) {
super(name, factories, aggregationContext, parent);
MapperService.SmartNameObjectMapper mapper = aggregationContext.searchContext().smartNameObjectMapper(nestedPath);
if (mapper == null) {
throw new AggregationExecutionException("facet nested path [" + nestedPath + "] not found");
}
ObjectMapper objectMapper = mapper.mapper();
if (objectMapper == null) {
throw new AggregationExecutionException("facet nested path [" + nestedPath + "] not found");
}
if (!objectMapper.nested().isNested()) {
throw new AggregationExecutionException("facet nested path [" + nestedPath + "] is not nested");
}
parentFilter = aggregationContext.searchContext().filterCache().cache(NonNestedDocsFilter.INSTANCE);
childFilter = aggregationContext.searchContext().filterCache().cache(objectMapper.nestedTypeFilter());
}
@Override
public void setNextReader(AtomicReaderContext reader) {
try {
DocIdSet docIdSet = parentFilter.getDocIdSet(reader, null);
// In ES if the parent is deleted, then the children are also deleted. Therefore acceptedDocs can also be null here.
childDocs = DocIdSets.toSafeBits(reader.reader(), childFilter.getDocIdSet(reader, null));
if (DocIdSets.isEmpty(docIdSet)) {
parentDocs = null;
} else {
parentDocs = (FixedBitSet) docIdSet;
}
} catch (IOException ioe) {
throw new AggregationExecutionException("Failed to aggregate [" + name + "]", ioe);
}
}
@Override
public void collect(int parentDoc, long bucketOrd) throws IOException {
// here we translate the parent doc to a list of its nested docs, and then call super.collect for every one of them
// so they'll be collected
if (parentDoc == 0 || parentDocs == null) {
return;
}
int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
int numChildren = 0;
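// Nested docs are indexed immediately before their parent doc, so scan backwards until the previous parent.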
for (int i = (parentDoc - 1); i > prevParentDoc; i--) {
if (childDocs.get(i)) {
++numChildren;
collectBucketNoCounts(i, bucketOrd);
}
}
incrementBucketDocCount(numChildren, bucketOrd);
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
return new InternalNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal));
}
@Override
public InternalAggregation buildEmptyAggregation() {
return new InternalNested(name, 0, buildEmptySubAggregations());
}
public static class Factory extends AggregatorFactory {
private final String path;
public Factory(String name, String path) {
super(name, InternalNested.TYPE.name());
this.path = path;
}
@Override
public Aggregator create(AggregationContext context, Aggregator parent, long expectedBucketsCount) {
return new NestedAggregator(name, factories, path, context, parent);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_nested_NestedAggregator.java
|
361 |
static class NodeStatsRequest extends NodeOperationRequest {
NodesStatsRequest request;
NodeStatsRequest() {
}
NodeStatsRequest(String nodeId, NodesStatsRequest request) {
super(request, nodeId);
this.request = request;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
request = new NodesStatsRequest();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
request.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_stats_TransportNodesStatsAction.java
|
27 |
public class ClusterMembers
{
public static final Predicate<ClusterMember> ALIVE = new Predicate<ClusterMember>()
{
@Override
public boolean accept( ClusterMember item )
{
return item.isAlive();
}
};
private final InstanceId me;
public static Predicate<ClusterMember> inRole( final String role )
{
return new Predicate<ClusterMember>()
{
@Override
public boolean accept( ClusterMember item )
{
return item.hasRole( role );
}
};
}
private final Map<InstanceId, ClusterMember> members = new CopyOnWriteHashMap<InstanceId, ClusterMember>();
public ClusterMembers( Cluster cluster, Heartbeat heartbeat, ClusterMemberEvents clusterMemberEvents,
InstanceId me )
{
this.me = me;
cluster.addClusterListener( new HAMClusterListener() );
heartbeat.addHeartbeatListener( new HAMHeartbeatListener() );
clusterMemberEvents.addClusterMemberListener( new HAMClusterMemberListener() );
}
public Iterable<ClusterMember> getMembers()
{
return members.values();
}
public ClusterMember getSelf()
{
for ( ClusterMember clusterMember : getMembers() )
{
if ( clusterMember.getMemberId().equals( me ) )
{
return clusterMember;
}
}
return null;
}
private ClusterMember getMember( InstanceId server )
{
ClusterMember clusterMember = members.get( server );
if ( clusterMember == null )
throw new IllegalStateException( "Member " + server + " not found in " + new HashMap(members) );
return clusterMember;
}
private class HAMClusterListener extends ClusterListener.Adapter
{
@Override
public void enteredCluster( ClusterConfiguration configuration )
{
Map<InstanceId, ClusterMember> newMembers = new HashMap<InstanceId, ClusterMember>();
for ( InstanceId memberClusterUri : configuration.getMembers().keySet() )
newMembers.put( memberClusterUri, new ClusterMember( memberClusterUri ) );
members.clear();
members.putAll( newMembers );
}
@Override
public void leftCluster()
{
members.clear();
}
@Override
public void joinedCluster( InstanceId member, URI memberUri )
{
members.put( member, new ClusterMember( member ) );
}
@Override
public void leftCluster( InstanceId member )
{
members.remove( member );
}
}
private class HAMClusterMemberListener extends ClusterMemberListener.Adapter
{
private InstanceId masterId = null;
@Override
public void coordinatorIsElected( InstanceId coordinatorId )
{
if ( coordinatorId.equals( this.masterId ) )
{
return;
}
this.masterId = coordinatorId;
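// A new coordinator was elected: clear MASTER/SLAVE availability for all members until they re-announce their roles.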
Map<InstanceId, ClusterMember> newMembers = new CopyOnWriteHashMap<InstanceId, ClusterMember>();
for ( Map.Entry<InstanceId, ClusterMember> memberEntry : members.entrySet() )
{
newMembers.put( memberEntry.getKey(), memberEntry.getValue().unavailableAs(
HighAvailabilityModeSwitcher.MASTER ).unavailableAs( HighAvailabilityModeSwitcher.SLAVE ) );
}
members.clear();
members.putAll( newMembers );
}
@Override
public void memberIsAvailable( String role, InstanceId instanceId, URI roleUri )
{
members.put( instanceId, getMember( instanceId ).availableAs( role, roleUri ) );
}
@Override
public void memberIsUnavailable( String role, InstanceId unavailableId )
{
ClusterMember member = null;
try
{
member = getMember( unavailableId );
members.put( unavailableId, member.unavailableAs( role ) );
}
catch ( IllegalStateException e )
{
// Unknown member
}
}
}
private class HAMHeartbeatListener extends HeartbeatListener.Adapter
{
@Override
public void failed( InstanceId server )
{
if (members.containsKey( server ))
{
members.put( server, getMember( server ).failed() );
}
}
@Override
public void alive( InstanceId server )
{
if (members.containsKey( server ))
members.put( server, getMember( server ).alive() );
}
}
}
| 1no label
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_member_ClusterMembers.java
|
4,194 |
blobContainer.readBlob(firstFileToRecover, new BlobContainer.ReadBlobListener() {
@Override
public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
recoveryStatus.index().addCurrentFilesSize(size);
indexOutput.writeBytes(data, offset, size);
if (restoreRateLimiter != null) {
rateLimiterListener.onRestorePause(restoreRateLimiter.pause(size));
}
}
@Override
public synchronized void onCompleted() {
int part = partIndex.incrementAndGet();
if (part < fileInfo.numberOfParts()) {
String partName = fileInfo.partName(part);
// continue with the new part
blobContainer.readBlob(partName, this);
return;
} else {
// we are done...
try {
indexOutput.close();
// write the checksum
if (fileInfo.checksum() != null) {
store.writeChecksum(fileInfo.physicalName(), fileInfo.checksum());
}
store.directory().sync(Collections.singleton(fileInfo.physicalName()));
} catch (IOException e) {
onFailure(e);
return;
}
}
latch.countDown();
}
@Override
public void onFailure(Throwable t) {
failures.add(t);
latch.countDown();
}
});
| 1no label
|
src_main_java_org_elasticsearch_index_snapshots_blobstore_BlobStoreIndexShardRepository.java
|
456 |
public class PendingClusterTasksRequest extends MasterNodeReadOperationRequest<PendingClusterTasksRequest> {
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readLocal(in, Version.V_1_0_0_RC2);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeLocal(out, Version.V_1_0_0_RC2);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_tasks_PendingClusterTasksRequest.java
|
1,135 |
public class HistogramAggregationSearchBenchmark {
static final long COUNT = SizeValue.parseSizeValue("20m").singles();
static final int BATCH = 1000;
static final int QUERY_WARMUP = 5;
static final int QUERY_COUNT = 20;
static final int NUMBER_OF_TERMS = 1000;
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("refresh_interval", "-1")
.put("gateway.type", "local")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.build();
String clusterName = HistogramAggregationSearchBenchmark.class.getSimpleName();
Node node1 = nodeBuilder()
.clusterName(clusterName)
.settings(settingsBuilder().put(settings).put("name", "node1")).node();
//Node clientNode = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
Client client = node1.client();
long[] lValues = new long[NUMBER_OF_TERMS];
for (int i = 0; i < NUMBER_OF_TERMS; i++) {
lValues[i] = i;
}
Random r = new Random();
try {
client.admin().indices().prepareCreate("test")
.setSettings(settingsBuilder().put(settings))
.addMapping("type1", jsonBuilder()
.startObject()
.startObject("type1")
.startObject("properties")
.startObject("l_value")
.field("type", "long")
.endObject()
.startObject("i_value")
.field("type", "integer")
.endObject()
.startObject("s_value")
.field("type", "short")
.endObject()
.startObject("b_value")
.field("type", "byte")
.endObject()
.endObject()
.endObject()
.endObject())
.execute().actionGet();
StopWatch stopWatch = new StopWatch().start();
System.out.println("--> Indexing [" + COUNT + "] ...");
long iters = COUNT / BATCH;
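// Index COUNT documents using bulk requests of BATCH documents each.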
long i = 1;
int counter = 0;
for (; i <= iters; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
final long value = lValues[r.nextInt(lValues.length)];
XContentBuilder source = jsonBuilder().startObject()
.field("id", Integer.valueOf(counter))
.field("l_value", value)
.field("i_value", (int) value)
.field("s_value", (short) value)
.field("b_value", (byte) value)
.field("date", new Date())
.endObject();
request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
.source(source));
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
client.admin().indices().prepareFlush("test").execute().actionGet();
System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
} catch (Exception e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
if (client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() != COUNT) {
throw new Error();
}
System.out.println("--> Number of docs in index: " + COUNT);
System.out.println("--> Warmup...");
// warm up the facet and aggregation queries first
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addFacet(histogramFacet("l_value").field("l_value").interval(4))
.addFacet(histogramFacet("i_value").field("i_value").interval(4))
.addFacet(histogramFacet("s_value").field("s_value").interval(4))
.addFacet(histogramFacet("b_value").field("b_value").interval(4))
.addFacet(histogramFacet("date").field("date").interval(1000))
.addAggregation(histogram("l_value").field("l_value").interval(4))
.addAggregation(histogram("i_value").field("i_value").interval(4))
.addAggregation(histogram("s_value").field("s_value").interval(4))
.addAggregation(histogram("b_value").field("b_value").interval(4))
.addAggregation(histogram("date").field("date").interval(1000))
.execute().actionGet();
if (j == 0) {
System.out.println("--> Warmup took: " + searchResponse.getTook());
}
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
}
System.out.println("--> Warmup DONE");
long totalQueryTime = 0;
for (String field : new String[] {"b_value", "s_value", "i_value", "l_value"}) {
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addFacet(histogramFacet(field).field(field).interval(4))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Facet (" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addAggregation(histogram(field).field(field).interval(4))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Aggregation (" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addFacet(histogramFacet(field).field(field).valueField(field).interval(4))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Facet (" + field + "/" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addAggregation(histogram(field).field(field).subAggregation(stats(field).field(field)).interval(4))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Aggregation (" + field + "/" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addFacet(histogramFacet("date").field("date").interval(1000))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Facet (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addAggregation(dateHistogram("date").field("date").interval(1000))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Aggregation (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addFacet(histogramFacet("date").field("date").valueField("l_value").interval(1000))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Facet (date/l_value) " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addAggregation(dateHistogram("date").field("date").interval(1000).subAggregation(stats("stats").field("l_value")))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Aggregation (date/l_value) " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addFacet(dateHistogramFacet("date").field("date").interval("day").mode(FacetBuilder.Mode.COLLECTOR))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Date Histogram Facet (mode/collector) (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addFacet(dateHistogramFacet("date").field("date").interval("day").mode(FacetBuilder.Mode.POST))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Date Histogram Facet (mode/post) (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
node1.close();
}
}
| 0true
|
src_test_java_org_elasticsearch_benchmark_search_aggregations_HistogramAggregationSearchBenchmark.java
|
1,441 |
public static class Entry {
private final State state;
private final SnapshotId snapshotId;
private final ImmutableMap<ShardId, ShardRestoreStatus> shards;
private final ImmutableList<String> indices;
/**
* Creates new restore metadata
*
* @param snapshotId snapshot id
* @param state current state of the restore process
* @param indices list of indices being restored
* @param shards list of shards being restored and their current restore status
*/
public Entry(SnapshotId snapshotId, State state, ImmutableList<String> indices, ImmutableMap<ShardId, ShardRestoreStatus> shards) {
this.snapshotId = snapshotId;
this.state = state;
this.indices = indices;
if (shards == null) {
this.shards = ImmutableMap.of();
} else {
this.shards = shards;
}
}
/**
* Returns snapshot id
*
* @return snapshot id
*/
public SnapshotId snapshotId() {
return this.snapshotId;
}
/**
* Returns list of shards being restored and their status
*
* @return list of shards
*/
public ImmutableMap<ShardId, ShardRestoreStatus> shards() {
return this.shards;
}
/**
* Returns current restore state
*
* @return restore state
*/
public State state() {
return state;
}
/**
* Returns list of indices
*
* @return list of indices
*/
public ImmutableList<String> indices() {
return indices;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Entry entry = (Entry) o;
if (!indices.equals(entry.indices)) return false;
if (!snapshotId.equals(entry.snapshotId)) return false;
if (!shards.equals(entry.shards)) return false;
if (state != entry.state) return false;
return true;
}
@Override
public int hashCode() {
int result = state.hashCode();
result = 31 * result + snapshotId.hashCode();
result = 31 * result + shards.hashCode();
result = 31 * result + indices.hashCode();
return result;
}
}
| 0true
|
src_main_java_org_elasticsearch_cluster_metadata_RestoreMetaData.java
|
441 |
@Deprecated
public @interface AdminPresentationCollectionOverride {
/**
* The name of the property whose AdminPresentation annotation should be overwritten
*
* @return the name of the property that should be overwritten
*/
String name();
/**
* The AdminPresentation to overwrite the property with
*
* @return the AdminPresentation being mapped to the attribute
*/
AdminPresentationCollection value();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_presentation_override_AdminPresentationCollectionOverride.java
|
496 |
private static class LinkSetRewriter implements FieldRewriter<OMVRBTreeRIDSet> {
@Override
public OMVRBTreeRIDSet rewriteValue(OMVRBTreeRIDSet setValue) {
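// Rewrite every RID in the set; return the rewritten set only if at least one RID changed, otherwise return null.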
setValue.setAutoConvertToRecord(false);
OMVRBTreeRIDSet result = new OMVRBTreeRIDSet();
result.setAutoConvertToRecord(false);
boolean wasRewritten = false;
for (OIdentifiable identifiable : setValue) {
FieldRewriter<ORID> fieldRewriter = RewritersFactory.INSTANCE.findRewriter(null, null, identifiable.getIdentity());
ORID newRid = fieldRewriter.rewriteValue(identifiable.getIdentity());
if (newRid != null) {
wasRewritten = true;
result.add(newRid);
} else
result.add(identifiable);
}
if (wasRewritten)
return result;
result.clear();
return null;
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseImport.java
|
797 |
public static class Match implements Streamable {
private Text index;
private Text id;
private float score;
private Map<String, HighlightField> hl;
public Match(Text index, Text id, float score, Map<String, HighlightField> hl) {
this.id = id;
this.score = score;
this.index = index;
this.hl = hl;
}
public Match(Text index, Text id, float score) {
this.id = id;
this.score = score;
this.index = index;
}
Match() {
}
public Text getIndex() {
return index;
}
public Text getId() {
return id;
}
public float getScore() {
return score;
}
public Map<String, HighlightField> getHighlightFields() {
return hl;
}
@Override
public void readFrom(StreamInput in) throws IOException {
id = in.readText();
index = in.readText();
score = in.readFloat();
int size = in.readVInt();
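// A size of zero means no highlight fields were serialized for this match.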
if (size > 0) {
hl = new HashMap<String, HighlightField>(size);
for (int j = 0; j < size; j++) {
hl.put(in.readString(), HighlightField.readHighlightField(in));
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeText(id);
out.writeText(index);
out.writeFloat(score);
if (hl != null) {
out.writeVInt(hl.size());
for (Map.Entry<String, HighlightField> entry : hl.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
}
} else {
out.writeVInt(0);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_percolate_PercolateResponse.java
|
315 |
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return currentState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
latch.countDown();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_health_TransportClusterHealthAction.java
|
1,155 |
public class HazelcastInstanceNotActiveException extends IllegalStateException {
public HazelcastInstanceNotActiveException() {
super("Hazelcast instance is not active!");
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_core_HazelcastInstanceNotActiveException.java
|
128 |
{
@Override
public void run()
{
db1.shutdown();
}
} );
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_TestApplyTransactions.java
|
297 |
public class OTraverseFieldProcess extends OTraverseAbstractProcess<Iterator<Object>> {
protected Object field;
public OTraverseFieldProcess(final OTraverse iCommand, final Iterator<Object> iTarget) {
super(iCommand, iTarget);
}
public OIdentifiable process() {
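// Evaluate each field of the current record and descend into multi-values or linked documents via sub-processes.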
while (target.hasNext()) {
field = target.next();
final Object fieldValue;
if (field instanceof OSQLFilterItem)
fieldValue = ((OSQLFilterItem) field).getValue(((OTraverseRecordProcess) command.getContext().peek(-2)).getTarget(), null);
else
fieldValue = ((OTraverseRecordProcess) command.getContext().peek(-2)).getTarget().rawField(field.toString());
if (fieldValue != null) {
final OTraverseAbstractProcess<?> subProcess;
if (fieldValue instanceof Iterator<?> || OMultiValue.isMultiValue(fieldValue)) {
final Iterator<Object> coll = OMultiValue.getMultiValueIterator(fieldValue);
switch (command.getStrategy()) {
case BREADTH_FIRST:
subProcess = new OTraverseMultiValueBreadthFirstProcess(command, coll);
break;
case DEPTH_FIRST:
subProcess = new OTraverseMultiValueDepthFirstProcess(command, coll);
break;
default:
throw new IllegalArgumentException("Traverse strategy not supported: " + command.getStrategy());
}
} else if (fieldValue instanceof OIdentifiable && ((OIdentifiable) fieldValue).getRecord() instanceof ODocument)
subProcess = new OTraverseRecordProcess(command, (ODocument) ((OIdentifiable) fieldValue).getRecord());
else
continue;
final OIdentifiable subValue = subProcess.process();
if (subValue != null)
return subValue;
}
}
return drop();
}
@Override
public String getStatus() {
return field != null ? field.toString() : null;
}
@Override
public String toString() {
return field != null ? "[field:" + field.toString() + "]" : null;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_command_traverse_OTraverseFieldProcess.java
|
1,143 |
public class OSQLMethodField extends OAbstractSQLMethod {
public static final String NAME = "field";
public OSQLMethodField() {
super(NAME, 0, 1);
}
@Override
public Object execute(final OIdentifiable iCurrentRecord, final OCommandContext iContext, Object ioResult,
final Object[] iMethodParams) {
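// Normalise the current result (string RIDs, identifiables, collections/arrays) before resolving the requested field against it.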
if (ioResult != null)
if (ioResult instanceof String)
try {
ioResult = new ODocument(new ORecordId((String) ioResult));
} catch (Exception e) {
OLogManager.instance().error(this, "Error on reading rid with value '%s'", null, ioResult);
ioResult = null;
}
else if (ioResult instanceof OIdentifiable)
ioResult = ((OIdentifiable) ioResult).getRecord();
else if (ioResult instanceof Collection<?> || ioResult instanceof OMultiCollectionIterator<?>
|| ioResult.getClass().isArray()) {
final List<Object> result = new ArrayList<Object>(OMultiValue.getSize(ioResult));
for (Object o : OMultiValue.getMultiValueIterable(ioResult)) {
result.add(ODocumentHelper.getFieldValue(o, iMethodParams[0].toString()));
}
return result;
}
if (ioResult != null) {
if (ioResult instanceof OCommandContext) {
ioResult = ((OCommandContext) ioResult).getVariable(iMethodParams[0].toString());
} else {
ioResult = ODocumentHelper.getFieldValue(ioResult, iMethodParams[0].toString(), iContext);
}
}
return ioResult;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodField.java
|
39 |
{
@Override
public HighAvailabilityMemberState getHighAvailabilityMemberState()
{
return memberStateMachine.getCurrentState();
}
} );
| 1no label
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
|
662 |
public interface ProductOptionDao {
public List<ProductOption> readAllProductOptions();
public ProductOption readProductOptionById(Long id);
public ProductOption saveProductOption(ProductOption option);
public ProductOptionValue readProductOptionValueById(Long id);
}
| 0true
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_dao_ProductOptionDao.java
|
46 |
static final class PackageDescriptorProposal extends CompletionProposal {
PackageDescriptorProposal(int offset, String prefix, String packageName) {
super(offset, prefix, PACKAGE,
"package " + packageName,
"package " + packageName + ";");
}
@Override
protected boolean qualifiedNameIsPath() {
return true;
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_PackageCompletions.java
|
555 |
public interface DynamicDaoHelper {
public Map<String, Object> getIdMetadata(Class<?> entityClass, HibernateEntityManager entityManager);
public List<String> getPropertyNames(Class<?> entityClass, HibernateEntityManager entityManager);
public List<Type> getPropertyTypes(Class<?> entityClass, HibernateEntityManager entityManager);
public SessionFactory getSessionFactory(HibernateEntityManager entityManager);
public Class<?>[] getAllPolymorphicEntitiesFromCeiling(Class<?> ceilingClass, SessionFactory sessionFactory, boolean includeUnqualifiedPolymorphicEntities, boolean useCache);
public Class<?>[] sortEntities(Class<?> ceilingClass, List<Class<?>> entities);
public boolean isExcludeClassFromPolymorphism(Class<?> clazz);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_util_dao_DynamicDaoHelper.java
|
1,486 |
public class RoutingTableValidation implements Serializable, Streamable {
private boolean valid = true;
private List<String> failures;
private Map<String, List<String>> indicesFailures;
public RoutingTableValidation() {
}
public boolean valid() {
return valid;
}
public List<String> allFailures() {
if (failures().isEmpty() && indicesFailures().isEmpty()) {
return ImmutableList.of();
}
List<String> allFailures = newArrayList(failures());
for (Map.Entry<String, List<String>> entry : indicesFailures().entrySet()) {
for (String failure : entry.getValue()) {
allFailures.add("Index [" + entry.getKey() + "]: " + failure);
}
}
return allFailures;
}
public List<String> failures() {
if (failures == null) {
return ImmutableList.of();
}
return failures;
}
public Map<String, List<String>> indicesFailures() {
if (indicesFailures == null) {
return ImmutableMap.of();
}
return indicesFailures;
}
public List<String> indexFailures(String index) {
if (indicesFailures == null) {
return ImmutableList.of();
}
List<String> indexFailures = indicesFailures.get(index);
if (indexFailures == null) {
return ImmutableList.of();
}
return indexFailures;
}
public void addFailure(String failure) {
valid = false;
if (failures == null) {
failures = newArrayList();
}
failures.add(failure);
}
public void addIndexFailure(String index, String failure) {
valid = false;
if (indicesFailures == null) {
indicesFailures = newHashMap();
}
List<String> indexFailures = indicesFailures.get(index);
if (indexFailures == null) {
indexFailures = Lists.newArrayList();
indicesFailures.put(index, indexFailures);
}
indexFailures.add(failure);
}
@Override
public String toString() {
return allFailures().toString();
}
@Override
public void readFrom(StreamInput in) throws IOException {
valid = in.readBoolean();
int size = in.readVInt();
if (size == 0) {
failures = ImmutableList.of();
} else {
failures = Lists.newArrayListWithCapacity(size);
for (int i = 0; i < size; i++) {
failures.add(in.readString());
}
}
size = in.readVInt();
if (size == 0) {
indicesFailures = ImmutableMap.of();
} else {
indicesFailures = newHashMap();
for (int i = 0; i < size; i++) {
String index = in.readString();
int size2 = in.readVInt();
List<String> indexFailures = newArrayListWithCapacity(size2);
for (int j = 0; j < size2; j++) {
indexFailures.add(in.readString());
}
indicesFailures.put(index, indexFailures);
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(valid);
if (failures == null) {
out.writeVInt(0);
} else {
out.writeVInt(failures.size());
for (String failure : failures) {
out.writeString(failure);
}
}
if (indicesFailures == null) {
out.writeVInt(0);
} else {
out.writeVInt(indicesFailures.size());
for (Map.Entry<String, List<String>> entry : indicesFailures.entrySet()) {
out.writeString(entry.getKey());
out.writeVInt(entry.getValue().size());
for (String failure : entry.getValue()) {
out.writeString(failure);
}
}
}
}
}
| 0true
|
src_main_java_org_elasticsearch_cluster_routing_RoutingTableValidation.java
|
547 |
metaDataMappingService.removeMapping(clusterStateUpdateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new DeleteMappingResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_delete_TransportDeleteMappingAction.java
|
189 |
public class ThreadLocalRandom extends Random {
// same constants as Random, but must be redeclared because private
private static final long multiplier = 0x5DEECE66DL;
private static final long addend = 0xBL;
private static final long mask = (1L << 48) - 1;
/**
* The random seed. We can't use super.seed.
*/
private long rnd;
/**
* Initialization flag to permit calls to setSeed to succeed only
* while executing the Random constructor. We can't allow others
* since it would cause setting seed in one part of a program to
* unintentionally impact other usages by the thread.
*/
boolean initialized;
// Padding to help avoid memory contention among seed updates in
// different TLRs in the common case that they are located near
// each other.
private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
/**
* The actual ThreadLocal
*/
private static final ThreadLocal<ThreadLocalRandom> localRandom =
new ThreadLocal<ThreadLocalRandom>() {
protected ThreadLocalRandom initialValue() {
return new ThreadLocalRandom();
}
};
/**
* Constructor called only by localRandom.initialValue.
*/
ThreadLocalRandom() {
super();
initialized = true;
}
/**
* Returns the current thread's {@code ThreadLocalRandom}.
*
* @return the current thread's {@code ThreadLocalRandom}
*/
public static ThreadLocalRandom current() {
return localRandom.get();
}
/**
* Throws {@code UnsupportedOperationException}. Setting seeds in
* this generator is not supported.
*
* @throws UnsupportedOperationException always
*/
public void setSeed(long seed) {
if (initialized)
throw new UnsupportedOperationException();
rnd = (seed ^ multiplier) & mask;
}
protected int next(int bits) {
rnd = (rnd * multiplier + addend) & mask;
return (int) (rnd >>> (48-bits));
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
*
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
* @throws IllegalArgumentException if least greater than or equal
* to bound
*/
public int nextInt(int least, int bound) {
if (least >= bound)
throw new IllegalArgumentException();
return nextInt(bound - least) + least;
}
/**
* Returns a pseudorandom, uniformly distributed value
* between 0 (inclusive) and the specified value (exclusive).
*
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
* @throws IllegalArgumentException if n is not positive
*/
public long nextLong(long n) {
if (n <= 0)
throw new IllegalArgumentException("n must be positive");
// Divide n by two until small enough for nextInt. On each
// iteration (at most 31 of them but usually much less),
// randomly choose both whether to include high bit in result
// (offset) and whether to continue with the lower vs upper
// half (which makes a difference only if odd).
long offset = 0;
while (n >= Integer.MAX_VALUE) {
int bits = next(2);
long half = n >>> 1;
long nextn = ((bits & 2) == 0) ? half : n - half;
if ((bits & 1) == 0)
offset += n - nextn;
n = nextn;
}
return offset + nextInt((int) n);
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
*
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
* @throws IllegalArgumentException if least greater than or equal
* to bound
*/
public long nextLong(long least, long bound) {
if (least >= bound)
throw new IllegalArgumentException();
return nextLong(bound - least) + least;
}
/**
* Returns a pseudorandom, uniformly distributed {@code double} value
* between 0 (inclusive) and the specified value (exclusive).
*
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
* @throws IllegalArgumentException if n is not positive
*/
public double nextDouble(double n) {
if (n <= 0)
throw new IllegalArgumentException("n must be positive");
return nextDouble() * n;
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
*
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
* @throws IllegalArgumentException if least greater than or equal
* to bound
*/
public double nextDouble(double least, double bound) {
if (least >= bound)
throw new IllegalArgumentException();
return nextDouble() * (bound - least) + least;
}
private static final long serialVersionUID = -5851777807851030925L;
}
| 0true
|
src_main_java_jsr166y_ThreadLocalRandom.java
|
152 |
{
@Override
public XaCommand readCommand( ReadableByteChannel byteChannel, ByteBuffer buffer ) throws IOException
{
return Command.readCommand( null, null, byteChannel, buffer );
}
};
| 0true
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_TransactionReader.java
|
412 |
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD})
public @interface AdminPresentationAdornedTargetCollection {
/**
* <p>Optional - field name will be used if not specified</p>
*
* <p>The friendly name to present to a user for this field in a GUI. If supporting i18N,
* the friendly name may be a key to retrieve a localized friendly name using
* the GWT support for i18N.</p>
*
* @return the friendly name
*/
String friendlyName() default "";
/**
* <p>Optional - only required if you wish to apply security to this field</p>
*
* <p>If a security level is specified, it is registered with the SecurityManager.
* The SecurityManager checks the permission of the current user to
* determine if this field should be disabled based on the specified level.</p>
*
* @return the security level
*/
String securityLevel() default "";
/**
* <p>Optional - fields are not excluded by default</p>
*
* <p>Specify if this field should be excluded from inclusion in the
* admin presentation layer</p>
*
* @return whether or not the field should be excluded
*/
boolean excluded() default false;
/**
     * <p>Optional - propertyName, only required if you want to hide the field based on this property's value</p>
     *
     * <p>If the property is defined and found to be set to false in the AppConfigurationService, then this field will be excluded from the
     * admin presentation layer</p>
*
* @return name of the property
*/
String showIfProperty() default "";
/**
* Optional - only required if you want to make the field immutable
*
     * Explicitly specify whether or not this field is mutable.
*
* @return whether or not this field is read only
*/
boolean readOnly() default false;
/**
* <p>Optional - only required if you want to make the field ignore caching</p>
*
* <p>Explicitly specify whether or not this field will use server-side
* caching during inspection</p>
*
* @return whether or not this field uses caching
*/
boolean useServerSideInspectionCache() default true;
/**
* <p>Optional - only required in the absence of a "mappedBy" property
* on the JPA annotation</p>
*
* <p>This is the field in the adorned target entity that refers
* back to the parent entity</p>
*
* @return the field that refers back to the parent entity
*/
String parentObjectProperty() default "";
/**
* <p>Optional - only required if the primary key property of the
* parent entity is called something other than "id"</p>
*
* <p>This is the field in the parent entity that represents
* its primary key</p>
*
* @return primary key field of the parent entity
*/
String parentObjectIdProperty() default "id";
/**
* <p>This is the field in the adorned target entity that refers
* to the target entity</p>
*
* @return target entity field of the adorned target
*/
String targetObjectProperty() default "";
/**
* <p>Optional - only required if the adorned target has fields
* (other than the sort property) that should be populated
* by the user</p>
*
* <p>List of fields to include in the add/update form
* for the adorned target entity.</p>
*
* @return user populated fields on the adorned target
*/
String[] maintainedAdornedTargetFields() default {};
/**
* <p>Optional - only required when it is desirable to override
* the property prominence settings from the adorned target and the
* target object</p>
*
* <p>List of fields visible in the adorned target grid UI in the
* admin tool. Fields are referenced relative to the adorned target
* entity, or the target entity. For example, in CrossSaleProductImpl,
     * to show the product name and promotionMessage fields, the
     * gridVisibleFields value would be: {"defaultSku.name", "promotionMessage"}</p>
*
*
* @return List of fields visible in the adorned target grid UI in the admin tool
*/
String[] gridVisibleFields() default {};
/**
* <p>Optional - only required if the primary key property of the
* target entity is called something other than "id"</p>
*
* <p>This is the field in the target entity that represents
* its primary key</p>
*
* <p>Note that this should just be the property name, not the path to the property.
* For example, if the target object is CountryImpl, then the value for the
* targetObjectIdProperty should just be "abbreviation".
*
* @return primary key field of the target entity
*/
String targetObjectIdProperty() default "id";
/**
* <p>Optional - only required if there is an entity that is responsible
* for modeling the join table for this adorned collection.</p>
*
* <p>For example, consider the scenario that a product has many possible
* parent categories. Also consider that you might want to sort the parent
* categories in a specific way. The join entity in this case would hold a
* link to both a category and a product as well as a sequence field. This
* property provides the ability to specify that mapping.</p>
*
* @return the join entity class (if any)
*/
String joinEntityClass() default "";
/**
* <p>Optional - only required if the adorned target has
* a field used for sorting</p>
*
* <p>This is the field by which the adorned targets are sorted</p>
*
* @return the sort field in the adorned target entity
*/
String sortProperty() default "";
/**
* <p>Optional - only required if the sort order should be
* descending</p>
*
* <p>This is the sort direction for the adorned targets</p>
*
* @return the sort direction
*/
boolean sortAscending() default true;
/**
* <p>Optional - only required if the system should not query
* the user for the adorned property values.</p>
*
* <p>Defines whether or not the system should prompt the user
* for the adorned property values (if any) after searching
* for the target entity. This is an advanced feature and is
* rarely used.</p>
*
* @return whether to ignore the adorned properties
*/
boolean ignoreAdornedProperties() default false;
/**
* <p>Optional - only required if you want to specify ordering for this field</p>
*
* <p>The order in which this field will appear in a GUI relative to other collections from the same class</p>
*
* @return the display order
*/
int order() default 99999;
/**
* Optional - only required if you want the field to appear under a different tab
*
* Specify a GUI tab for this field
*
* @return the tab for this field
*/
String tab() default "General";
/**
* Optional - only required if you want to order the appearance of the tabs in the UI
*
     * Specify an order for this tab. Tabs will be sorted in the resulting form in
* ascending order based on this parameter.
*
* The default tab will render with an order of 100.
*
* @return the order for this tab
*/
int tabOrder() default 100;
/**
* <p>Optional - only required if you need to specially handle crud operations for this
* specific collection on the server</p>
*
     * <p>Custom string values that will be passed to the server during CRUD operations on this
* collection. These criteria values can be detected in a custom persistence handler
* (@CustomPersistenceHandler) in order to engage special handling through custom server
* side code for this collection.</p>
*
* @return the custom string array to pass to the server during CRUD operations
*/
String[] customCriteria() default {};
/**
* <p>Optional - only required if a special operation type is required for a CRUD operation. This
* setting is not normally changed and is an advanced setting</p>
*
* <p>The operation type for a CRUD operation</p>
*
* @return the operation type
*/
AdminPresentationOperationTypes operationTypes() default @AdminPresentationOperationTypes(addType = OperationType.ADORNEDTARGETLIST, fetchType = OperationType.ADORNEDTARGETLIST, inspectType = OperationType.BASIC, removeType = OperationType.ADORNEDTARGETLIST, updateType = OperationType.ADORNEDTARGETLIST);
/**
     * Optional - If you have FieldType set to SupportedFieldType.MONEY,
     * then you can specify a money currency property field.
     *
* @return the currency property field
*/
String currencyCodeField() default "";
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_presentation_AdminPresentationAdornedTargetCollection.java
|
784 |
public class TransactionalSetProxy<E> extends AbstractTransactionalCollectionProxy<SetService, E>
implements TransactionalSet<E> {
private final HashSet<CollectionItem> set = new HashSet<CollectionItem>();
public TransactionalSetProxy(String name, TransactionSupport tx, NodeEngine nodeEngine, SetService service) {
super(name, tx, nodeEngine, service);
}
@Override
public boolean add(E e) {
checkTransactionState();
throwExceptionIfNull(e);
final NodeEngine nodeEngine = getNodeEngine();
final Data value = nodeEngine.toData(e);
if (!getCollection().add(new CollectionItem(-1, value))) {
return false;
}
CollectionReserveAddOperation operation = new CollectionReserveAddOperation(name, tx.getTxnId(), value);
try {
Future<Long> f = nodeEngine.getOperationService().invokeOnPartition(getServiceName(), operation, partitionId);
Long itemId = f.get();
if (itemId != null) {
if (!itemIdSet.add(itemId)) {
throw new TransactionException("Duplicate itemId: " + itemId);
}
CollectionTxnAddOperation op = new CollectionTxnAddOperation(name, itemId, value);
final String txnId = tx.getTxnId();
final String serviceName = getServiceName();
tx.addTransactionLog(new CollectionTransactionLog(itemId, name, partitionId, serviceName, txnId, op));
return true;
}
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
}
return false;
}
@Override
public String getServiceName() {
return SetService.SERVICE_NAME;
}
@Override
protected Collection<CollectionItem> getCollection() {
return set;
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_txn_TransactionalSetProxy.java
|
1,341 |
public class OPaginatedClusterFactory {
public static final OPaginatedClusterFactory INSTANCE = new OPaginatedClusterFactory();
public OCluster createCluster(int configurationVersion) {
if (configurationVersion >= 0 && configurationVersion < 6) {
OLogManager.instance().error(
this,
"You use deprecated version of storage cluster, "
+ "this version is not supported in current implementation. Please do export/import or recreate database.");
return new OPaginatedWithoutRidReuseCluster();
}
return new OPaginatedCluster();
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_wal_OPaginatedClusterFactory.java
|
754 |
public class ListAddAllOperation extends CollectionAddAllOperation {
private int index = -1;
public ListAddAllOperation() {
}
public ListAddAllOperation(String name, int index, List<Data> valueList) {
super(name, valueList);
this.index = index;
}
@Override
public int getId() {
return CollectionDataSerializerHook.LIST_ADD_ALL;
}
@Override
public void run() throws Exception {
if (!hasEnoughCapacity(valueList.size())) {
response = false;
return;
}
valueMap = getOrCreateListContainer().addAll(index, valueList);
response = !valueMap.isEmpty();
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeInt(index);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
index = in.readInt();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_list_ListAddAllOperation.java
|
139 |
{
private int size;
@Override
public boolean reached( File file, long version, LogLoader source )
{
size += fileSystem.getFileSize( file );
return size >= maxSize;
}
};
| 0true
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogPruneStrategies.java
|
1,364 |
@ClusterScope(scope=Scope.TEST, numNodes=0)
public class FilteringAllocationTests extends ElasticsearchIntegrationTest {
private final ESLogger logger = Loggers.getLogger(FilteringAllocationTests.class);
@Test
public void testDecommissionNodeNoReplicas() throws Exception {
logger.info("--> starting 2 nodes");
final String node_0 = cluster().startNode();
final String node_1 = cluster().startNode();
assertThat(cluster().size(), equalTo(2));
logger.info("--> creating an index with no replicas");
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder().put("index.number_of_replicas", 0))
.execute().actionGet();
ensureGreen();
logger.info("--> index some data");
for (int i = 0; i < 100; i++) {
client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
}
client().admin().indices().prepareRefresh().execute().actionGet();
assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
logger.info("--> decommission the second node");
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._name", node_1))
.execute().actionGet();
waitForRelocation();
logger.info("--> verify all are allocated on node1 now");
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_0));
}
}
}
client().admin().indices().prepareRefresh().execute().actionGet();
assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
}
@Test
public void testDisablingAllocationFiltering() throws Exception {
logger.info("--> starting 2 nodes");
final String node_0 = cluster().startNode();
final String node_1 = cluster().startNode();
assertThat(cluster().size(), equalTo(2));
logger.info("--> creating an index with no replicas");
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder().put("index.number_of_replicas", 0))
.execute().actionGet();
ensureGreen();
logger.info("--> index some data");
for (int i = 0; i < 100; i++) {
client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
}
client().admin().indices().prepareRefresh().execute().actionGet();
assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test");
int numShardsOnNode1 = 0;
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if ("node1".equals(clusterState.nodes().get(shardRouting.currentNodeId()).name())) {
numShardsOnNode1++;
}
}
}
if (numShardsOnNode1 > ThrottlingAllocationDecider.DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES) {
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(settingsBuilder().put("cluster.routing.allocation.node_concurrent_recoveries", numShardsOnNode1)).execute().actionGet();
// make sure we can recover all the nodes at once otherwise we might run into a state where one of the shards has not yet started relocating
// but we already fired up the request to wait for 0 relocating shards.
}
logger.info("--> remove index from the first node");
client().admin().indices().prepareUpdateSettings("test")
.setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", node_0))
.execute().actionGet();
client().admin().cluster().prepareReroute().get();
ensureGreen();
logger.info("--> verify all shards are allocated on node_1 now");
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
indexRoutingTable = clusterState.routingTable().index("test");
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_1));
}
}
logger.info("--> disable allocation filtering ");
client().admin().indices().prepareUpdateSettings("test")
.setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", ""))
.execute().actionGet();
client().admin().cluster().prepareReroute().get();
ensureGreen();
logger.info("--> verify that there are shards allocated on both nodes now");
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(clusterState.routingTable().index("test").numberOfNodesShardsAreAllocatedOn(), equalTo(2));
}
}
| 0true
|
src_test_java_org_elasticsearch_cluster_allocation_FilteringAllocationTests.java
|
27 |
public class GetCommandParser implements CommandParser {
public TextCommand parser(SocketTextReader socketTextReader, String cmd, int space) {
String key = cmd.substring(space + 1);
if (key.indexOf(' ') == -1) {
GetCommand r = new GetCommand(key);
socketTextReader.publishRequest(r);
} else {
StringTokenizer st = new StringTokenizer(key);
while (st.hasMoreTokens()) {
PartialGetCommand r = new PartialGetCommand(st.nextToken());
socketTextReader.publishRequest(r);
}
socketTextReader.publishRequest(new EndCommand());
}
return null;
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_GetCommandParser.java
|
554 |
public class OImmutableRecordId extends ORecordId {
private static final long serialVersionUID = 1L;
public OImmutableRecordId(final int iClusterId, final OClusterPosition iClusterPosition) {
super(iClusterId, iClusterPosition);
}
public OImmutableRecordId(final ORecordId iRID) {
super(iRID);
}
@Override
public void copyFrom(final ORID iSource) {
throw new UnsupportedOperationException("copyFrom");
}
@Override
public ORecordId fromStream(byte[] iBuffer) {
throw new UnsupportedOperationException("fromStream");
}
@Override
public ORecordId fromStream(OMemoryStream iStream) {
throw new UnsupportedOperationException("fromStream");
}
@Override
public ORecordId fromStream(InputStream iStream) throws IOException {
throw new UnsupportedOperationException("fromStream");
}
@Override
public void fromString(String iRecordId) {
throw new UnsupportedOperationException("fromString");
}
@Override
public void reset() {
throw new UnsupportedOperationException("reset");
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_id_OImmutableRecordId.java
|
848 |
execute(request, new ActionListener<MultiSearchResponse>() {
@Override
public void onResponse(MultiSearchResponse response) {
try {
channel.sendResponse(response);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [msearch] and request [" + request + "]", e1);
}
}
});
| 0true
|
src_main_java_org_elasticsearch_action_search_TransportMultiSearchAction.java
|
149 |
public class OByteSerializer implements OBinarySerializer<Byte> {
/**
* size of byte value in bytes
*/
public static final int BYTE_SIZE = 1;
public static OByteSerializer INSTANCE = new OByteSerializer();
public static final byte ID = 2;
public int getObjectSize(Byte object, Object... hints) {
return BYTE_SIZE;
}
public void serialize(Byte object, byte[] stream, int startPosition, Object... hints) {
stream[startPosition] = object;
}
public Byte deserialize(byte[] stream, int startPosition) {
return stream[startPosition];
}
public int getObjectSize(byte[] stream, int startPosition) {
return BYTE_SIZE;
}
public byte getId() {
return ID;
}
public int getObjectSizeNative(byte[] stream, int startPosition) {
return getObjectSize(stream, startPosition);
}
public void serializeNative(Byte object, byte[] stream, int startPosition, Object... hints) {
serialize(object, stream, startPosition);
}
public Byte deserializeNative(byte[] stream, int startPosition) {
return deserialize(stream, startPosition);
}
@Override
public void serializeInDirectMemory(Byte object, ODirectMemoryPointer pointer, long offset, Object... hints) {
pointer.setByte(offset, object);
}
@Override
public Byte deserializeFromDirectMemory(ODirectMemoryPointer pointer, long offset) {
return pointer.getByte(offset);
}
@Override
public int getObjectSizeInDirectMemory(ODirectMemoryPointer pointer, long offset) {
return BYTE_SIZE;
}
public boolean isFixedLength() {
return true;
}
public int getFixedLength() {
return BYTE_SIZE;
}
@Override
public Byte preprocess(Byte value, Object... hints) {
return value;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_serialization_types_OByteSerializer.java
|
1,147 |
public class ParentChildIndexGenerator {
private final static Random RANDOM = new Random();
private final Client client;
private final int numParents;
private final int numChildrenPerParent;
private final int queryValueRatio;
public ParentChildIndexGenerator(Client client, int numParents, int numChildrenPerParent, int queryValueRatio) {
this.client = client;
this.numParents = numParents;
this.numChildrenPerParent = numChildrenPerParent;
this.queryValueRatio = queryValueRatio;
}
public void index() {
// Memory intensive...
ObjectOpenHashSet<String> usedParentIds = ObjectOpenHashSet.newInstanceWithCapacity(numParents, 0.5f);
ObjectArrayList<ParentDocument> parents = ObjectArrayList.newInstanceWithCapacity(numParents);
for (int i = 0; i < numParents; i++) {
String parentId;
do {
parentId = RandomStrings.randomAsciiOfLength(RANDOM, 10);
} while (!usedParentIds.add(parentId));
String[] queryValues = new String[numChildrenPerParent];
for (int j = 0; j < numChildrenPerParent; j++) {
queryValues[j] = getQueryValue();
}
parents.add(new ParentDocument(parentId, queryValues));
}
int indexCounter = 0;
int childIdCounter = 0;
while (!parents.isEmpty()) {
BulkRequestBuilder request = client.prepareBulk();
for (int i = 0; !parents.isEmpty() && i < 100; i++) {
int index = RANDOM.nextInt(parents.size());
ParentDocument parentDocument = parents.get(index);
if (parentDocument.indexCounter == -1) {
request.add(Requests.indexRequest("test").type("parent")
.id(parentDocument.parentId)
.source("field1", getQueryValue()));
} else {
request.add(Requests.indexRequest("test").type("child")
.parent(parentDocument.parentId)
.id(String.valueOf(++childIdCounter))
.source("field2", parentDocument.queryValues[parentDocument.indexCounter]));
}
if (++parentDocument.indexCounter == parentDocument.queryValues.length) {
parents.remove(index);
}
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
indexCounter += response.getItems().length;
if (indexCounter % 100000 == 0) {
System.out.println("--> Indexed " + indexCounter + " documents");
}
}
}
public String getQueryValue() {
return "value" + RANDOM.nextInt(numChildrenPerParent / queryValueRatio);
}
class ParentDocument {
final String parentId;
final String[] queryValues;
int indexCounter;
ParentDocument(String parentId, String[] queryValues) {
this.parentId = parentId;
this.queryValues = queryValues;
this.indexCounter = -1;
}
}
}
| 0true
|
src_test_java_org_elasticsearch_benchmark_search_child_ParentChildIndexGenerator.java
|
681 |
public class PutWarmerRequestBuilder extends AcknowledgedRequestBuilder<PutWarmerRequest, PutWarmerResponse, PutWarmerRequestBuilder> {
public PutWarmerRequestBuilder(IndicesAdminClient indicesClient, String name) {
super((InternalIndicesAdminClient) indicesClient, new PutWarmerRequest().name(name));
}
public PutWarmerRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new PutWarmerRequest());
}
/**
* Sets the name of the warmer.
*/
public PutWarmerRequestBuilder setName(String name) {
request.name(name);
return this;
}
/**
* Sets the search request to use to warm the index when applicable.
*/
public PutWarmerRequestBuilder setSearchRequest(SearchRequest searchRequest) {
request.searchRequest(searchRequest);
return this;
}
/**
* Sets the search request to use to warm the index when applicable.
*/
public PutWarmerRequestBuilder setSearchRequest(SearchRequestBuilder searchRequest) {
request.searchRequest(searchRequest);
return this;
}
@Override
protected void doExecute(ActionListener<PutWarmerResponse> listener) {
((IndicesAdminClient) client).putWarmer(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_warmer_put_PutWarmerRequestBuilder.java
|
608 |
public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<UpdateSettingsClusterStateUpdateRequest> {
private Settings settings;
public UpdateSettingsClusterStateUpdateRequest() {
}
/**
* Returns the {@link Settings} to update
*/
public Settings settings() {
return settings;
}
/**
* Sets the {@link Settings} to update
*/
public UpdateSettingsClusterStateUpdateRequest settings(Settings settings) {
this.settings = settings;
return this;
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_settings_put_UpdateSettingsClusterStateUpdateRequest.java
|
13 |
public interface TextCommandProcessor<T> {
void handle(T request);
void handleRejection(T request);
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_TextCommandProcessor.java
|
401 |
@SuppressWarnings({ "serial" })
public class ORecordTrackedList extends OTrackedList<OIdentifiable> {
public ORecordTrackedList(final ORecordInternal<?> iSourceRecord) {
super(iSourceRecord);
}
public Iterator<OIdentifiable> rawIterator() {
return iterator();
}
/**
   * The item's identity does not affect anything.
*/
public void onBeforeIdentityChanged(final ORID iRID) {
}
/**
   * The item's identity does not affect anything.
*/
public void onAfterIdentityChanged(final ORecord<?> iRecord) {
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_record_ORecordTrackedList.java
|
1,100 |
public class GiftWrapOrderItemRequest extends DiscreteOrderItemRequest {
private List<OrderItem> wrappedItems = new ArrayList<OrderItem>();
public List<OrderItem> getWrappedItems() {
return wrappedItems;
}
public void setWrappedItems(List<OrderItem> wrappedItems) {
this.wrappedItems = wrappedItems;
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + ((wrappedItems == null) ? 0 : wrappedItems.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (!super.equals(obj))
return false;
if (getClass() != obj.getClass())
return false;
GiftWrapOrderItemRequest other = (GiftWrapOrderItemRequest) obj;
if (wrappedItems == null) {
if (other.wrappedItems != null)
return false;
} else if (!wrappedItems.equals(other.wrappedItems))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_call_GiftWrapOrderItemRequest.java
|
6,416 |
public class LocalTransport extends AbstractLifecycleComponent<Transport> implements Transport {
private final ThreadPool threadPool;
private final Version version;
private volatile TransportServiceAdapter transportServiceAdapter;
private volatile BoundTransportAddress boundAddress;
private volatile LocalTransportAddress localAddress;
private final static ConcurrentMap<TransportAddress, LocalTransport> transports = newConcurrentMap();
private static final AtomicLong transportAddressIdGenerator = new AtomicLong();
private final ConcurrentMap<DiscoveryNode, LocalTransport> connectedNodes = newConcurrentMap();
@Inject
public LocalTransport(Settings settings, ThreadPool threadPool, Version version) {
super(settings);
this.threadPool = threadPool;
this.version = version;
}
@Override
public TransportAddress[] addressesFromString(String address) {
return new TransportAddress[]{new LocalTransportAddress(address)};
}
@Override
public boolean addressSupported(Class<? extends TransportAddress> address) {
return LocalTransportAddress.class.equals(address);
}
@Override
protected void doStart() throws ElasticsearchException {
localAddress = new LocalTransportAddress(Long.toString(transportAddressIdGenerator.incrementAndGet()));
transports.put(localAddress, this);
boundAddress = new BoundTransportAddress(localAddress, localAddress);
}
@Override
protected void doStop() throws ElasticsearchException {
transports.remove(localAddress);
// now, go over all the transports connected to me, and raise disconnected event
for (final LocalTransport targetTransport : transports.values()) {
for (final Map.Entry<DiscoveryNode, LocalTransport> entry : targetTransport.connectedNodes.entrySet()) {
if (entry.getValue() == this) {
targetTransport.disconnectFromNode(entry.getKey());
}
}
}
}
@Override
protected void doClose() throws ElasticsearchException {
}
@Override
public void transportServiceAdapter(TransportServiceAdapter transportServiceAdapter) {
this.transportServiceAdapter = transportServiceAdapter;
}
@Override
public BoundTransportAddress boundAddress() {
return boundAddress;
}
@Override
public boolean nodeConnected(DiscoveryNode node) {
return connectedNodes.containsKey(node);
}
@Override
public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
connectToNode(node);
}
@Override
public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
synchronized (this) {
if (connectedNodes.containsKey(node)) {
return;
}
final LocalTransport targetTransport = transports.get(node.address());
if (targetTransport == null) {
throw new ConnectTransportException(node, "Failed to connect");
}
connectedNodes.put(node, targetTransport);
transportServiceAdapter.raiseNodeConnected(node);
}
}
@Override
public void disconnectFromNode(DiscoveryNode node) {
synchronized (this) {
LocalTransport removed = connectedNodes.remove(node);
if (removed != null) {
transportServiceAdapter.raiseNodeDisconnected(node);
}
}
}
@Override
public long serverOpen() {
return 0;
}
@Override
public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
final Version version = Version.smallest(node.version(), this.version);
BytesStreamOutput bStream = new BytesStreamOutput();
StreamOutput stream = new HandlesStreamOutput(bStream);
stream.setVersion(version);
stream.writeLong(requestId);
byte status = 0;
status = TransportStatus.setRequest(status);
stream.writeByte(status); // 0 for request, 1 for response.
stream.writeString(action);
request.writeTo(stream);
stream.close();
final LocalTransport targetTransport = connectedNodes.get(node);
if (targetTransport == null) {
throw new NodeNotConnectedException(node, "Node not connected");
}
final byte[] data = bStream.bytes().toBytes();
transportServiceAdapter.sent(data.length);
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
targetTransport.messageReceived(data, action, LocalTransport.this, version, requestId);
}
});
}
ThreadPool threadPool() {
return this.threadPool;
}
protected void messageReceived(byte[] data, String action, LocalTransport sourceTransport, Version version, @Nullable final Long sendRequestId) {
try {
transportServiceAdapter.received(data.length);
StreamInput stream = new BytesStreamInput(data, false);
stream = CachedStreamInput.cachedHandles(stream);
stream.setVersion(version);
long requestId = stream.readLong();
byte status = stream.readByte();
boolean isRequest = TransportStatus.isRequest(status);
if (isRequest) {
handleRequest(stream, requestId, sourceTransport, version);
} else {
final TransportResponseHandler handler = transportServiceAdapter.remove(requestId);
                // ignore if it's null, the adapter logs it
if (handler != null) {
if (TransportStatus.isError(status)) {
handlerResponseError(stream, handler);
} else {
handleResponse(stream, handler);
}
}
}
} catch (Throwable e) {
if (sendRequestId != null) {
TransportResponseHandler handler = transportServiceAdapter.remove(sendRequestId);
if (handler != null) {
handleException(handler, new RemoteTransportException(nodeName(), localAddress, action, e));
}
} else {
logger.warn("Failed to receive message for action [" + action + "]", e);
}
}
}
private void handleRequest(StreamInput stream, long requestId, LocalTransport sourceTransport, Version version) throws Exception {
final String action = stream.readString();
final LocalTransportChannel transportChannel = new LocalTransportChannel(this, sourceTransport, action, requestId, version);
try {
final TransportRequestHandler handler = transportServiceAdapter.handler(action);
if (handler == null) {
throw new ActionNotFoundTransportException("Action [" + action + "] not found");
}
final TransportRequest request = handler.newInstance();
request.readFrom(stream);
if (handler.executor() == ThreadPool.Names.SAME) {
//noinspection unchecked
handler.messageReceived(request, transportChannel);
} else {
threadPool.executor(handler.executor()).execute(new AbstractRunnable() {
@Override
public void run() {
try {
//noinspection unchecked
handler.messageReceived(request, transportChannel);
} catch (Throwable e) {
if (lifecycleState() == Lifecycle.State.STARTED) {
                                // we can only send a response if the transport is started...
try {
transportChannel.sendResponse(e);
} catch (Throwable e1) {
logger.warn("Failed to send error message back to client for action [" + action + "]", e1);
logger.warn("Actual Exception", e);
}
}
}
}
@Override
public boolean isForceExecution() {
return handler.isForceExecution();
}
});
}
} catch (Throwable e) {
try {
transportChannel.sendResponse(e);
} catch (Throwable e1) {
logger.warn("Failed to send error message back to client for action [" + action + "]", e);
logger.warn("Actual Exception", e1);
}
}
}
protected void handleResponse(StreamInput buffer, final TransportResponseHandler handler) {
final TransportResponse response = handler.newInstance();
try {
response.readFrom(buffer);
} catch (Throwable e) {
handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
return;
}
        handleParsedResponse(response, handler);
    }
    protected void handleParsedResponse(final TransportResponse response, final TransportResponseHandler handler) {
threadPool.executor(handler.executor()).execute(new Runnable() {
@SuppressWarnings({"unchecked"})
@Override
public void run() {
try {
handler.handleResponse(response);
} catch (Throwable e) {
handleException(handler, new ResponseHandlerFailureTransportException(e));
}
}
});
}
private void handlerResponseError(StreamInput buffer, final TransportResponseHandler handler) {
Throwable error;
try {
ThrowableObjectInputStream ois = new ThrowableObjectInputStream(buffer, settings.getClassLoader());
error = (Throwable) ois.readObject();
} catch (Throwable e) {
error = new TransportSerializationException("Failed to deserialize exception response from stream", e);
}
handleException(handler, error);
}
private void handleException(final TransportResponseHandler handler, Throwable error) {
if (!(error instanceof RemoteTransportException)) {
error = new RemoteTransportException("None remote transport exception", error);
}
final RemoteTransportException rtx = (RemoteTransportException) error;
try {
handler.handleException(rtx);
} catch (Throwable t) {
logger.error("failed to handle exception response [{}]", t, handler);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_transport_local_LocalTransport.java
|
848 |
public class CompareAndSetRequest extends ModifyRequest {
private Data expected;
public CompareAndSetRequest() {
}
public CompareAndSetRequest(String name, Data expected, Data update) {
super(name, update);
this.expected = expected;
}
@Override
protected Operation prepareOperation() {
return new CompareAndSetOperation(name, expected, update);
}
@Override
public int getClassId() {
return AtomicReferencePortableHook.COMPARE_AND_SET;
}
@Override
public void write(PortableWriter writer) throws IOException {
super.write(writer);
ObjectDataOutput out = writer.getRawDataOutput();
writeNullableData(out, expected);
}
@Override
public void read(PortableReader reader) throws IOException {
super.read(reader);
ObjectDataInput in = reader.getRawDataInput();
expected = readNullableData(in);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_concurrent_atomicreference_client_CompareAndSetRequest.java
|
775 |
public class CollectionRollbackBackupOperation extends CollectionOperation implements BackupOperation {
private long itemId;
private boolean removeOperation;
public CollectionRollbackBackupOperation() {
}
public CollectionRollbackBackupOperation(String name, long itemId, boolean removeOperation) {
super(name);
this.itemId = itemId;
this.removeOperation = removeOperation;
}
@Override
public int getId() {
return CollectionDataSerializerHook.COLLECTION_ROLLBACK_BACKUP;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
if (removeOperation) {
getOrCreateContainer().rollbackRemoveBackup(itemId);
} else {
getOrCreateContainer().rollbackAddBackup(itemId);
}
}
@Override
public void afterRun() throws Exception {
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(itemId);
out.writeBoolean(removeOperation);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
itemId = in.readLong();
removeOperation = in.readBoolean();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_txn_CollectionRollbackBackupOperation.java
|
156 |
public class JMSArchivedStructuredContentSubscriber implements MessageListener {
@Resource(name = "blStructuredContentService")
private StructuredContentService structuredContentService;
/*
* (non-Javadoc)
* @see javax.jms.MessageListener#onMessage(javax.jms.Message)
*/
@SuppressWarnings("unchecked")
public void onMessage(Message message) {
String basePageCacheKey = null;
try {
HashMap<String,String> props = (HashMap<String,String>) ((ObjectMessage) message).getObject();
if (props != null) {
structuredContentService.removeItemFromCache(props.get("nameKey"), props.get("typeKey"));
}
} catch (JMSException e) {
throw new RuntimeException(e);
}
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_message_jms_JMSArchivedStructuredContentSubscriber.java
|
706 |
execute(request, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse result) {
try {
channel.sendResponse(result);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [" + BulkAction.NAME + "] and request [" + request + "]", e1);
}
}
});
| 0true
|
src_main_java_org_elasticsearch_action_bulk_TransportBulkAction.java
|
224 |
public class RuntimeEnvironmentPropertiesManager implements BeanFactoryAware {
private static final Log LOG = LogFactory.getLog(RuntimeEnvironmentPropertiesManager.class);
protected ConfigurableBeanFactory beanFactory;
protected String prefix;
public String getPrefix() {
return prefix;
}
public String setPrefix(String prefix) {
return this.prefix = prefix;
}
public String getProperty(String key, String suffix) {
if(key==null) {
return null;
}
String name = prefix + "." + key + "." + suffix;
if (prefix == null) {
name = key + "." + suffix;
}
String rv = beanFactory.resolveEmbeddedValue("${" + name + "}");
        if (rv == null || rv.equals("${" + name + "}")) {
            LOG.warn("property ${" + name + "} not found, reverting to property without suffix " + suffix);
rv = getProperty(key);
}
return rv;
}
public String getProperty(String key) {
if(key==null) {
return null;
}
String name = prefix + "." + key;
if (prefix == null) {
name = key;
}
String rv = beanFactory.resolveEmbeddedValue("${" + name + "}");
if(rv.equals("${" + name + "}")) {
return null;
}
return rv;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = (ConfigurableBeanFactory) beanFactory;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_config_RuntimeEnvironmentPropertiesManager.java
|
725 |
ItemListener listener = new ItemListener() {
public void itemAdded(ItemEvent item) {
latchAdd.countDown();
}
public void itemRemoved(ItemEvent item) {
latchRemove.countDown();
}
};
| 0true
|
hazelcast_src_test_java_com_hazelcast_collection_SetTest.java
|
3,228 |
public class ReplicatedRecord<K, V>
implements IdentifiedDataSerializable {
private final AtomicLong hits = new AtomicLong();
private final AtomicLong lastAccessTime = new AtomicLong();
private K key;
private V value;
private VectorClock vectorClock;
private int latestUpdateHash;
private long ttlMillis;
private volatile long updateTime = System.currentTimeMillis();
public ReplicatedRecord() {
}
public ReplicatedRecord(K key, V value, VectorClock vectorClock, int hash, long ttlMillis) {
this.key = key;
this.value = value;
this.vectorClock = vectorClock;
this.latestUpdateHash = hash;
this.ttlMillis = ttlMillis;
}
public K getKey() {
access();
return key;
}
public V getValue() {
access();
return value;
}
public VectorClock getVectorClock() {
return vectorClock;
}
public long getTtlMillis() {
return ttlMillis;
}
public V setValue(V value, int hash, long ttlMillis) {
access();
V oldValue = this.value;
this.value = value;
this.latestUpdateHash = hash;
this.updateTime = System.currentTimeMillis();
this.ttlMillis = ttlMillis;
return oldValue;
}
public long getUpdateTime() {
return updateTime;
}
public int getLatestUpdateHash() {
return latestUpdateHash;
}
public long getHits() {
return hits.get();
}
public long getLastAccessTime() {
return lastAccessTime.get();
}
public void access() {
hits.incrementAndGet();
lastAccessTime.set(System.currentTimeMillis());
}
@Override
public int getFactoryId() {
return ReplicatedMapDataSerializerHook.F_ID;
}
@Override
public int getId() {
return ReplicatedMapDataSerializerHook.RECORD;
}
@Override
public void writeData(ObjectDataOutput out)
throws IOException {
out.writeObject(key);
out.writeObject(value);
vectorClock.writeData(out);
out.writeInt(latestUpdateHash);
out.writeLong(ttlMillis);
}
@Override
public void readData(ObjectDataInput in)
throws IOException {
key = in.readObject();
value = in.readObject();
vectorClock = new VectorClock();
vectorClock.readData(in);
latestUpdateHash = in.readInt();
ttlMillis = in.readLong();
}
//CHECKSTYLE:OFF
// Deactivated due to complexity of the equals method
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ReplicatedRecord that = (ReplicatedRecord) o;
if (latestUpdateHash != that.latestUpdateHash) {
return false;
}
if (ttlMillis != that.ttlMillis) {
return false;
}
if (key != null ? !key.equals(that.key) : that.key != null) {
return false;
}
if (value != null ? !value.equals(that.value) : that.value != null) {
return false;
}
return true;
}
//CHECKSTYLE:ON
@Override
public int hashCode() {
int result = key != null ? key.hashCode() : 0;
result = 31 * result + (value != null ? value.hashCode() : 0);
result = 31 * result + latestUpdateHash;
result = 31 * result + (int) (ttlMillis ^ (ttlMillis >>> 32));
return result;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("ReplicatedRecord{");
sb.append("key=").append(key);
sb.append(", value=").append(value);
sb.append(", vector=").append(vectorClock);
sb.append(", latestUpdateHash=").append(latestUpdateHash);
sb.append(", ttlMillis=").append(ttlMillis);
sb.append('}');
return sb.toString();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_replicatedmap_record_ReplicatedRecord.java
|
1,216 |
public class CeylonCompilationError implements Diagnostic<JavaFileObject> {
private final AnalysisMessage err;
private final IProject project;
private final JavaFileObject jf;
private final IFile file;
public CeylonCompilationError(IProject proj, AnalysisMessage error) {
err = error;
this.project = proj;
file = project.getFile(err.getTreeNode().getUnit().getFullPath());
jf = new JavaFileObject() {
@Override
public URI toUri() {
try {
return new URI(file.getFullPath().toString());
} catch (URISyntaxException e) {
e.printStackTrace();
}
return null;
}
@Override
public Writer openWriter() throws IOException {
return null;
}
@Override
public Reader openReader(boolean ignoreEncodingErrors) throws IOException {
return null;
}
@Override
public OutputStream openOutputStream() throws IOException {
return null;
}
@Override
public InputStream openInputStream() throws IOException {
return null;
}
@Override
public String getName() {
return file.getLocation().toOSString();
}
@Override
public long getLastModified() {
return file.getModificationStamp();
}
@Override
public CharSequence getCharContent(boolean ignoreEncodingErrors)
throws IOException {
return null;
}
@Override
public boolean delete() {
return false;
}
@Override
public boolean isNameCompatible(String simpleName, Kind kind) {
return false;
}
@Override
public NestingKind getNestingKind() {
return NestingKind.TOP_LEVEL;
}
@Override
public Kind getKind() {
return Kind.SOURCE;
}
@Override
public Modifier getAccessLevel() {
return Modifier.FINAL;
}
};
}
@Override
public javax.tools.Diagnostic.Kind getKind() {
return Diagnostic.Kind.ERROR;
}
@Override
public JavaFileObject getSource() {
return jf;
}
@Override
public long getPosition() {
return getStartPosition();
}
@Override
public long getStartPosition() {
int startOffset = 0;
Node errorNode = Nodes.getIdentifyingNode(err.getTreeNode());
if (errorNode == null) {
errorNode = err.getTreeNode();
}
Token token = errorNode.getToken();
if (token!=null) {
startOffset = errorNode.getStartIndex();
}
return startOffset;
}
@Override
public long getEndPosition() {
int endOffset = 0;
Node errorNode = Nodes.getIdentifyingNode(err.getTreeNode());
if (errorNode == null) {
errorNode = err.getTreeNode();
}
Token token = errorNode.getToken();
if (token!=null) {
endOffset = errorNode.getStopIndex()+1;
}
return endOffset;
}
@Override
public long getLineNumber() {
return err.getLine();
}
@Override
public long getColumnNumber() {
int startCol = 0;
Node errorNode = Nodes.getIdentifyingNode(err.getTreeNode());
if (errorNode == null) {
errorNode = err.getTreeNode();
}
Token token = errorNode.getToken();
if (token!=null) {
startCol = token.getCharPositionInLine();
}
return startCol;
}
@Override
public String getCode() {
return String.valueOf(err.getCode());
}
@Override
public String getMessage(Locale locale) {
return err.getMessage();
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_builder_CeylonCompilationError.java
|
317 |
@SuppressWarnings("serial")
public class OStorageConfiguration implements OSerializableStream {
public static final ORecordId CONFIG_RID = new OImmutableRecordId(0, OClusterPositionFactory.INSTANCE.valueOf(0));
public static final String DEFAULT_CHARSET = "UTF-8";
public static final int CURRENT_VERSION = 6;
public int version = -1;
public String name;
public String schemaRecordId;
public String dictionaryRecordId;
public String indexMgrRecordId;
private String localeLanguage = Locale.getDefault().getLanguage();
private String localeCountry = Locale.getDefault().getCountry();
public String dateFormat = "yyyy-MM-dd";
public String dateTimeFormat = "yyyy-MM-dd HH:mm:ss";
private TimeZone timeZone = TimeZone.getDefault();
private String charset = DEFAULT_CHARSET;
public OStorageSegmentConfiguration fileTemplate;
public List<OStorageClusterConfiguration> clusters = new ArrayList<OStorageClusterConfiguration>();
public List<OStorageDataConfiguration> dataSegments = new ArrayList<OStorageDataConfiguration>();
public OStorageTxConfiguration txSegment = new OStorageTxConfiguration();
public List<OStorageEntryConfiguration> properties = new ArrayList<OStorageEntryConfiguration>();
private transient Locale localeInstance;
private transient DecimalFormatSymbols unusualSymbols;
protected transient OStorage storage;
public OStorageConfiguration(final OStorage iStorage) {
storage = iStorage;
fileTemplate = new OStorageSegmentConfiguration();
}
/**
   * This method loads the record information from the internal cluster segment. It's for compatibility with databases older than
   * 0.9.25.
*
* @compatibility 0.9.25
* @return
* @throws OSerializationException
*/
public OStorageConfiguration load() throws OSerializationException {
final byte[] record = storage.readRecord(CONFIG_RID, null, false, null, false).getResult().buffer;
if (record == null)
throw new OStorageException("Cannot load database's configuration. The database seems to be corrupted.");
fromStream(record);
return this;
}
public void update() throws OSerializationException {
final byte[] record = toStream();
storage
.updateRecord(CONFIG_RID, record, OVersionFactory.instance().createUntrackedVersion(), ORecordBytes.RECORD_TYPE, 0, null);
}
public boolean isEmpty() {
return clusters.isEmpty();
}
public String getDirectory() {
return fileTemplate.location != null ? fileTemplate.getLocation() : ((OStorageLocalAbstract) storage).getStoragePath();
}
public Locale getLocaleInstance() {
if (localeInstance == null)
localeInstance = new Locale(localeLanguage, localeCountry);
return localeInstance;
}
public void resetLocaleInstance() {
localeInstance = null;
}
public SimpleDateFormat getDateFormatInstance() {
final SimpleDateFormat dateFormatInstance = new SimpleDateFormat(dateFormat);
dateFormatInstance.setLenient(false);
dateFormatInstance.setTimeZone(timeZone);
return dateFormatInstance;
}
public SimpleDateFormat getDateTimeFormatInstance() {
final SimpleDateFormat dateTimeFormatInstance = new SimpleDateFormat(dateTimeFormat);
dateTimeFormatInstance.setLenient(false);
dateTimeFormatInstance.setTimeZone(timeZone);
return dateTimeFormatInstance;
}
public DecimalFormatSymbols getUnusualSymbols() {
if (unusualSymbols == null)
unusualSymbols = new DecimalFormatSymbols(getLocaleInstance());
return unusualSymbols;
}
public OSerializableStream fromStream(final byte[] iStream) throws OSerializationException {
final String[] values = new String(iStream).split("\\|");
int index = 0;
version = Integer.parseInt(read(values[index++]));
name = read(values[index++]);
schemaRecordId = read(values[index++]);
dictionaryRecordId = read(values[index++]);
if (version > 0)
indexMgrRecordId = read(values[index++]);
else
      // @COMPATIBILITY
indexMgrRecordId = null;
localeLanguage = read(values[index++]);
localeCountry = read(values[index++]);
dateFormat = read(values[index++]);
dateTimeFormat = read(values[index++]);
    // @COMPATIBILITY 1.2.0
if (version >= 4) {
timeZone = TimeZone.getTimeZone(read(values[index++]));
charset = read(values[index++]);
}
    // @COMPATIBILITY
if (version > 1)
index = phySegmentFromStream(values, index, fileTemplate);
int size = Integer.parseInt(read(values[index++]));
// PREPARE THE LIST OF CLUSTERS
clusters = new ArrayList<OStorageClusterConfiguration>(size);
for (int i = 0; i < size; ++i) {
final int clusterId = Integer.parseInt(read(values[index++]));
if (clusterId == -1)
continue;
final String clusterName = read(values[index++]);
final int targetDataSegmentId = version >= 3 ? Integer.parseInt(read(values[index++])) : 0;
final String clusterType = read(values[index++]);
final OStorageClusterConfiguration currentCluster;
if (clusterType.equals("p")) {
// PHYSICAL CLUSTER
final OStoragePhysicalClusterConfigurationLocal phyClusterLocal = new OStoragePhysicalClusterConfigurationLocal(this,
clusterId, targetDataSegmentId);
phyClusterLocal.name = clusterName;
index = phySegmentFromStream(values, index, phyClusterLocal);
final String holeFlag;
if (version > 4) {
holeFlag = read(values[index++]);
} else {
holeFlag = "f";
}
if (holeFlag.equals("f"))
phyClusterLocal.setHoleFile(new OStorageClusterHoleConfiguration(phyClusterLocal, read(values[index++]),
read(values[index++]), read(values[index++])));
currentCluster = phyClusterLocal;
} else if (clusterType.equals("m"))
// MEMORY CLUSTER
currentCluster = new OStorageMemoryClusterConfiguration(clusterName, clusterId, targetDataSegmentId);
else if (clusterType.equals("d")) {
currentCluster = new OStoragePaginatedClusterConfiguration(this, clusterId, clusterName, null,
Boolean.valueOf(read(values[index++])), Float.valueOf(read(values[index++])), Float.valueOf(read(values[index++])),
read(values[index++]));
} else
throw new IllegalArgumentException("Unsupported cluster type: " + clusterType);
      // MAKE ROOM, FILLING EMPTY ENTRIES IF NEEDED
for (int c = clusters.size(); c <= clusterId; ++c)
clusters.add(null);
clusters.set(clusterId, currentCluster);
}
// PREPARE THE LIST OF DATA SEGS
size = Integer.parseInt(read(values[index++]));
dataSegments = new ArrayList<OStorageDataConfiguration>(size);
for (int i = 0; i < size; ++i)
dataSegments.add(null);
int dataId;
String dataName;
OStorageDataConfiguration data;
for (int i = 0; i < size; ++i) {
dataId = Integer.parseInt(read(values[index++]));
if (dataId == -1)
continue;
dataName = read(values[index++]);
data = new OStorageDataConfiguration(this, dataName, dataId);
index = phySegmentFromStream(values, index, data);
data.holeFile = new OStorageDataHoleConfiguration(data, read(values[index++]), read(values[index++]), read(values[index++]));
dataSegments.set(dataId, data);
}
txSegment = new OStorageTxConfiguration(read(values[index++]), read(values[index++]), read(values[index++]),
read(values[index++]), read(values[index++]));
size = Integer.parseInt(read(values[index++]));
properties = new ArrayList<OStorageEntryConfiguration>(size);
for (int i = 0; i < size; ++i) {
properties.add(new OStorageEntryConfiguration(read(values[index++]), read(values[index++])));
}
return this;
}
public byte[] toStream() throws OSerializationException {
final StringBuilder buffer = new StringBuilder();
write(buffer, CURRENT_VERSION);
write(buffer, name);
write(buffer, schemaRecordId);
write(buffer, dictionaryRecordId);
write(buffer, indexMgrRecordId);
write(buffer, localeLanguage);
write(buffer, localeCountry);
write(buffer, dateFormat);
write(buffer, dateTimeFormat);
write(buffer, timeZone.getID());
write(buffer, charset);
phySegmentToStream(buffer, fileTemplate);
write(buffer, clusters.size());
for (OStorageClusterConfiguration c : clusters) {
if (c == null) {
write(buffer, -1);
continue;
}
write(buffer, c.getId());
write(buffer, c.getName());
write(buffer, c.getDataSegmentId());
if (c instanceof OStoragePhysicalClusterConfigurationLocal) {
// PHYSICAL
write(buffer, "p");
phySegmentToStream(buffer, (OStoragePhysicalClusterConfigurationLocal) c);
OStorageFileConfiguration holeFile = ((OStoragePhysicalClusterConfigurationLocal) c).getHoleFile();
if (holeFile == null)
write(buffer, "e");
else
write(buffer, "f");
if (holeFile != null)
fileToStream(buffer, holeFile);
} else if (c instanceof OStorageMemoryClusterConfiguration) {
// MEMORY
write(buffer, "m");
} else if (c instanceof OStorageEHClusterConfiguration) {
write(buffer, "h");
} else if (c instanceof OStoragePaginatedClusterConfiguration) {
write(buffer, "d");
final OStoragePaginatedClusterConfiguration paginatedClusterConfiguration = (OStoragePaginatedClusterConfiguration) c;
write(buffer, paginatedClusterConfiguration.useWal);
write(buffer, paginatedClusterConfiguration.recordOverflowGrowFactor);
write(buffer, paginatedClusterConfiguration.recordGrowFactor);
write(buffer, paginatedClusterConfiguration.compression);
}
}
write(buffer, dataSegments.size());
for (OStorageDataConfiguration d : dataSegments) {
if (d == null) {
write(buffer, -1);
continue;
}
write(buffer, d.id);
write(buffer, d.name);
phySegmentToStream(buffer, d);
fileToStream(buffer, d.holeFile);
}
fileToStream(buffer, txSegment);
write(buffer, txSegment.isSynchRecord());
write(buffer, txSegment.isSynchTx());
write(buffer, properties.size());
for (OStorageEntryConfiguration e : properties)
entryToStream(buffer, e);
    // PLAIN: ALLOCATE ENOUGH SPACE TO REUSE IT EVERY TIME
buffer.append("|");
return buffer.toString().getBytes();
}
private int phySegmentFromStream(final String[] values, int index, final OStorageSegmentConfiguration iSegment) {
iSegment.location = version > 2 ? read(values[index++]) : null;
iSegment.maxSize = read(values[index++]);
iSegment.fileType = read(values[index++]);
iSegment.fileStartSize = read(values[index++]);
iSegment.fileMaxSize = read(values[index++]);
iSegment.fileIncrementSize = read(values[index++]);
iSegment.defrag = read(values[index++]);
final int size = Integer.parseInt(read(values[index++]));
iSegment.infoFiles = new OStorageFileConfiguration[size];
String fileName;
for (int i = 0; i < size; ++i) {
fileName = read(values[index++]);
if (!fileName.contains("$")) {
// @COMPATIBILITY 0.9.25
int pos = fileName.indexOf("/databases");
if (pos > -1) {
fileName = "${" + Orient.ORIENTDB_HOME + "}" + fileName.substring(pos);
}
}
iSegment.infoFiles[i] = new OStorageFileConfiguration(iSegment, fileName, read(values[index++]), read(values[index++]),
iSegment.fileIncrementSize);
}
return index;
}
private void phySegmentToStream(final StringBuilder iBuffer, final OStorageSegmentConfiguration iSegment) {
write(iBuffer, iSegment.location);
write(iBuffer, iSegment.maxSize);
write(iBuffer, iSegment.fileType);
write(iBuffer, iSegment.fileStartSize);
write(iBuffer, iSegment.fileMaxSize);
write(iBuffer, iSegment.fileIncrementSize);
write(iBuffer, iSegment.defrag);
write(iBuffer, iSegment.infoFiles.length);
for (OStorageFileConfiguration f : iSegment.infoFiles)
fileToStream(iBuffer, f);
}
private void fileToStream(final StringBuilder iBuffer, final OStorageFileConfiguration iFile) {
write(iBuffer, iFile.path);
write(iBuffer, iFile.type);
write(iBuffer, iFile.maxSize);
}
private void entryToStream(final StringBuilder iBuffer, final OStorageEntryConfiguration iEntry) {
write(iBuffer, iEntry.name);
write(iBuffer, iEntry.value);
}
private String read(final String iValue) {
if (iValue.equals(" "))
return null;
return iValue;
}
private void write(final StringBuilder iBuffer, final Object iValue) {
if (iBuffer.length() > 0)
iBuffer.append('|');
iBuffer.append(iValue != null ? iValue.toString() : ' ');
}
public void create() throws IOException {
storage.createRecord(0, CONFIG_RID, new byte[] { 0, 0, 0, 0 }, OVersionFactory.instance().createVersion(),
ORecordBytes.RECORD_TYPE, (byte) 0, null);
}
public void synch() throws IOException {
}
public void setSoftlyClosed(boolean softlyClosed) throws IOException {
}
public void close() throws IOException {
}
public void setCluster(final OStorageClusterConfiguration config) {
while (config.getId() >= clusters.size())
clusters.add(null);
clusters.set(config.getId(), config);
}
public void dropCluster(final int iClusterId) {
if (iClusterId < clusters.size()) {
clusters.set(iClusterId, null);
update();
}
}
public void dropDataSegment(final int iId) {
if (iId < dataSegments.size()) {
dataSegments.set(iId, null);
update();
}
}
public TimeZone getTimeZone() {
return timeZone;
}
public void setTimeZone(final TimeZone timeZone) {
this.timeZone = timeZone;
}
public String getLocaleLanguage() {
return localeLanguage;
}
public String getLocaleCountry() {
return localeCountry;
}
public String getCharset() {
return charset;
}
public void setCharset(String charset) {
this.charset = charset;
}
public void setLocaleLanguage(final String iValue) {
localeLanguage = iValue;
localeInstance = null;
}
public void setLocaleCountry(final String iValue) {
localeCountry = iValue;
localeInstance = null;
}
public String getDateFormat() {
return dateFormat;
}
public String getDateTimeFormat() {
return dateTimeFormat;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_config_OStorageConfiguration.java
|
1,542 |
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, Text> {
private boolean isVertex;
private final Text textWritable = new Text();
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.outputs = new SafeMapperOutputs(context);
if (!context.getConfiguration().getBoolean(Tokens.TITAN_HADOOP_PIPELINE_TRACK_PATHS, false))
throw new IllegalStateException(PathMap.class.getSimpleName() + " requires that paths be enabled");
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
if (this.isVertex && value.hasPaths()) {
for (final List<FaunusPathElement.MicroElement> path : value.getPaths()) {
this.textWritable.set(path.toString());
this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
} else {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
for (final List<FaunusPathElement.MicroElement> path : edge.getPaths()) {
this.textWritable.set(path.toString());
this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
}
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_PathMap.java
|
322 |
static class SimpleMapStore implements MapStore<String, String>, MapLoader<String, String> {
public static final int MAX_KEYS = 30;
public static final int DELAY_SECONDS_PER_KEY = 1;
@Override
public String load(String key) {
sleepSeconds(DELAY_SECONDS_PER_KEY);
return key + "value";
}
@Override
public Map<String, String> loadAll(Collection<String> keys) {
Map<String, String> map = new HashMap<String, String>();
for (String key : keys) {
map.put(key, load(key));
}
return map;
}
@Override
public Set<String> loadAllKeys() {
Set<String> keys = new HashSet<String>();
for (int k = 0; k < MAX_KEYS; k++) { keys.add("key" + k); }
return keys;
}
@Override
public void delete(String key) {
sleepSeconds(DELAY_SECONDS_PER_KEY);
}
@Override
public void deleteAll(Collection<String> keys) {
for (String key : keys) {
delete(key);
}
}
@Override
public void store(String key, String value) {
sleepSeconds(DELAY_SECONDS_PER_KEY);
}
@Override
public void storeAll(Map<String, String> entries) {
for (Map.Entry<String, String> e : entries.entrySet()) {
store(e.getKey(), e.getValue());
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapStoreTest.java
|
5,469 |
public class SearchPhaseController extends AbstractComponent {
public static Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>> QUERY_RESULT_ORDERING = new Comparator<AtomicArray.Entry<? extends QuerySearchResultProvider>>() {
@Override
public int compare(AtomicArray.Entry<? extends QuerySearchResultProvider> o1, AtomicArray.Entry<? extends QuerySearchResultProvider> o2) {
int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index());
if (i == 0) {
i = o1.value.shardTarget().shardId() - o2.value.shardTarget().shardId();
}
return i;
}
};
public static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
private final CacheRecycler cacheRecycler;
private final boolean optimizeSingleShard;
@Inject
public SearchPhaseController(Settings settings, CacheRecycler cacheRecycler) {
super(settings);
this.cacheRecycler = cacheRecycler;
this.optimizeSingleShard = componentSettings.getAsBoolean("optimize_single_shard", true);
}
public boolean optimizeSingleShard() {
return optimizeSingleShard;
}
public AggregatedDfs aggregateDfs(AtomicArray<DfsSearchResult> results) {
ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics = HppcMaps.newNoNullKeysMap();
ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
long aggMaxDoc = 0;
for (AtomicArray.Entry<DfsSearchResult> lEntry : results.asList()) {
final Term[] terms = lEntry.value.terms();
final TermStatistics[] stats = lEntry.value.termStatistics();
assert terms.length == stats.length;
for (int i = 0; i < terms.length; i++) {
assert terms[i] != null;
TermStatistics existing = termStatistics.get(terms[i]);
if (existing != null) {
assert terms[i].bytes().equals(existing.term());
                    // totalTermFrequency is an optional statistic; we need to check if either one or both
                    // are set to -1 (which means not present) and then set it globally to -1
termStatistics.put(terms[i], new TermStatistics(existing.term(),
existing.docFreq() + stats[i].docFreq(),
optionalSum(existing.totalTermFreq(), stats[i].totalTermFreq())));
} else {
termStatistics.put(terms[i], stats[i]);
}
}
final boolean[] states = lEntry.value.fieldStatistics().allocated;
final Object[] keys = lEntry.value.fieldStatistics().keys;
final Object[] values = lEntry.value.fieldStatistics().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
String key = (String) keys[i];
CollectionStatistics value = (CollectionStatistics) values[i];
assert key != null;
CollectionStatistics existing = fieldStatistics.get(key);
if (existing != null) {
CollectionStatistics merged = new CollectionStatistics(
key, existing.maxDoc() + value.maxDoc(),
optionalSum(existing.docCount(), value.docCount()),
optionalSum(existing.sumTotalTermFreq(), value.sumTotalTermFreq()),
optionalSum(existing.sumDocFreq(), value.sumDocFreq())
);
fieldStatistics.put(key, merged);
} else {
fieldStatistics.put(key, value);
}
}
}
aggMaxDoc += lEntry.value.maxDoc();
}
return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc);
}
private static long optionalSum(long left, long right) {
return Math.min(left, right) == -1 ? -1 : left + right;
}
public ScoreDoc[] sortDocs(AtomicArray<? extends QuerySearchResultProvider> resultsArr) {
List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results = resultsArr.asList();
if (results.isEmpty()) {
return EMPTY_DOCS;
}
if (optimizeSingleShard) {
boolean canOptimize = false;
QuerySearchResult result = null;
int shardIndex = -1;
if (results.size() == 1) {
canOptimize = true;
result = results.get(0).value.queryResult();
shardIndex = results.get(0).index;
} else {
            // let's see if we only got hits from a single shard; if so, we can optimize...
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
if (entry.value.queryResult().topDocs().scoreDocs.length > 0) {
if (result != null) { // we already have one, can't really optimize
canOptimize = false;
break;
}
canOptimize = true;
result = entry.value.queryResult();
shardIndex = entry.index;
}
}
}
if (canOptimize) {
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
if (scoreDocs.length < result.from()) {
return EMPTY_DOCS;
}
int resultDocsSize = result.size();
if ((scoreDocs.length - result.from()) < resultDocsSize) {
resultDocsSize = scoreDocs.length - result.from();
}
if (result.topDocs() instanceof TopFieldDocs) {
ScoreDoc[] docs = new ScoreDoc[resultDocsSize];
for (int i = 0; i < resultDocsSize; i++) {
ScoreDoc scoreDoc = scoreDocs[result.from() + i];
scoreDoc.shardIndex = shardIndex;
docs[i] = scoreDoc;
}
return docs;
} else {
ScoreDoc[] docs = new ScoreDoc[resultDocsSize];
for (int i = 0; i < resultDocsSize; i++) {
ScoreDoc scoreDoc = scoreDocs[result.from() + i];
scoreDoc.shardIndex = shardIndex;
docs[i] = scoreDoc;
}
return docs;
}
}
}
@SuppressWarnings("unchecked")
AtomicArray.Entry<? extends QuerySearchResultProvider>[] sortedResults = results.toArray(new AtomicArray.Entry[results.size()]);
Arrays.sort(sortedResults, QUERY_RESULT_ORDERING);
QuerySearchResultProvider firstResult = sortedResults[0].value;
int totalNumDocs = 0;
int queueSize = firstResult.queryResult().from() + firstResult.queryResult().size();
if (firstResult.includeFetch()) {
// if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them...
// this is also important since we shortcut and fetch only docs from "from" and up to "size"
queueSize *= sortedResults.length;
}
// we don't use TopDocs#merge here because with TopDocs#merge, when pagination, we need to ask for "from + size" topN
// hits, which ends up creating a "from + size" ScoreDoc[], while in our implementation, we can actually get away with
// just create "size" ScoreDoc (the reverse order in the queue). would be nice to improve TopDocs#merge to allow for
// it in which case we won't need this logic...
PriorityQueue queue;
if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) {
        // sorting: first, if the type is a String, change CUSTOM to STRING so we handle nulls properly (since our CUSTOM String sorting might return null)
TopFieldDocs fieldDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
for (int i = 0; i < fieldDocs.fields.length; i++) {
boolean allValuesAreNull = true;
boolean resolvedField = false;
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : sortedResults) {
for (ScoreDoc doc : entry.value.queryResult().topDocs().scoreDocs) {
FieldDoc fDoc = (FieldDoc) doc;
if (fDoc.fields[i] != null) {
allValuesAreNull = false;
if (fDoc.fields[i] instanceof String) {
fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.Type.STRING, fieldDocs.fields[i].getReverse());
}
resolvedField = true;
break;
}
}
if (resolvedField) {
break;
}
}
if (!resolvedField && allValuesAreNull && fieldDocs.fields[i].getField() != null) {
                // we did not manage to resolve a field (and it's not score or doc, which have no field), and all the fields are null (which can only happen for STRING), so make it a STRING
fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.Type.STRING, fieldDocs.fields[i].getReverse());
}
}
queue = new ShardFieldDocSortedHitQueue(fieldDocs.fields, queueSize);
// we need to accumulate for all and then filter the from
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : sortedResults) {
QuerySearchResult result = entry.value.queryResult();
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
totalNumDocs += scoreDocs.length;
for (ScoreDoc doc : scoreDocs) {
doc.shardIndex = entry.index;
if (queue.insertWithOverflow(doc) == doc) {
// filled the queue, break
break;
}
}
}
} else {
queue = new ScoreDocQueue(queueSize); // we need to accumulate for all and then filter the from
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : sortedResults) {
QuerySearchResult result = entry.value.queryResult();
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
totalNumDocs += scoreDocs.length;
for (ScoreDoc doc : scoreDocs) {
doc.shardIndex = entry.index;
if (queue.insertWithOverflow(doc) == doc) {
// filled the queue, break
break;
}
}
}
}
int resultDocsSize = firstResult.queryResult().size();
if (firstResult.includeFetch()) {
// if we did both query and fetch on the same go, we have fetched all the docs from each shards already, use them...
resultDocsSize *= sortedResults.length;
}
if (totalNumDocs < queueSize) {
resultDocsSize = totalNumDocs - firstResult.queryResult().from();
}
if (resultDocsSize <= 0) {
return EMPTY_DOCS;
}
// we only pop the first, this handles "from" nicely since the "from" are down the queue
// that we already fetched, so we are actually popping the "from" and up to "size"
ScoreDoc[] shardDocs = new ScoreDoc[resultDocsSize];
for (int i = resultDocsSize - 1; i >= 0; i--) // put docs in array
shardDocs[i] = (ScoreDoc) queue.pop();
return shardDocs;
}
/**
* Builds an array, with potential null elements, with docs to load.
*/
public void fillDocIdsToLoad(AtomicArray<IntArrayList> docsIdsToLoad, ScoreDoc[] shardDocs) {
for (ScoreDoc shardDoc : shardDocs) {
IntArrayList list = docsIdsToLoad.get(shardDoc.shardIndex);
if (list == null) {
list = new IntArrayList(); // can't be shared!, uses unsafe on it later on
docsIdsToLoad.set(shardDoc.shardIndex, list);
}
list.add(shardDoc.doc);
}
}
public InternalSearchResponse merge(ScoreDoc[] sortedDocs, AtomicArray<? extends QuerySearchResultProvider> queryResultsArr, AtomicArray<? extends FetchSearchResultProvider> fetchResultsArr) {
List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults = queryResultsArr.asList();
List<? extends AtomicArray.Entry<? extends FetchSearchResultProvider>> fetchResults = fetchResultsArr.asList();
if (queryResults.isEmpty()) {
return InternalSearchResponse.EMPTY;
}
QuerySearchResult firstResult = queryResults.get(0).value.queryResult();
boolean sorted = false;
int sortScoreIndex = -1;
if (firstResult.topDocs() instanceof TopFieldDocs) {
sorted = true;
TopFieldDocs fieldDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
for (int i = 0; i < fieldDocs.fields.length; i++) {
if (fieldDocs.fields[i].getType() == SortField.Type.SCORE) {
sortScoreIndex = i;
}
}
}
// merge facets
InternalFacets facets = null;
if (!queryResults.isEmpty()) {
// we rely on the fact that the order of facets is the same on all query results
if (firstResult.facets() != null && firstResult.facets().facets() != null && !firstResult.facets().facets().isEmpty()) {
List<Facet> aggregatedFacets = Lists.newArrayList();
List<Facet> namedFacets = Lists.newArrayList();
for (Facet facet : firstResult.facets()) {
// aggregate each facet name into a single list, and aggregate it
namedFacets.clear();
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
for (Facet facet1 : entry.value.queryResult().facets()) {
if (facet.getName().equals(facet1.getName())) {
namedFacets.add(facet1);
}
}
}
if (!namedFacets.isEmpty()) {
Facet aggregatedFacet = ((InternalFacet) namedFacets.get(0)).reduce(new InternalFacet.ReduceContext(cacheRecycler, namedFacets));
aggregatedFacets.add(aggregatedFacet);
}
}
facets = new InternalFacets(aggregatedFacets);
}
}
// count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
long totalHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
boolean timedOut = false;
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
QuerySearchResult result = entry.value.queryResult();
if (result.searchTimedOut()) {
timedOut = true;
}
totalHits += result.topDocs().totalHits;
if (!Float.isNaN(result.topDocs().getMaxScore())) {
maxScore = Math.max(maxScore, result.topDocs().getMaxScore());
}
}
if (Float.isInfinite(maxScore)) {
maxScore = Float.NaN;
}
// clean the fetch counter
for (AtomicArray.Entry<? extends FetchSearchResultProvider> entry : fetchResults) {
entry.value.fetchResult().initCounter();
}
// merge hits
List<InternalSearchHit> hits = new ArrayList<InternalSearchHit>();
if (!fetchResults.isEmpty()) {
for (ScoreDoc shardDoc : sortedDocs) {
FetchSearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
if (fetchResultProvider == null) {
continue;
}
FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
int index = fetchResult.counterGetAndIncrement();
if (index < fetchResult.hits().internalHits().length) {
InternalSearchHit searchHit = fetchResult.hits().internalHits()[index];
searchHit.score(shardDoc.score);
searchHit.shard(fetchResult.shardTarget());
if (sorted) {
FieldDoc fieldDoc = (FieldDoc) shardDoc;
searchHit.sortValues(fieldDoc.fields);
if (sortScoreIndex != -1) {
searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
}
}
hits.add(searchHit);
}
}
}
// merge suggest results
Suggest suggest = null;
if (!queryResults.isEmpty()) {
final Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<String, List<Suggest.Suggestion>>();
boolean hasSuggestions = false;
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
Suggest shardResult = entry.value.queryResult().queryResult().suggest();
if (shardResult == null) {
continue;
}
hasSuggestions = true;
Suggest.group(groupedSuggestions, shardResult);
}
suggest = hasSuggestions ? new Suggest(Suggest.Fields.SUGGEST, Suggest.reduce(groupedSuggestions)) : null;
}
// merge addAggregation
InternalAggregations aggregations = null;
if (!queryResults.isEmpty()) {
if (firstResult.aggregations() != null && firstResult.aggregations().asList() != null) {
List<InternalAggregations> aggregationsList = new ArrayList<InternalAggregations>(queryResults.size());
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations());
}
aggregations = InternalAggregations.reduce(aggregationsList, cacheRecycler);
}
}
InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore);
return new InternalSearchResponse(searchHits, facets, aggregations, suggest, timedOut);
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_controller_SearchPhaseController.java
|
4,158 |
public class InternalIndexShard extends AbstractIndexShardComponent implements IndexShard {
private final ThreadPool threadPool;
private final IndexSettingsService indexSettingsService;
private final MapperService mapperService;
private final IndexQueryParserService queryParserService;
private final IndexCache indexCache;
private final InternalIndicesLifecycle indicesLifecycle;
private final Store store;
private final MergeSchedulerProvider mergeScheduler;
private final Engine engine;
private final Translog translog;
private final IndexAliasesService indexAliasesService;
private final ShardIndexingService indexingService;
private final ShardSearchService searchService;
private final ShardGetService getService;
private final ShardIndexWarmerService shardWarmerService;
private final ShardFilterCache shardFilterCache;
private final ShardIdCache shardIdCache;
private final ShardFieldData shardFieldData;
private final PercolatorQueriesRegistry percolatorQueriesRegistry;
private final ShardPercolateService shardPercolateService;
private final CodecService codecService;
private final ShardTermVectorService termVectorService;
private final IndexFieldDataService indexFieldDataService;
private final IndexService indexService;
private final Object mutex = new Object();
private final String checkIndexOnStartup;
private long checkIndexTook = 0;
private volatile IndexShardState state;
private TimeValue refreshInterval;
private final TimeValue mergeInterval;
private volatile ScheduledFuture refreshScheduledFuture;
private volatile ScheduledFuture mergeScheduleFuture;
private volatile ShardRouting shardRouting;
private RecoveryStatus peerRecoveryStatus;
private ApplyRefreshSettings applyRefreshSettings = new ApplyRefreshSettings();
private final MeanMetric refreshMetric = new MeanMetric();
private final MeanMetric flushMetric = new MeanMetric();
@Inject
public InternalIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, Engine engine, MergeSchedulerProvider mergeScheduler, Translog translog,
ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, ShardIndexingService indexingService, ShardGetService getService, ShardSearchService searchService, ShardIndexWarmerService shardWarmerService,
ShardFilterCache shardFilterCache, ShardIdCache shardIdCache, ShardFieldData shardFieldData,
PercolatorQueriesRegistry percolatorQueriesRegistry, ShardPercolateService shardPercolateService, CodecService codecService,
ShardTermVectorService termVectorService, IndexFieldDataService indexFieldDataService, IndexService indexService) {
super(shardId, indexSettings);
this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle;
this.indexSettingsService = indexSettingsService;
this.store = store;
this.engine = engine;
this.mergeScheduler = mergeScheduler;
this.translog = translog;
this.threadPool = threadPool;
this.mapperService = mapperService;
this.queryParserService = queryParserService;
this.indexCache = indexCache;
this.indexAliasesService = indexAliasesService;
this.indexingService = indexingService;
this.getService = getService.setIndexShard(this);
this.termVectorService = termVectorService.setIndexShard(this);
this.searchService = searchService;
this.shardWarmerService = shardWarmerService;
this.shardFilterCache = shardFilterCache;
this.shardIdCache = shardIdCache;
this.shardFieldData = shardFieldData;
this.percolatorQueriesRegistry = percolatorQueriesRegistry;
this.shardPercolateService = shardPercolateService;
this.indexFieldDataService = indexFieldDataService;
this.indexService = indexService;
this.codecService = codecService;
state = IndexShardState.CREATED;
this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, engine.defaultRefreshInterval());
this.mergeInterval = indexSettings.getAsTime("index.merge.async_interval", TimeValue.timeValueSeconds(1));
indexSettingsService.addListener(applyRefreshSettings);
logger.debug("state: [CREATED]");
this.checkIndexOnStartup = indexSettings.get("index.shard.check_on_startup", "false");
}
public MergeSchedulerProvider mergeScheduler() {
return this.mergeScheduler;
}
public Store store() {
return this.store;
}
public Engine engine() {
return engine;
}
public Translog translog() {
return translog;
}
public ShardIndexingService indexingService() {
return this.indexingService;
}
@Override
public ShardGetService getService() {
return this.getService;
}
@Override
public ShardTermVectorService termVectorService() {
return termVectorService;
}
@Override
public IndexFieldDataService indexFieldDataService() {
return indexFieldDataService;
}
@Override
public MapperService mapperService() {
return mapperService;
}
@Override
public IndexService indexService() {
return indexService;
}
@Override
public ShardSearchService searchService() {
return this.searchService;
}
@Override
public ShardIndexWarmerService warmerService() {
return this.shardWarmerService;
}
@Override
public ShardFilterCache filterCache() {
return this.shardFilterCache;
}
@Override
public ShardIdCache idCache() {
return this.shardIdCache;
}
@Override
public ShardFieldData fieldData() {
return this.shardFieldData;
}
@Override
public ShardRouting routingEntry() {
return this.shardRouting;
}
public InternalIndexShard routingEntry(ShardRouting newRouting) {
ShardRouting currentRouting = this.shardRouting;
if (!newRouting.shardId().equals(shardId())) {
throw new ElasticsearchIllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]");
}
if (currentRouting != null) {
if (!newRouting.primary() && currentRouting.primary()) {
logger.warn("suspect illegal state: trying to move shard from primary mode to replica mode");
}
// if its the same routing, return
if (currentRouting.equals(newRouting)) {
return this;
}
}
if (state == IndexShardState.POST_RECOVERY) {
        // if the state is started or relocating (because it might move right away from started to relocating)
// then move to STARTED
if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) {
// we want to refresh *before* we move to internal STARTED state
try {
engine.refresh(new Engine.Refresh("cluster_state_started").force(true));
} catch (Throwable t) {
logger.debug("failed to refresh due to move to cluster wide started", t);
}
boolean movedToStarted = false;
synchronized (mutex) {
// do the check under a mutex, so we make sure to only change to STARTED if in POST_RECOVERY
if (state == IndexShardState.POST_RECOVERY) {
changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
movedToStarted = true;
} else {
logger.debug("state [{}] not changed, not in POST_RECOVERY, global state is [{}]", state, newRouting.state());
}
}
if (movedToStarted) {
indicesLifecycle.afterIndexShardStarted(this);
}
}
}
this.shardRouting = newRouting;
indicesLifecycle.shardRoutingChanged(this, currentRouting, newRouting);
return this;
}
/**
 * Marks the shard as recovering; fails with an exception if recovering is not allowed to be set.
*/
public IndexShardState recovering(String reason) throws IndexShardStartedException,
IndexShardRelocatedException, IndexShardRecoveringException, IndexShardClosedException {
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId);
}
if (state == IndexShardState.STARTED) {
throw new IndexShardStartedException(shardId);
}
if (state == IndexShardState.RELOCATED) {
throw new IndexShardRelocatedException(shardId);
}
if (state == IndexShardState.RECOVERING) {
throw new IndexShardRecoveringException(shardId);
}
if (state == IndexShardState.POST_RECOVERY) {
throw new IndexShardRecoveringException(shardId);
}
return changeState(IndexShardState.RECOVERING, reason);
}
}
public InternalIndexShard relocated(String reason) throws IndexShardNotStartedException {
synchronized (mutex) {
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
}
changeState(IndexShardState.RELOCATED, reason);
}
return this;
}
@Override
public IndexShardState state() {
return state;
}
/**
* Changes the state of the current shard
*
* @param newState the new shard state
* @param reason the reason for the state change
* @return the previous shard state
*/
private IndexShardState changeState(IndexShardState newState, String reason) {
logger.debug("state: [{}]->[{}], reason [{}]", state, newState, reason);
IndexShardState previousState = state;
state = newState;
this.indicesLifecycle.indexShardStateChanged(this, previousState, reason);
return previousState;
}
@Override
public Engine.Create prepareCreate(SourceToParse source) throws ElasticsearchException {
long startTime = System.nanoTime();
DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(source.type());
ParsedDocument doc = docMapper.parse(source);
return new Engine.Create(docMapper, docMapper.uidMapper().term(doc.uid().stringValue()), doc).startTime(startTime);
}
@Override
public ParsedDocument create(Engine.Create create) throws ElasticsearchException {
writeAllowed(create.origin());
create = indexingService.preCreate(create);
if (logger.isTraceEnabled()) {
logger.trace("index {}", create.docs());
}
engine.create(create);
create.endTime(System.nanoTime());
indexingService.postCreate(create);
return create.parsedDoc();
}
@Override
public Engine.Index prepareIndex(SourceToParse source) throws ElasticsearchException {
long startTime = System.nanoTime();
DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(source.type());
ParsedDocument doc = docMapper.parse(source);
return new Engine.Index(docMapper, docMapper.uidMapper().term(doc.uid().stringValue()), doc).startTime(startTime);
}
@Override
public ParsedDocument index(Engine.Index index) throws ElasticsearchException {
writeAllowed(index.origin());
index = indexingService.preIndex(index);
try {
if (logger.isTraceEnabled()) {
logger.trace("index {}", index.docs());
}
engine.index(index);
index.endTime(System.nanoTime());
} catch (RuntimeException ex) {
indexingService.failedIndex(index);
throw ex;
}
indexingService.postIndex(index);
return index.parsedDoc();
}
@Override
public Engine.Delete prepareDelete(String type, String id, long version) throws ElasticsearchException {
long startTime = System.nanoTime();
DocumentMapper docMapper = mapperService.documentMapperWithAutoCreate(type);
return new Engine.Delete(type, id, docMapper.uidMapper().term(type, id)).version(version).startTime(startTime);
}
@Override
public void delete(Engine.Delete delete) throws ElasticsearchException {
writeAllowed(delete.origin());
delete = indexingService.preDelete(delete);
try {
if (logger.isTraceEnabled()) {
logger.trace("delete [{}]", delete.uid().text());
}
engine.delete(delete);
delete.endTime(System.nanoTime());
} catch (RuntimeException ex) {
indexingService.failedDelete(delete);
throw ex;
}
indexingService.postDelete(delete);
}
@Override
public Engine.DeleteByQuery prepareDeleteByQuery(BytesReference source, @Nullable String[] filteringAliases, String... types) throws ElasticsearchException {
long startTime = System.nanoTime();
if (types == null) {
types = Strings.EMPTY_ARRAY;
}
Query query = queryParserService.parseQuery(source).query();
query = filterQueryIfNeeded(query, types);
Filter aliasFilter = indexAliasesService.aliasFilter(filteringAliases);
Filter parentFilter = mapperService.hasNested() ? indexCache.filter().cache(NonNestedDocsFilter.INSTANCE) : null;
return new Engine.DeleteByQuery(query, source, filteringAliases, aliasFilter, parentFilter, types).startTime(startTime);
}
@Override
public void deleteByQuery(Engine.DeleteByQuery deleteByQuery) throws ElasticsearchException {
writeAllowed(deleteByQuery.origin());
if (logger.isTraceEnabled()) {
logger.trace("delete_by_query [{}]", deleteByQuery.query());
}
deleteByQuery = indexingService.preDeleteByQuery(deleteByQuery);
engine.delete(deleteByQuery);
deleteByQuery.endTime(System.nanoTime());
indexingService.postDeleteByQuery(deleteByQuery);
}
@Override
public Engine.GetResult get(Engine.Get get) throws ElasticsearchException {
readAllowed();
return engine.get(get);
}
@Override
public void refresh(Engine.Refresh refresh) throws ElasticsearchException {
verifyNotClosed();
if (logger.isTraceEnabled()) {
logger.trace("refresh with {}", refresh);
}
long time = System.nanoTime();
engine.refresh(refresh);
refreshMetric.inc(System.nanoTime() - time);
}
@Override
public RefreshStats refreshStats() {
return new RefreshStats(refreshMetric.count(), TimeUnit.NANOSECONDS.toMillis(refreshMetric.sum()));
}
@Override
public FlushStats flushStats() {
return new FlushStats(flushMetric.count(), TimeUnit.NANOSECONDS.toMillis(flushMetric.sum()));
}
@Override
public DocsStats docStats() {
final Engine.Searcher searcher = acquireSearcher("doc_stats");
try {
return new DocsStats(searcher.reader().numDocs(), searcher.reader().numDeletedDocs());
} finally {
searcher.release();
}
}
@Override
public IndexingStats indexingStats(String... types) {
return indexingService.stats(types);
}
@Override
public SearchStats searchStats(String... groups) {
return searchService.stats(groups);
}
@Override
public GetStats getStats() {
return getService.stats();
}
@Override
public StoreStats storeStats() {
try {
return store.stats();
} catch (IOException e) {
throw new ElasticsearchException("io exception while building 'store stats'", e);
}
}
@Override
public MergeStats mergeStats() {
return mergeScheduler.stats();
}
@Override
public SegmentsStats segmentStats() {
return engine.segmentsStats();
}
@Override
public WarmerStats warmerStats() {
return shardWarmerService.stats();
}
@Override
public FilterCacheStats filterCacheStats() {
return shardFilterCache.stats();
}
@Override
public FieldDataStats fieldDataStats(String... fields) {
return shardFieldData.stats(fields);
}
@Override
public PercolatorQueriesRegistry percolateRegistry() {
return percolatorQueriesRegistry;
}
@Override
public ShardPercolateService shardPercolateService() {
return shardPercolateService;
}
@Override
public IdCacheStats idCacheStats() {
return shardIdCache.stats();
}
@Override
public TranslogStats translogStats() {
return translog.stats();
}
@Override
public CompletionStats completionStats(String... fields) {
CompletionStats completionStats = new CompletionStats();
final Engine.Searcher currentSearcher = acquireSearcher("completion_stats");
try {
PostingsFormat postingsFormat = this.codecService.postingsFormatService().get(Completion090PostingsFormat.CODEC_NAME).get();
if (postingsFormat instanceof Completion090PostingsFormat) {
Completion090PostingsFormat completionPostingsFormat = (Completion090PostingsFormat) postingsFormat;
completionStats.add(completionPostingsFormat.completionStats(currentSearcher.reader(), fields));
}
} finally {
currentSearcher.release();
}
return completionStats;
}
@Override
public void flush(Engine.Flush flush) throws ElasticsearchException {
        // we allow flush while recovering, since we allow operations to happen
        // while recovering, and we want to keep the translog at bay (up to deletes, which
        // we don't gc).
verifyStartedOrRecovering();
if (logger.isTraceEnabled()) {
logger.trace("flush with {}", flush);
}
long time = System.nanoTime();
engine.flush(flush);
flushMetric.inc(System.nanoTime() - time);
}
@Override
public void optimize(Engine.Optimize optimize) throws ElasticsearchException {
verifyStarted();
if (logger.isTraceEnabled()) {
logger.trace("optimize with {}", optimize);
}
engine.optimize(optimize);
}
@Override
public <T> T snapshot(Engine.SnapshotHandler<T> snapshotHandler) throws EngineException {
IndexShardState state = this.state; // one time volatile read
// we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
if (state == IndexShardState.POST_RECOVERY || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
return engine.snapshot(snapshotHandler);
} else {
throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
}
}
@Override
public SnapshotIndexCommit snapshotIndex() throws EngineException {
IndexShardState state = this.state; // one time volatile read
// we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
return engine.snapshotIndex();
} else {
throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
}
}
@Override
public void recover(Engine.RecoveryHandler recoveryHandler) throws EngineException {
verifyStarted();
engine.recover(recoveryHandler);
}
@Override
public Engine.Searcher acquireSearcher(String source) {
return acquireSearcher(source, Mode.READ);
}
@Override
public Engine.Searcher acquireSearcher(String source, Mode mode) {
readAllowed(mode);
return engine.acquireSearcher(source);
}
public void close(String reason) {
synchronized (mutex) {
indexSettingsService.removeListener(applyRefreshSettings);
if (state != IndexShardState.CLOSED) {
if (refreshScheduledFuture != null) {
refreshScheduledFuture.cancel(true);
refreshScheduledFuture = null;
}
if (mergeScheduleFuture != null) {
mergeScheduleFuture.cancel(true);
mergeScheduleFuture = null;
}
}
changeState(IndexShardState.CLOSED, reason);
}
}
public long checkIndexTook() {
return this.checkIndexTook;
}
public InternalIndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId);
}
if (state == IndexShardState.STARTED) {
throw new IndexShardStartedException(shardId);
}
if (state == IndexShardState.RELOCATED) {
throw new IndexShardRelocatedException(shardId);
}
if (Booleans.parseBoolean(checkIndexOnStartup, false)) {
checkIndex(true);
}
engine.start();
startScheduledTasksIfNeeded();
changeState(IndexShardState.POST_RECOVERY, reason);
}
indicesLifecycle.afterIndexShardPostRecovery(this);
return this;
}
/**
* After the store has been recovered, we need to start the engine in order to apply operations
*/
public void performRecoveryPrepareForTranslog() throws ElasticsearchException {
if (state != IndexShardState.RECOVERING) {
throw new IndexShardNotRecoveringException(shardId, state);
}
// also check here, before we apply the translog
if (Booleans.parseBoolean(checkIndexOnStartup, false)) {
checkIndex(true);
}
// we disable deletes since we allow for operations to be executed against the shard while recovering
    // but we need to make sure we don't lose deletes until we are done recovering
engine.enableGcDeletes(false);
engine.start();
}
/**
* The peer recovery status if this shard recovered from a peer shard.
*/
public RecoveryStatus peerRecoveryStatus() {
return this.peerRecoveryStatus;
}
public void performRecoveryFinalization(boolean withFlush, RecoveryStatus peerRecoveryStatus) throws ElasticsearchException {
performRecoveryFinalization(withFlush);
this.peerRecoveryStatus = peerRecoveryStatus;
}
public void performRecoveryFinalization(boolean withFlush) throws ElasticsearchException {
if (withFlush) {
engine.flush(new Engine.Flush());
}
// clear unreferenced files
translog.clearUnreferenced();
engine.refresh(new Engine.Refresh("recovery_finalization").force(true));
synchronized (mutex) {
changeState(IndexShardState.POST_RECOVERY, "post recovery");
}
indicesLifecycle.afterIndexShardPostRecovery(this);
startScheduledTasksIfNeeded();
engine.enableGcDeletes(true);
}
public void performRecoveryOperation(Translog.Operation operation) throws ElasticsearchException {
if (state != IndexShardState.RECOVERING) {
throw new IndexShardNotRecoveringException(shardId, state);
}
try {
switch (operation.opType()) {
case CREATE:
Translog.Create create = (Translog.Create) operation;
engine.create(prepareCreate(source(create.source()).type(create.type()).id(create.id())
.routing(create.routing()).parent(create.parent()).timestamp(create.timestamp()).ttl(create.ttl())).version(create.version())
.origin(Engine.Operation.Origin.RECOVERY));
break;
case SAVE:
Translog.Index index = (Translog.Index) operation;
engine.index(prepareIndex(source(index.source()).type(index.type()).id(index.id())
.routing(index.routing()).parent(index.parent()).timestamp(index.timestamp()).ttl(index.ttl())).version(index.version())
.origin(Engine.Operation.Origin.RECOVERY));
break;
case DELETE:
Translog.Delete delete = (Translog.Delete) operation;
Uid uid = Uid.createUid(delete.uid().text());
engine.delete(new Engine.Delete(uid.type(), uid.id(), delete.uid()).version(delete.version())
.origin(Engine.Operation.Origin.RECOVERY));
break;
case DELETE_BY_QUERY:
Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) operation;
engine.delete(prepareDeleteByQuery(deleteByQuery.source(), deleteByQuery.filteringAliases(), deleteByQuery.types()).origin(Engine.Operation.Origin.RECOVERY));
break;
default:
throw new ElasticsearchIllegalStateException("No operation defined for [" + operation + "]");
}
} catch (ElasticsearchException e) {
boolean hasIgnoreOnRecoveryException = false;
ElasticsearchException current = e;
while (true) {
if (current instanceof IgnoreOnRecoveryEngineException) {
hasIgnoreOnRecoveryException = true;
break;
}
if (current.getCause() instanceof ElasticsearchException) {
current = (ElasticsearchException) current.getCause();
} else {
break;
}
}
if (!hasIgnoreOnRecoveryException) {
throw e;
}
}
}
/**
 * Returns <tt>true</tt> if this shard can ignore a recovery attempt made to it (since it is already doing it, or has already done it)
*/
public boolean ignoreRecoveryAttempt() {
IndexShardState state = state(); // one time volatile read
return state == IndexShardState.POST_RECOVERY || state == IndexShardState.RECOVERING || state == IndexShardState.STARTED ||
state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED;
}
public void readAllowed() throws IllegalIndexShardStateException {
readAllowed(Mode.READ);
}
public void readAllowed(Mode mode) throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
switch (mode) {
case READ:
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when started/relocated");
}
break;
case WRITE:
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when started/relocated");
}
break;
}
}
private void writeAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (origin == Engine.Operation.Origin.PRIMARY) {
        // for primaries, we only allow writes when actually started (so the cluster has decided we started);
        // otherwise we need to retry; we also want to still allow indexing if we are relocated, in case it fails
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
}
} else {
        // for replicas, we also allow writes while recovering, since we also index during recovery to replicas
        // and rely on version checks to make sure it's consistent
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
}
}
}
private void verifyStartedOrRecovering() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state != IndexShardState.STARTED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering");
}
}
private void verifyNotClosed() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state == IndexShardState.CLOSED) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed");
}
}
private void verifyStarted() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
}
}
private void startScheduledTasksIfNeeded() {
if (refreshInterval.millis() > 0) {
refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, new EngineRefresher());
logger.debug("scheduling refresher every {}", refreshInterval);
} else {
logger.debug("scheduled refresher disabled");
}
    // since we can do async merging, it will not be called explicitly when indexing (adding / deleting docs), but only when flushing;
    // so, make sure we periodically call it; this needs to be a small enough value so merging will actually
    // happen and reduce the number of segments
if (mergeInterval.millis() > 0) {
mergeScheduleFuture = threadPool.schedule(mergeInterval, ThreadPool.Names.SAME, new EngineMerger());
logger.debug("scheduling optimizer / merger every {}", mergeInterval);
} else {
logger.debug("scheduled optimizer / merger disabled");
}
}
private Query filterQueryIfNeeded(Query query, String[] types) {
Filter searchFilter = mapperService.searchFilter(types);
if (searchFilter != null) {
query = new XFilteredQuery(query, indexCache.filter().cache(searchFilter));
}
return query;
}
public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval";
private class ApplyRefreshSettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
return;
}
TimeValue refreshInterval = settings.getAsTime(INDEX_REFRESH_INTERVAL, InternalIndexShard.this.refreshInterval);
if (!refreshInterval.equals(InternalIndexShard.this.refreshInterval)) {
logger.info("updating refresh_interval from [{}] to [{}]", InternalIndexShard.this.refreshInterval, refreshInterval);
if (refreshScheduledFuture != null) {
refreshScheduledFuture.cancel(false);
refreshScheduledFuture = null;
}
InternalIndexShard.this.refreshInterval = refreshInterval;
if (refreshInterval.millis() > 0) {
refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, new EngineRefresher());
}
}
}
}
}
class EngineRefresher implements Runnable {
@Override
public void run() {
        // we check beforehand if a refresh is needed; if not, we reschedule, otherwise we fork, refresh, and then reschedule
if (!engine().refreshNeeded()) {
synchronized (mutex) {
if (state != IndexShardState.CLOSED) {
refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, this);
}
}
return;
}
threadPool.executor(ThreadPool.Names.REFRESH).execute(new Runnable() {
@Override
public void run() {
try {
if (engine.refreshNeeded()) {
refresh(new Engine.Refresh("scheduled").force(false));
}
} catch (EngineClosedException e) {
// we are being closed, ignore
} catch (RefreshFailedEngineException e) {
if (e.getCause() instanceof InterruptedException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof ClosedByInterruptException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof ThreadInterruptedException) {
// ignore, we are being shutdown
} else {
if (state != IndexShardState.CLOSED) {
logger.warn("Failed to perform scheduled engine refresh", e);
}
}
} catch (Exception e) {
if (state != IndexShardState.CLOSED) {
logger.warn("Failed to perform scheduled engine refresh", e);
}
}
synchronized (mutex) {
if (state != IndexShardState.CLOSED) {
refreshScheduledFuture = threadPool.schedule(refreshInterval, ThreadPool.Names.SAME, EngineRefresher.this);
}
}
}
});
}
}
class EngineMerger implements Runnable {
@Override
public void run() {
if (!engine().possibleMergeNeeded()) {
synchronized (mutex) {
if (state != IndexShardState.CLOSED) {
mergeScheduleFuture = threadPool.schedule(mergeInterval, ThreadPool.Names.SAME, this);
}
}
return;
}
threadPool.executor(ThreadPool.Names.MERGE).execute(new Runnable() {
@Override
public void run() {
try {
engine.maybeMerge();
} catch (EngineClosedException e) {
// we are being closed, ignore
} catch (OptimizeFailedEngineException e) {
if (e.getCause() instanceof EngineClosedException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof InterruptedException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof ClosedByInterruptException) {
// ignore, we are being shutdown
} else if (e.getCause() instanceof ThreadInterruptedException) {
// ignore, we are being shutdown
} else {
if (state != IndexShardState.CLOSED) {
logger.warn("Failed to perform scheduled engine optimize/merge", e);
}
}
} catch (Exception e) {
if (state != IndexShardState.CLOSED) {
logger.warn("Failed to perform scheduled engine optimize/merge", e);
}
}
synchronized (mutex) {
if (state != IndexShardState.CLOSED) {
mergeScheduleFuture = threadPool.schedule(mergeInterval, ThreadPool.Names.SAME, EngineMerger.this);
}
}
}
});
}
}
private void checkIndex(boolean throwException) throws IndexShardException {
try {
checkIndexTook = 0;
long time = System.currentTimeMillis();
if (!Lucene.indexExists(store.directory())) {
return;
}
CheckIndex checkIndex = new CheckIndex(store.directory());
BytesStreamOutput os = new BytesStreamOutput();
PrintStream out = new PrintStream(os, false, Charsets.UTF_8.name());
checkIndex.setInfoStream(out);
out.flush();
CheckIndex.Status status = checkIndex.checkIndex();
if (!status.clean) {
if (state == IndexShardState.CLOSED) {
// ignore if closed....
return;
}
logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
if ("fix".equalsIgnoreCase(checkIndexOnStartup)) {
if (logger.isDebugEnabled()) {
logger.debug("fixing index, writing new segments file ...");
}
checkIndex.fixIndex(status);
if (logger.isDebugEnabled()) {
logger.debug("index fixed, wrote new segments file \"{}\"", status.segmentsFileName);
}
} else {
// only throw a failure if we are not going to fix the index
if (throwException) {
throw new IndexShardException(shardId, "index check failure");
}
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("check index [success]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
}
}
checkIndexTook = System.currentTimeMillis() - time;
} catch (Exception e) {
logger.warn("failed to check index", e);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_shard_service_InternalIndexShard.java
|
6,227 |
public class ReproduceInfoPrinter extends RunListener {
protected final ESLogger logger = Loggers.getLogger(ElasticsearchTestCase.class);
@Override
public void testStarted(Description description) throws Exception {
logger.info("Test {} started", description.getDisplayName());
}
@Override
public void testFinished(Description description) throws Exception {
logger.info("Test {} finished", description.getDisplayName());
}
@Override
public void testFailure(Failure failure) throws Exception {
// Ignore assumptions.
if (failure.getException() instanceof AssumptionViolatedException) {
return;
}
final Description d = failure.getDescription();
final StringBuilder b = new StringBuilder();
b.append("FAILURE : ").append(d.getDisplayName()).append("\n");
b.append("REPRODUCE WITH : mvn test");
ReproduceErrorMessageBuilder builder = reproduceErrorMessageBuilder(b).appendAllOpts(failure.getDescription());
if (mustAppendClusterSeed(failure)) {
appendClusterSeed(builder);
}
b.append("\n");
b.append("Throwable:\n");
if (failure.getException() != null) {
traces().formatThrowable(b, failure.getException());
}
logger.error(b.toString());
}
protected boolean mustAppendClusterSeed(Failure failure) {
return ElasticsearchIntegrationTest.class.isAssignableFrom(failure.getDescription().getTestClass());
}
protected void appendClusterSeed(ReproduceErrorMessageBuilder builder) {
builder.appendOpt(TESTS_CLUSTER_SEED, SeedUtils.formatSeed(SHARED_CLUSTER_SEED));
}
protected ReproduceErrorMessageBuilder reproduceErrorMessageBuilder(StringBuilder b) {
return new MavenMessageBuilder(b);
}
protected TraceFormatting traces() {
TraceFormatting traces = new TraceFormatting();
try {
traces = RandomizedContext.current().getRunner().getTraceFormatting();
} catch (IllegalStateException e) {
// Ignore if no context.
}
return traces;
}
protected static class MavenMessageBuilder extends ReproduceErrorMessageBuilder {
public MavenMessageBuilder(StringBuilder b) {
super(b);
}
@Override
public ReproduceErrorMessageBuilder appendAllOpts(Description description) {
super.appendAllOpts(description);
return appendESProperties();
}
/**
* Append a single VM option.
*/
@Override
public ReproduceErrorMessageBuilder appendOpt(String sysPropName, String value) {
if (sysPropName.equals(SYSPROP_ITERATIONS())) { // we don't want the iters to be in there!
return this;
}
if (Strings.hasLength(value)) {
return super.appendOpt(sysPropName, value);
}
return this;
}
public ReproduceErrorMessageBuilder appendESProperties() {
appendProperties("es.logger.level", "es.node.mode", "es.node.local", TestCluster.TESTS_ENABLE_MOCK_MODULES,
"tests.assertion.disabled", "tests.security.manager");
if (System.getProperty("tests.jvm.argline") != null && !System.getProperty("tests.jvm.argline").isEmpty()) {
appendOpt("tests.jvm.argline", "\"" + System.getProperty("tests.jvm.argline") + "\"");
}
return this;
}
protected ReproduceErrorMessageBuilder appendProperties(String... properties) {
for (String sysPropName : properties) {
if (Strings.hasLength(System.getProperty(sysPropName))) {
appendOpt(sysPropName, System.getProperty(sysPropName));
}
}
return this;
}
}
}
| 1no label
|
src_test_java_org_elasticsearch_test_junit_listeners_ReproduceInfoPrinter.java
|
2,034 |
public class AddIndexOperation extends AbstractNamedOperation implements PartitionAwareOperation {
String attributeName;
boolean ordered;
public AddIndexOperation(String name, String attributeName, boolean ordered) {
super(name);
this.attributeName = attributeName;
this.ordered = ordered;
}
public AddIndexOperation() {
}
@Override
public void run() throws Exception {
MapService mapService = getService();
MapContainer mapContainer = mapService.getMapContainer(name);
RecordStore rs = mapService.getPartitionContainer(getPartitionId()).getRecordStore(name);
Map<Data, Record> records = rs.getReadonlyRecordMap();
IndexService indexService = mapContainer.getIndexService();
SerializationService ss = getNodeEngine().getSerializationService();
Index index = indexService.addOrGetIndex(attributeName, ordered);
for (Record record : records.values()) {
Data key = record.getKey();
Object value = record.getValue();
index.saveEntryIndex(new QueryEntry(ss, key, key, value));
}
}
@Override
public Object getResponse() {
return Boolean.TRUE;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeUTF(attributeName);
out.writeBoolean(ordered);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
attributeName = in.readUTF();
ordered = in.readBoolean();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_AddIndexOperation.java
|
2,041 |
public class ClearExpiredOperation extends AbstractMapOperation implements PartitionAwareOperation {
private List expiredKeyValueSequence;
public ClearExpiredOperation(String name) {
super(name);
}
@Override
public void run() throws Exception {
final PartitionContainer partitionContainer = mapService.getPartitionContainer(getPartitionId());
// this should be an existing record store, since we don't want to trigger record store creation.
final RecordStore recordStore = partitionContainer.getExistingRecordStore(name);
if (recordStore == null) {
return;
}
expiredKeyValueSequence = recordStore.findUnlockedExpiredRecords();
}
@Override
public void afterRun() throws Exception {
final List expiredKeyValueSequence = this.expiredKeyValueSequence;
if (expiredKeyValueSequence == null || expiredKeyValueSequence.isEmpty()) {
return;
}
final MapService mapService = this.mapService;
final String mapName = this.name;
final NodeEngine nodeEngine = getNodeEngine();
final Address owner = nodeEngine.getPartitionService().getPartitionOwner(getPartitionId());
final boolean isOwner = nodeEngine.getThisAddress().equals(owner);
final int size = expiredKeyValueSequence.size();
for (int i = 0; i < size; i += 2) {
Data key = (Data) expiredKeyValueSequence.get(i);
Object value = expiredKeyValueSequence.get(i + 1);
mapService.interceptAfterRemove(mapName, value);
if (mapService.isNearCacheAndInvalidationEnabled(mapName)) {
mapService.invalidateAllNearCaches(mapName, key);
}
if (isOwner) {
EvictionHelper.fireEvent(key, value, mapName, mapService);
}
}
}
@Override
public boolean returnsResponse() {
return false;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
throw new UnsupportedOperationException();
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public String toString() {
return "ClearExpiredOperation{}";
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_ClearExpiredOperation.java
|
633 |
public final class AddMembershipListenerRequest extends CallableClientRequest implements RetryableRequest {
public AddMembershipListenerRequest() {
}
@Override
public Object call() throws Exception {
final ClusterServiceImpl service = getService();
final ClientEndpoint endpoint = getEndpoint();
final String registrationId = service.addMembershipListener(new MembershipListener() {
@Override
public void memberAdded(MembershipEvent membershipEvent) {
if (endpoint.live()) {
final MemberImpl member = (MemberImpl) membershipEvent.getMember();
endpoint.sendEvent(new ClientMembershipEvent(member, MembershipEvent.MEMBER_ADDED), getCallId());
}
}
@Override
public void memberRemoved(MembershipEvent membershipEvent) {
if (endpoint.live()) {
final MemberImpl member = (MemberImpl) membershipEvent.getMember();
endpoint.sendEvent(new ClientMembershipEvent(member, MembershipEvent.MEMBER_REMOVED), getCallId());
}
}
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
if (endpoint.live()) {
final MemberImpl member = (MemberImpl) memberAttributeEvent.getMember();
final String uuid = member.getUuid();
final MemberAttributeOperationType op = memberAttributeEvent.getOperationType();
final String key = memberAttributeEvent.getKey();
final Object value = memberAttributeEvent.getValue();
final MemberAttributeChange memberAttributeChange = new MemberAttributeChange(uuid, op, key, value);
endpoint.sendEvent(new ClientMembershipEvent(member, memberAttributeChange), getCallId());
}
}
});
final String name = ClusterServiceImpl.SERVICE_NAME;
endpoint.setListenerRegistration(name, name, registrationId);
final Collection<MemberImpl> memberList = service.getMemberList();
final Collection<Data> response = new ArrayList<Data>(memberList.size());
final SerializationService serializationService = getClientEngine().getSerializationService();
for (MemberImpl member : memberList) {
response.add(serializationService.toData(member));
}
return new SerializableCollection(response);
}
public String getServiceName() {
return ClusterServiceImpl.SERVICE_NAME;
}
public int getFactoryId() {
return ClientPortableHook.ID;
}
public int getClassId() {
return ClientPortableHook.MEMBERSHIP_LISTENER;
}
@Override
public Permission getRequiredPermission() {
return null;
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_cluster_client_AddMembershipListenerRequest.java
|
1,101 |
public class OSQLFunctionIntersect extends OSQLFunctionMultiValueAbstract<Set<Object>> {
public static final String NAME = "intersect";
public OSQLFunctionIntersect() {
super(NAME, 1, -1);
}
public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters,
OCommandContext iContext) {
Object value = iParameters[0];
if (value instanceof OSQLFilterItemVariable)
value = ((OSQLFilterItemVariable) value).getValue(iCurrentRecord, iContext);
if (value == null)
return Collections.emptySet();
if (!(value instanceof Collection<?>))
value = Arrays.asList(value);
final Collection<?> coll = (Collection<?>) value;
if (iParameters.length == 1) {
// AGGREGATION MODE (STATEFUL)
if (context == null) {
// ADD ALL THE ITEMS OF THE FIRST COLLECTION
context = new HashSet<Object>(coll);
} else {
// INTERSECT IT AGAINST THE CURRENT COLLECTION
context.retainAll(coll);
}
return null;
} else {
// IN-LINE MODE (STATELESS)
final HashSet<Object> result = new HashSet<Object>(coll);
for (int i = 1; i < iParameters.length; ++i) {
value = iParameters[i];
if (value instanceof OSQLFilterItemVariable)
value = ((OSQLFilterItemVariable) value).getValue(iCurrentRecord, iContext);
if (value != null) {
if (!(value instanceof Collection<?>))
// CONVERT IT INTO A COLLECTION
value = Arrays.asList(value);
result.retainAll((Collection<?>) value);
} else
result.clear();
}
return result;
}
}
public String getSyntax() {
return "Syntax error: intersect(<field>*)";
}
@SuppressWarnings("unchecked")
@Override
public Object mergeDistributedResult(List<Object> resultsToMerge) {
final Collection<Object> result = new HashSet<Object>();
if (!resultsToMerge.isEmpty()) {
final Collection<Object> items = (Collection<Object>) resultsToMerge.get(0);
if (items != null) {
result.addAll(items);
}
}
for (int i = 1; i < resultsToMerge.size(); i++) {
final Collection<Object> items = (Collection<Object>) resultsToMerge.get(i);
if (items != null) {
result.retainAll(items);
}
}
return result;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_coll_OSQLFunctionIntersect.java
|
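The in-line (stateless) branch of the function above boils down to seeding a set from the first collection and calling retainAll with each further argument. A minimal standalone sketch of that semantics, assuming plain Java collections and no OrientDB types (all names below are illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class IntersectSketch {
    // Stateless intersection: seed with the first collection, retainAll the rest.
    static Set<Object> intersect(List<List<Object>> inputs) {
        Set<Object> result = new HashSet<Object>();
        for (int i = 0; i < inputs.size(); i++) {
            if (i == 0) {
                result.addAll(inputs.get(i));
            } else {
                result.retainAll(new HashSet<Object>(inputs.get(i)));
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<List<Object>> inputs = Arrays.asList(
                Arrays.<Object>asList("a", "b", "c"),
                Arrays.<Object>asList("b", "c", "d"));
        System.out.println(intersect(inputs)); // prints [b, c]
    }
}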
732 |
public class CollectionClearRequest extends CollectionRequest {
public CollectionClearRequest() {
}
public CollectionClearRequest(String name) {
super(name);
}
@Override
protected Operation prepareOperation() {
return new CollectionClearOperation(name);
}
@Override
public int getClassId() {
return CollectionPortableHook.COLLECTION_CLEAR;
}
@Override
public String getRequiredAction() {
return ActionConstants.ACTION_REMOVE;
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_client_CollectionClearRequest.java
|
133 |
public abstract class RecursiveTask<V> extends ForkJoinTask<V> {
private static final long serialVersionUID = 5232453952276485270L;
/**
* The result of the computation.
*/
V result;
/**
* The main computation performed by this task.
* @return the result of the computation
*/
protected abstract V compute();
public final V getRawResult() {
return result;
}
protected final void setRawResult(V value) {
result = value;
}
/**
* Implements execution conventions for RecursiveTask.
*/
protected final boolean exec() {
result = compute();
return true;
}
}
| 0true
|
src_main_java_jsr166e_RecursiveTask.java
|
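A conventional usage sketch for the class above: a (deliberately naive) fork/join Fibonacci task showing compute(), fork() and join(). The jsr166e package and its ForkJoinPool are assumed from the file path; this example is not part of the source file.

import jsr166e.ForkJoinPool;
import jsr166e.RecursiveTask;

public class FibTask extends RecursiveTask<Integer> {
    private final int n;

    public FibTask(int n) {
        this.n = n;
    }

    @Override
    protected Integer compute() {
        if (n <= 1) {
            return n;
        }
        FibTask left = new FibTask(n - 1);
        left.fork();                       // schedule asynchronously
        FibTask right = new FibTask(n - 2);
        return right.compute() + left.join();
    }

    public static void main(String[] args) {
        System.out.println(new ForkJoinPool().invoke(new FibTask(10))); // prints 55
    }
}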
797 |
public class LongWrapper {
private long value;
public long get() {
return value;
}
public long addAndGet(long delta) {
value += delta;
return value;
}
public void set(long value) {
this.value = value;
}
public boolean compareAndSet(long expect, long value) {
if (this.value != expect) {
return false;
}
this.value = value;
return true;
}
public long getAndAdd(long delta) {
long tempValue = value;
value += delta;
return tempValue;
}
public long getAndSet(long value) {
long tempValue = this.value;
this.value = value;
return tempValue;
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_LongWrapper.java
|
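LongWrapper mirrors the AtomicLong method names but uses plain, unsynchronized field access, so it is only correct when confined to one thread or guarded externally (for instance by partition-thread execution). A short hypothetical caller showing the compareAndSet contract:

public class LongWrapperExample {
    public static void main(String[] args) {
        LongWrapper counter = new LongWrapper();
        counter.set(5);
        counter.addAndGet(3);                          // value is now 8
        boolean swapped = counter.compareAndSet(8, 0); // true, expected value matched
        System.out.println(swapped + " " + counter.get()); // prints "true 0"
    }
}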
422 |
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD})
public @interface ConfigurationItem {
/**
* Item name for the error message (could also be a key to a properties file to support localization)
*/
public static String ERROR_MESSAGE = "errorMessage";
/**
* <p>The name of the validation configuration item</p>
*
* @return the config item name
*/
String itemName();
/**
* <p>The value for the validation configuration item</p>
*
* @return the config item value
*/
String itemValue();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_presentation_ConfigurationItem.java
|
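Since the annotation above targets fields and is retained at runtime, a hypothetical use looks like the following; the surrounding entity class is illustrative and not taken from the source:

public class ProductExample {

    // Supplies the error message configuration item for some field-level validator.
    @ConfigurationItem(itemName = ConfigurationItem.ERROR_MESSAGE,
                       itemValue = "Product name is required")
    private String name;
}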
132 |
public interface SchemaManager extends SchemaInspector {
/**
* Returns a {@link com.thinkaurelius.titan.core.schema.PropertyKeyMaker} instance to define a new {@link com.thinkaurelius.titan.core.PropertyKey} with the given name.
* By defining types explicitly (rather than implicitly through usage) one can control various
* aspects of the key and associated consistency constraints.
* <p/>
* The key constructed with this maker will be created in the context of this transaction.
*
* @return a {@link com.thinkaurelius.titan.core.schema.PropertyKeyMaker} linked to this transaction.
* @see com.thinkaurelius.titan.core.schema.PropertyKeyMaker
* @see com.thinkaurelius.titan.core.PropertyKey
*/
public PropertyKeyMaker makePropertyKey(String name);
/**
* Returns a {@link com.thinkaurelius.titan.core.schema.EdgeLabelMaker} instance to define a new {@link com.thinkaurelius.titan.core.EdgeLabel} with the given name.
* By defining types explicitly (rather than implicitly through usage) one can control various
* aspects of the label and associated consistency constraints.
* <p/>
* The label constructed with this maker will be created in the context of this transaction.
*
* @return a {@link com.thinkaurelius.titan.core.schema.EdgeLabelMaker} linked to this transaction.
* @see com.thinkaurelius.titan.core.schema.EdgeLabelMaker
* @see com.thinkaurelius.titan.core.EdgeLabel
*/
public EdgeLabelMaker makeEdgeLabel(String name);
/**
* Returns a {@link VertexLabelMaker} to define a new vertex label with the given name. Note, that the name must
* be unique.
*
* @param name
* @return
*/
public VertexLabelMaker makeVertexLabel(String name);
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_schema_SchemaManager.java
|
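A sketch of how a caller might use the interface above to declare schema elements explicitly. The handle mgmt is assumed to implement SchemaManager, and the chained maker methods (dataType(), make()) follow Titan's maker pattern but should be read as assumptions rather than guarantees:

import com.thinkaurelius.titan.core.EdgeLabel;
import com.thinkaurelius.titan.core.PropertyKey;
import com.thinkaurelius.titan.core.VertexLabel;
import com.thinkaurelius.titan.core.schema.SchemaManager;

public class SchemaSketch {
    static void defineSchema(SchemaManager mgmt) {
        // Define the key, edge label and vertex label up front instead of implicitly on first use.
        PropertyKey name = mgmt.makePropertyKey("name").dataType(String.class).make();
        EdgeLabel knows = mgmt.makeEdgeLabel("knows").make();
        VertexLabel person = mgmt.makeVertexLabel("person").make();
    }
}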
1,369 |
public class JDTMethod implements MethodMirror, IBindingProvider {
private WeakReference<MethodBinding> bindingRef;
private Map<String, AnnotationMirror> annotations;
private String name;
private List<VariableMirror> parameters;
private TypeMirror returnType;
private List<TypeParameterMirror> typeParameters;
Boolean isOverriding;
private Boolean isOverloading;
private JDTClass enclosingClass;
private boolean isStatic;
private boolean isPublic;
private boolean isConstructor;
private boolean isStaticInit;
private boolean isAbstract;
private boolean isFinal;
private char[] bindingKey;
private String readableName;
private boolean isProtected;
private boolean isDefaultAccess;
private boolean isDeclaredVoid;
private boolean isVariadic;
private boolean isDefault;
public JDTMethod(JDTClass enclosingClass, MethodBinding method) {
this.enclosingClass = enclosingClass;
bindingRef = new WeakReference<MethodBinding>(method);
name = new String(method.selector);
readableName = new String(method.readableName());
isStatic = method.isStatic();
isPublic = method.isPublic();
isConstructor = method.isConstructor();
isStaticInit = method.selector == TypeConstants.CLINIT; // TODO : check if it is right
isAbstract = method.isAbstract();
isFinal = method.isFinal();
isProtected = method.isProtected();
isDefaultAccess = method.isDefault();
isDeclaredVoid = method.returnType.id == TypeIds.T_void;
isVariadic = method.isVarargs();
isDefault = method.getDefaultValue()!=null;
bindingKey = method.computeUniqueKey();
if (method instanceof ProblemMethodBinding) {
annotations = new HashMap<>();
parameters = Collections.emptyList();
returnType = JDTType.UNKNOWN_TYPE;
typeParameters = Collections.emptyList();
isOverriding = false;
isOverloading = false;
}
}
@Override
public AnnotationMirror getAnnotation(String type) {
if (annotations == null) {
doWithBindings(new ActionOnMethodBinding() {
@Override
public void doWithBinding(IType declaringClassModel,
ReferenceBinding declaringClass,
MethodBinding method) {
annotations = JDTUtils.getAnnotations(method.getAnnotations());
}
});
}
return annotations.get(type);
}
@Override
public String getName() {
return name;
}
@Override
public boolean isStatic() {
return isStatic;
}
@Override
public boolean isPublic() {
return isPublic;
}
@Override
public boolean isConstructor() {
return isConstructor;
}
@Override
public boolean isStaticInit() {
return isStaticInit;
}
@Override
public List<VariableMirror> getParameters() {
if (parameters == null) {
doWithBindings(new ActionOnMethodBinding() {
private String toParameterName(TypeBinding parameterType) {
String typeName = new String(parameterType.sourceName());
StringTokenizer tokens = new StringTokenizer(typeName, "$.[]");
String result = null;
while (tokens.hasMoreTokens()) {
result = tokens.nextToken();
}
if (typeName.endsWith("[]")) {
result = result + "Array";
}
return toLowerCase(result.charAt(0)) +
result.substring(1);
}
@Override
public void doWithBinding(IType declaringClassModel,
ReferenceBinding declaringClassBinding, MethodBinding methodBinding) {
TypeBinding[] parameterBindings;
AnnotationBinding[][] parameterAnnotationBindings;
parameterBindings = ((MethodBinding)methodBinding).parameters;
parameterAnnotationBindings = ((MethodBinding)methodBinding).getParameterAnnotations();
if (parameterAnnotationBindings == null) {
parameterAnnotationBindings = new AnnotationBinding[parameterBindings.length][];
for (int i=0; i<parameterAnnotationBindings.length; i++) {
parameterAnnotationBindings[i] = new AnnotationBinding[0];
}
}
parameters = new ArrayList<VariableMirror>(parameterBindings.length);
List<String> givenNames = new ArrayList<>(parameterBindings.length);
for(int i=0;i<parameterBindings.length;i++) {
Map<String, AnnotationMirror> parameterAnnotations = JDTUtils.getAnnotations(parameterAnnotationBindings[i]);
String parameterName;
AnnotationMirror nameAnnotation = getAnnotation(Name.class.getName());
TypeBinding parameterTypeBinding = parameterBindings[i];
if(nameAnnotation != null) {
parameterName = (String) nameAnnotation.getValue();
} else {
String baseName = toParameterName(parameterTypeBinding);
int count = 0;
String nameToReturn = baseName;
for (String givenName : givenNames) {
if (givenName.equals(nameToReturn)) {
count ++;
nameToReturn = baseName + Integer.toString(count);
}
}
parameterName = nameToReturn;
}
givenNames.add(parameterName);
parameters.add(new JDTVariable(parameterName, new JDTType(parameterTypeBinding), parameterAnnotations));
}
}
});
}
return parameters;
}
@Override
public boolean isAbstract() {
return isAbstract;
}
@Override
public boolean isFinal() {
return isFinal;
}
@Override
public TypeMirror getReturnType() {
if (returnType == null) {
doWithBindings(new ActionOnMethodBinding() {
@Override
public void doWithBinding(IType declaringClassModel,
ReferenceBinding declaringClassBinding, MethodBinding methodBinding) {
returnType = new JDTType(methodBinding.returnType);
}
});
}
return returnType;
}
@Override
public List<TypeParameterMirror> getTypeParameters() {
if (typeParameters == null) {
doWithBindings(new ActionOnMethodBinding() {
@Override
public void doWithBinding(IType declaringClassModel,
ReferenceBinding declaringClassBinding, MethodBinding methodBinding) {
TypeVariableBinding[] jdtTypeParameters = methodBinding.typeVariables();
typeParameters = new ArrayList<TypeParameterMirror>(jdtTypeParameters.length);
for(TypeVariableBinding jdtTypeParameter : jdtTypeParameters)
typeParameters.add(new JDTTypeParameter(jdtTypeParameter));
}
});
}
return typeParameters;
}
public boolean isOverridingMethod() {
if (isOverriding == null) {
isOverriding = false;
doWithBindings(new ActionOnMethodBinding() {
@Override
public void doWithBinding(IType declaringClassModel,
ReferenceBinding declaringClass,
MethodBinding method) {
if (CharOperation.equals(declaringClass.readableName(), "ceylon.language.Identifiable".toCharArray())) {
if ("equals".equals(name)
|| "hashCode".equals(name)) {
isOverriding = true;
return;
}
}
if (CharOperation.equals(declaringClass.readableName(), "ceylon.language.Object".toCharArray())) {
if ("equals".equals(name)
|| "hashCode".equals(name)
|| "toString".equals(name)) {
isOverriding = false;
return;
}
}
// try the superclass first
if (isDefinedInSuperClasses(declaringClass, method)) {
isOverriding = true;
}
if (isDefinedInSuperInterfaces(declaringClass, method)) {
isOverriding = true;
}
}
});
}
return isOverriding.booleanValue();
}
private void doWithBindings(final ActionOnMethodBinding action) {
final IType declaringClassModel = enclosingClass.getType();
if (!JDTModelLoader.doWithMethodBinding(declaringClassModel, bindingRef.get(), action)) {
JDTModelLoader.doWithResolvedType(declaringClassModel, new JDTModelLoader.ActionOnResolvedType() {
@Override
public void doWithBinding(ReferenceBinding declaringClass) {
MethodBinding method = null;
for (MethodBinding m : declaringClass.methods()) {
if (CharOperation.equals(m.computeUniqueKey(), bindingKey)) {
method = m;
break;
}
}
if (method == null) {
throw new ModelResolutionException("Method '" + readableName + "' not found in the binding of class '" + declaringClassModel.getFullyQualifiedName() + "'");
}
bindingRef = new WeakReference<MethodBinding>(method);
action.doWithBinding(declaringClassModel, declaringClass, method);
}
});
}
}
public boolean isOverloadingMethod() {
if (isOverloading == null) {
isOverloading = Boolean.FALSE;
doWithBindings(new ActionOnMethodBinding() {
@Override
public void doWithBinding(IType declaringClassModel,
ReferenceBinding declaringClass,
MethodBinding method) {
// Exception has a pretend supertype of Object, unlike its Java supertype of java.lang.RuntimeException
// so we stop there for it, especially since it does not have any overloading
if(CharOperation.equals(declaringClass.qualifiedSourceName(), "ceylon.language.Exception".toCharArray())) {
isOverloading = false;
return;
}
// try the superclass first
if (isOverloadingInSuperClasses(declaringClass, method)) {
isOverloading = Boolean.TRUE;
}
if (isOverloadingInSuperInterfaces(declaringClass, method)) {
isOverloading = Boolean.TRUE;
}
}
});
}
return isOverloading.booleanValue();
}
private boolean ignoreMethodInAncestorSearch(MethodBinding methodBinding) {
String name = CharOperation.charToString(methodBinding.selector);
if(name.equals("finalize")
|| name.equals("clone")){
if(methodBinding.declaringClass != null && CharOperation.toString(methodBinding.declaringClass.compoundName).equals("java.lang.Object")) {
return true;
}
}
// skip ignored methods too
if(JDTUtils.hasAnnotation(methodBinding, AbstractModelLoader.CEYLON_IGNORE_ANNOTATION)) {
return true;
}
return false;
}
private boolean isDefinedInType(ReferenceBinding superClass, MethodBinding method) {
MethodVerifier methodVerifier = superClass.getPackage().environment.methodVerifier();
for (MethodBinding inheritedMethod : superClass.methods()) {
// skip ignored methods
if(ignoreMethodInAncestorSearch(inheritedMethod)) {
continue;
}
if (methodVerifier.doesMethodOverride(method, inheritedMethod)) {
return true;
}
}
return false;
}
private boolean isOverloadingInType(ReferenceBinding superClass, MethodBinding method) {
MethodVerifier methodVerifier = superClass.getPackage().environment.methodVerifier();
for (MethodBinding inheritedMethod : superClass.methods()) {
if(inheritedMethod.isPrivate()
|| inheritedMethod.isStatic()
|| inheritedMethod.isConstructor()
|| inheritedMethod.isBridge()
|| inheritedMethod.isSynthetic()
|| !Arrays.equals(inheritedMethod.constantPoolName(), method.selector))
continue;
// skip ignored methods
if(ignoreMethodInAncestorSearch(inheritedMethod)) {
continue;
}
// if it does not override it and has the same name, it's overloading
if (!methodVerifier.doesMethodOverride(method, inheritedMethod)) {
return true;
}
}
return false;
}
boolean isDefinedInSuperClasses(ReferenceBinding declaringClass, MethodBinding method) {
ReferenceBinding superClass = declaringClass.superclass();
if (superClass == null) {
return false;
}
superClass = JDTUtils.inferTypeParametersFromSuperClass(declaringClass,
superClass);
if (isDefinedInType(superClass, method)) {
return true;
}
return isDefinedInSuperClasses(superClass, method);
}
boolean isDefinedInSuperInterfaces(ReferenceBinding declaringType, MethodBinding method) {
ReferenceBinding[] superInterfaces = declaringType.superInterfaces();
if (superInterfaces == null) {
return false;
}
for (ReferenceBinding superInterface : superInterfaces) {
if (isDefinedInType(superInterface, method)) {
return true;
}
if (isDefinedInSuperInterfaces(superInterface, method)) {
return true;
}
}
return false;
}
boolean isOverloadingInSuperClasses(ReferenceBinding declaringClass, MethodBinding method) {
ReferenceBinding superClass = declaringClass.superclass();
if (superClass == null) {
return false;
}
// Exception has a pretend supertype of Object, unlike its Java supertype of java.lang.RuntimeException
// so we stop there for it, especially since it does not have any overloading
if(CharOperation.equals(superClass.qualifiedSourceName(), "ceylon.language.Exception".toCharArray()))
return false;
superClass = JDTUtils.inferTypeParametersFromSuperClass(declaringClass,
superClass);
if (isOverloadingInType(superClass, method)) {
return true;
}
return isOverloadingInSuperClasses(superClass, method);
}
boolean isOverloadingInSuperInterfaces(ReferenceBinding declaringType, MethodBinding method) {
ReferenceBinding[] superInterfaces = declaringType.superInterfaces();
if (superInterfaces == null) {
return false;
}
for (ReferenceBinding superInterface : superInterfaces) {
if (isOverloadingInType(superInterface, method)) {
return true;
}
if (isOverloadingInSuperInterfaces(superInterface, method)) {
return true;
}
}
return false;
}
@Override
public boolean isProtected() {
return isProtected;
}
@Override
public boolean isDefaultAccess() {
return isDefaultAccess;
}
@Override
public boolean isDeclaredVoid() {
return isDeclaredVoid;
}
@Override
public boolean isVariadic() {
return isVariadic;
}
@Override
public boolean isDefault() {
return isDefault;
}
@Override
public char[] getBindingKey() {
return bindingKey;
}
@Override
public ClassMirror getEnclosingClass() {
return enclosingClass;
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_model_mirror_JDTMethod.java
|
34 |
public class PartialGetCommand extends GetCommand {
public PartialGetCommand(String key) {
super(TextCommandType.PARTIAL_GET, key);
}
@Override
public String toString() {
return "PartialGetCommand{"
+ "key='"
+ key + '\''
+ '}';
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_PartialGetCommand.java
|
928 |
public abstract class BroadcastOperationResponse extends ActionResponse {
private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0];
private int totalShards;
private int successfulShards;
private int failedShards;
private ShardOperationFailedException[] shardFailures = EMPTY;
protected BroadcastOperationResponse() {
}
protected BroadcastOperationResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
this.totalShards = totalShards;
this.successfulShards = successfulShards;
this.failedShards = failedShards;
this.shardFailures = shardFailures == null ? EMPTY : shardFailures.toArray(new ShardOperationFailedException[shardFailures.size()]);
}
/**
* The total shards this request ran against.
*/
public int getTotalShards() {
return totalShards;
}
/**
* The successful shards this request was executed on.
*/
public int getSuccessfulShards() {
return successfulShards;
}
/**
* The failed shards this request was executed on.
*/
public int getFailedShards() {
return failedShards;
}
/**
* The list of shard failure exceptions.
*/
public ShardOperationFailedException[] getShardFailures() {
return shardFailures;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
totalShards = in.readVInt();
successfulShards = in.readVInt();
failedShards = in.readVInt();
int size = in.readVInt();
if (size > 0) {
shardFailures = new ShardOperationFailedException[size];
for (int i = 0; i < size; i++) {
shardFailures[i] = readShardOperationFailed(in);
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(totalShards);
out.writeVInt(successfulShards);
out.writeVInt(failedShards);
out.writeVInt(shardFailures.length);
for (ShardOperationFailedException exp : shardFailures) {
exp.writeTo(out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_support_broadcast_BroadcastOperationResponse.java
|
33 |
{
@Override
public void run()
{
Channel channel = getChannel( to );
try
{
if ( channel == null )
{
channel = openChannel( to );
openedChannel( to, channel );
// The instance could be connected to; remove any marker of it having failed
failedInstances.remove( to );
}
}
catch ( Exception e )
{
// Only print out failure message on first fail
if ( !failedInstances.contains( to ) )
{
msgLog.warn( e.getMessage() );
failedInstances.add( to );
}
return;
}
try
{
// Set FROM header
message.setHeader( Message.FROM, me.toASCIIString() );
msgLog.debug( "Sending to " + to + ": " + message );
ChannelFuture future = channel.write( message );
future.addListener( new ChannelFutureListener()
{
@Override
public void operationComplete( ChannelFuture future ) throws Exception
{
if ( !future.isSuccess() )
{
msgLog.debug( "Unable to write " + message + " to " + future.getChannel(),
future.getCause() );
}
}
} );
}
catch ( Exception e )
{
msgLog.warn( "Could not send message", e );
channel.close();
}
}
} );
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_com_NetworkSender.java
|
1,908 |
public class DataDTOToMVELTranslator {
public String createMVEL(String entityKey, DataDTO dataDTO, RuleBuilderFieldService fieldService)
throws MVELTranslationException {
StringBuffer sb = new StringBuffer();
buildMVEL(dataDTO, sb, entityKey, null, fieldService);
String response = sb.toString().trim();
if (response.length() == 0) {
response = null;
}
return response;
}
protected void buildMVEL(DataDTO dataDTO, StringBuffer sb, String entityKey, String groupOperator,
RuleBuilderFieldService fieldService) throws MVELTranslationException {
BLCOperator operator = null;
if (dataDTO instanceof ExpressionDTO) {
operator = BLCOperator.valueOf(((ExpressionDTO) dataDTO).getOperator());
} else {
operator = BLCOperator.valueOf(dataDTO.getGroupOperator());
}
ArrayList<DataDTO> groups = dataDTO.getGroups();
if (sb.length() != 0 && sb.charAt(sb.length() - 1) != '(' && groupOperator != null) {
BLCOperator groupOp = BLCOperator.valueOf(groupOperator);
switch(groupOp) {
default:
sb.append("&&");
break;
case OR:
sb.append("||");
}
}
if (dataDTO instanceof ExpressionDTO) {
buildExpression((ExpressionDTO)dataDTO, sb, entityKey, operator, fieldService);
} else {
boolean includeTopLevelParenthesis = false;
if (sb.length() != 0 || BLCOperator.NOT.equals(operator) || (sb.length() == 0 && groupOperator != null)) {
includeTopLevelParenthesis = true;
}
if (BLCOperator.NOT.equals(operator)) {
sb.append("!");
}
if (includeTopLevelParenthesis) sb.append("(");
for (DataDTO dto : groups) {
buildMVEL(dto, sb, entityKey, dataDTO.getGroupOperator(), fieldService);
}
if (includeTopLevelParenthesis) sb.append(")");
}
}
protected void buildExpression(ExpressionDTO expressionDTO, StringBuffer sb, String entityKey,
BLCOperator operator, RuleBuilderFieldService fieldService)
throws MVELTranslationException {
String field = expressionDTO.getName();
SupportedFieldType type = fieldService.getSupportedFieldType(field);
SupportedFieldType secondaryType = fieldService.getSecondaryFieldType(field);
Object[] value;
if (type == null) {
throw new MVELTranslationException(MVELTranslationException.SPECIFIED_FIELD_NOT_FOUND, "The DataDTO is not compatible with the RuleBuilderFieldService " +
"associated with the current rules builder. Unable to find the field " +
"specified: ("+field+")");
}
if (
SupportedFieldType.DATE.toString().equals(type.toString()) &&
!BLCOperator.CONTAINS_FIELD.equals(operator) &&
!BLCOperator.ENDS_WITH_FIELD.equals(operator) &&
!BLCOperator.EQUALS_FIELD.equals(operator) &&
!BLCOperator.GREATER_OR_EQUAL_FIELD.equals(operator) &&
!BLCOperator.GREATER_THAN_FIELD.equals(operator) &&
!BLCOperator.LESS_OR_EQUAL_FIELD.equals(operator) &&
!BLCOperator.LESS_THAN_FIELD.equals(operator) &&
!BLCOperator.NOT_EQUAL_FIELD.equals(operator) &&
!BLCOperator.STARTS_WITH_FIELD.equals(operator) &&
!BLCOperator.BETWEEN.equals(operator) &&
!BLCOperator.BETWEEN_INCLUSIVE.equals(operator)
) {
value = extractDate(expressionDTO, operator, "value");
} else {
value = extractBasicValues(expressionDTO.getValue());
}
switch(operator) {
case CONTAINS: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".contains",
true, false, false, false, false);
break;
}
case CONTAINS_FIELD: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".contains",
true, true, false, false, false);
break;
}
case ENDS_WITH: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".endsWith",
true, false, false, false, false);
break;
}
case ENDS_WITH_FIELD: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".endsWith",
true, true, false, false, false);
break;
}
case EQUALS: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "==", false, false, false, false, false);
break;
}
case EQUALS_FIELD: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "==", false, true, false, false, false);
break;
}
case GREATER_OR_EQUAL: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ">=", false, false, false, false, false);
break;
}
case GREATER_OR_EQUAL_FIELD: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ">=", false, true, false, false, false);
break;
}
case GREATER_THAN: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ">", false, false, false, false, false);
break;
}
case GREATER_THAN_FIELD: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ">", false, true, false, false, false);
break;
}
case ICONTAINS: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".contains",
true, false, true, false, false);
break;
}
case IENDS_WITH: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".endsWith",
true, false, true, false, false);
break;
}
case IEQUALS: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "==", false, false, true, false, false);
break;
}
case INOT_CONTAINS: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".contains",
true, false, true, true, false);
break;
}
case INOT_ENDS_WITH: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".endsWith",
true, false, true, true, false);
break;
}
case INOT_EQUAL: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "!=", false, false, true, false, false);
break;
}
case INOT_STARTS_WITH: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".startsWith",
true, false, true, true, false);
break;
}
case IS_NULL: {
buildExpression(sb, entityKey, field, new Object[]{"null"}, type, secondaryType, "==",
false, false, false, false, true);
break;
}
case ISTARTS_WITH: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".startsWith",
true, false, true, false, false);
break;
}
case LESS_OR_EQUAL: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "<=", false, false, false, false, false);
break;
}
case LESS_OR_EQUAL_FIELD: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "<=", false, true, false, false, false);
break;
}
case LESS_THAN: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "<", false, false, false, false, false);
break;
}
case LESS_THAN_FIELD: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "<",
false, true, false, false, false);
break;
}
case NOT_CONTAINS: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".contains",
true, false, false, true, false);
break;
}
case NOT_ENDS_WITH: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".endsWith",
true, false, false, true, false);
break;
}
case NOT_EQUAL: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "!=", false, false, false, false, false);
break;
}
case NOT_EQUAL_FIELD: {
buildExpression(sb, entityKey, field, value, type, secondaryType, "!=",
false, true, false, false, false);
break;
}
case NOT_NULL: {
buildExpression(sb, entityKey, field, new Object[]{"null"}, type, secondaryType, "!=",
false, false, false, false, true);
break;
}
case NOT_STARTS_WITH: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".startsWith",
true, false, false, true, false);
break;
}
case STARTS_WITH: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".startsWith",
true, false, false, false, false);
break;
}
case STARTS_WITH_FIELD: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".startsWith",
true, true, false, false, false);
break;
}
case COUNT_GREATER_THAN: {
buildExpression(sb, entityKey, field, value, type, secondaryType, ".size()>", false, false, false, false, true);
break;
}
case COUNT_GREATER_OR_EQUAL:{
buildExpression(sb, entityKey, field, value, type, secondaryType, ".size()>=", false, false, false, false, true);
break;
}
case COUNT_LESS_THAN:{
buildExpression(sb, entityKey, field, value, type, secondaryType, ".size()<", false, false, false, false, true);
break;
}
case COUNT_LESS_OR_EQUAL:{
buildExpression(sb, entityKey, field, value, type, secondaryType, ".size()<=", false, false, false, false, true);
break;
}
case COUNT_EQUALS:{
buildExpression(sb, entityKey, field, value, type, secondaryType, ".size()==", false, false, false, false, true);
break;
}
case BETWEEN: {
if (SupportedFieldType.DATE.toString().equals(type.toString())) {
sb.append("(");
buildExpression(sb, entityKey, field, extractDate(expressionDTO, BLCOperator.GREATER_THAN, "start"),
type, secondaryType, ">", false, false, false, false, false);
sb.append("&&");
buildExpression(sb, entityKey, field, extractDate(expressionDTO, BLCOperator.LESS_THAN, "end"),
type, secondaryType, "<", false, false, false, false, false);
sb.append(")");
} else {
sb.append("(");
buildExpression(sb, entityKey, field, new Object[]{expressionDTO.getStart()}, type, secondaryType, ">",
false, false, false, false, false);
sb.append("&&");
buildExpression(sb, entityKey, field, new Object[]{expressionDTO.getEnd()}, type, secondaryType, "<",
false, false, false, false, false);
sb.append(")");
}
break;
}
case BETWEEN_INCLUSIVE: {
if (
SupportedFieldType.DATE.toString().equals(type.toString())
) {
sb.append("(");
buildExpression(sb, entityKey, field,
extractDate(expressionDTO, BLCOperator.GREATER_OR_EQUAL, "start"), type,
secondaryType, ">=", false, false, false, false, false);
sb.append("&&");
buildExpression(sb, entityKey, field, extractDate(expressionDTO, BLCOperator.LESS_OR_EQUAL, "end"),
type, secondaryType, "<=", false, false, false, false, false);
sb.append(")");
} else {
sb.append("(");
buildExpression(sb, entityKey, field, new Object[]{expressionDTO.getStart()}, type, secondaryType, ">=",
false, false, false, false, false);
sb.append("&&");
buildExpression(sb, entityKey, field, new Object[]{expressionDTO.getEnd()}, type, secondaryType, "<=",
false, false, false, false, false);
sb.append(")");
}
break;
}
}
}
@SuppressWarnings({ "rawtypes", "deprecation", "unchecked" })
protected Object[] extractDate(ExpressionDTO expressionDTO, BLCOperator operator, String key) {
String value;
if ("start".equals(key)) {
value = expressionDTO.getStart();
} else if ("end".equals(key)) {
value = expressionDTO.getEnd();
} else {
value = expressionDTO.getValue();
}
//TODO handle Date Time Format
// if (BLCOperator.GREATER_THAN.equals(operator) || BLCOperator.LESS_OR_EQUAL.equals(operator)) {
// ((Date) value).setHours(23);
// ((Date) value).setMinutes(59);
// } else {
// ((Date) value).setHours(0);
// ((Date) value).setMinutes(0);
// }
return new Object[]{value};
}
protected Object[] extractBasicValues(Object value) {
if (value == null) {
return null;
}
String stringValue = value.toString().trim();
Object[] response = new Object[]{};
if (isProjection(value)) {
List<String> temp = new ArrayList<String>();
int initial = 1;
//assume this is a multi-value phrase
boolean eof = false;
while (!eof) {
int end = stringValue.indexOf(",", initial);
if (end == -1) {
eof = true;
end = stringValue.length() - 1;
}
temp.add(stringValue.substring(initial, end));
initial = end + 1;
}
response = temp.toArray(response);
} else {
response = new Object[]{value};
}
return response;
}
public boolean isProjection(Object value) {
String stringValue = value.toString().trim();
return stringValue.startsWith("[") && stringValue.endsWith("]") && stringValue.indexOf(",") > 0;
}
protected void buildExpression(StringBuffer sb, String entityKey, String field, Object[] value,
SupportedFieldType type, SupportedFieldType secondaryType, String operator,
boolean includeParenthesis, boolean isFieldComparison, boolean ignoreCase,
boolean isNegation, boolean ignoreQuotes)
throws MVELTranslationException {
if (operator.equals("==") && !isFieldComparison && value.length > 1) {
sb.append("(");
sb.append("[");
sb.append(formatValue(field, entityKey, type, secondaryType, value, isFieldComparison,
ignoreCase, ignoreQuotes));
sb.append("] contains ");
sb.append(formatField(entityKey, type, field, ignoreCase, isNegation));
if ((type.equals(SupportedFieldType.ID) && secondaryType != null &&
secondaryType.equals(SupportedFieldType.INTEGER)) || type.equals(SupportedFieldType.INTEGER)) {
sb.append(".intValue()");
}
sb.append(")");
} else {
sb.append(formatField(entityKey, type, field, ignoreCase, isNegation));
sb.append(operator);
if (includeParenthesis) {
sb.append("(");
}
sb.append(formatValue(field, entityKey, type, secondaryType, value,
isFieldComparison, ignoreCase, ignoreQuotes));
if (includeParenthesis) {
sb.append(")");
}
}
}
protected String buildFieldName(String entityKey, String fieldName) {
String response = entityKey + "." + fieldName;
response = response.replaceAll("\\.", ".?");
return response;
}
protected String formatField(String entityKey, SupportedFieldType type, String field,
boolean ignoreCase, boolean isNegation) {
StringBuilder response = new StringBuilder();
if (isNegation) {
response.append("!");
}
String convertedField = field;
boolean isMapField = false;
if (convertedField.contains(FieldManager.MAPFIELDSEPARATOR)) {
//This must be a map field, convert the field name to syntax MVEL can understand for map access
convertedField = convertedField.substring(0, convertedField.indexOf(FieldManager.MAPFIELDSEPARATOR))
+ "[\"" + convertedField.substring(convertedField.indexOf(FieldManager.MAPFIELDSEPARATOR) +
FieldManager.MAPFIELDSEPARATOR.length(), convertedField.length()) + "\"]";
isMapField = true;
}
if (isMapField) {
switch(type) {
case BOOLEAN:
response.append("MvelHelper.convertField(\"BOOLEAN\",");
response.append(buildFieldName(entityKey, convertedField));
response.append(")");
break;
case INTEGER:
response.append("MvelHelper.convertField(\"INTEGER\",");
response.append(buildFieldName(entityKey, convertedField));
response.append(")");
break;
case DECIMAL:
case MONEY:
response.append("MvelHelper.convertField(\"DECIMAL\",");
response.append(buildFieldName(entityKey, convertedField));
response.append(")");
break;
case DATE:
response.append("MvelHelper.convertField(\"DATE\",");
response.append(buildFieldName(entityKey, convertedField));
response.append(")");
break;
case STRING:
if (ignoreCase) {
response.append("MvelHelper.toUpperCase(");
}
response.append(buildFieldName(entityKey, convertedField));
if (ignoreCase) {
response.append(")");
}
break;
case STRING_LIST:
response.append(buildFieldName(entityKey, convertedField));
break;
default:
throw new UnsupportedOperationException(type.toString() + " is not supported for map fields in the rule builder.");
}
} else {
switch(type) {
case BROADLEAF_ENUMERATION:
if (isMapField) {
throw new UnsupportedOperationException("Enumerations are not supported for map fields in the rule builder.");
} else {
response.append(buildFieldName(entityKey, convertedField));
response.append(".getType()");
}
break;
case MONEY:
response.append(buildFieldName(entityKey, convertedField));
response.append(".getAmount()");
break;
case STRING:
if (ignoreCase) {
response.append("MvelHelper.toUpperCase(");
}
response.append(buildFieldName(entityKey, convertedField));
if (ignoreCase) {
response.append(")");
}
break;
default:
response.append(buildFieldName(entityKey, convertedField));
break;
}
}
return response.toString();
}
protected String formatValue(String fieldName, String entityKey, SupportedFieldType type,
SupportedFieldType secondaryType, Object[] value,
boolean isFieldComparison, boolean ignoreCase,
boolean ignoreQuotes) throws MVELTranslationException {
StringBuilder response = new StringBuilder();
if (isFieldComparison) {
switch(type) {
case MONEY:
response.append(entityKey);
response.append(".");
response.append(value[0]);
response.append(".getAmount()");
break;
case STRING:
if (ignoreCase) {
response.append("MvelHelper.toUpperCase(");
}
response.append(entityKey);
response.append(".");
response.append(value[0]);
if (ignoreCase) {
response.append(")");
}
break;
default:
response.append(entityKey);
response.append(".");
response.append(value[0]);
break;
}
} else {
for (int j=0;j<value.length;j++){
switch(type) {
case BOOLEAN:
response.append(value[j]);
break;
case DECIMAL:
try {
Double.parseDouble(value[j].toString());
} catch (Exception e) {
throw new MVELTranslationException(MVELTranslationException.INCOMPATIBLE_DECIMAL_VALUE, "Cannot format value for the field ("
+ fieldName + ") based on field type. The type of field is Decimal, " +
"and you entered: (" + value[j] +")");
}
response.append(value[j]);
break;
case ID:
if (secondaryType != null && secondaryType.toString().equals(
SupportedFieldType.STRING.toString())) {
if (ignoreCase) {
response.append("MvelHelper.toUpperCase(");
}
if (!ignoreQuotes) {
response.append("\"");
}
response.append(value[j]);
if (!ignoreQuotes) {
response.append("\"");
}
if (ignoreCase) {
response.append(")");
}
} else {
try {
Integer.parseInt(value[j].toString());
} catch (Exception e) {
throw new MVELTranslationException(MVELTranslationException.INCOMPATIBLE_INTEGER_VALUE, "Cannot format value for the field (" +
fieldName + ") based on field type. The type of field is Integer, " +
"and you entered: (" + value[j] +")");
}
response.append(value[j]);
}
break;
case INTEGER:
try {
Integer.parseInt(value[j].toString());
} catch (Exception e) {
throw new MVELTranslationException(MVELTranslationException.INCOMPATIBLE_INTEGER_VALUE, "Cannot format value for the field (" +
fieldName + ") based on field type. The type of field is Integer, " +
"and you entered: (" + value[j] +")");
}
response.append(value[j]);
break;
case MONEY:
try {
Double.parseDouble(value[j].toString());
} catch (Exception e) {
throw new MVELTranslationException(MVELTranslationException.INCOMPATIBLE_DECIMAL_VALUE, "Cannot format value for the field (" +
fieldName + ") based on field type. The type of field is Money, " +
"and you entered: (" + value[j] +")");
}
response.append(value[j]);
break;
case DATE:
//convert the date to our standard date/time format
Date temp = null;
try {
temp = RuleBuilderFormatUtil.parseDate(String.valueOf(value[j]));
} catch (ParseException e) {
throw new MVELTranslationException(MVELTranslationException.INCOMPATIBLE_DATE_VALUE, "Cannot format value for the field (" +
fieldName + ") based on field type. The type of field is Date, " +
"and you entered: (" + value[j] +"). Dates must be in the format MM/dd/yyyy HH:mm.");
}
String convertedDate = FormatUtil.getTimeZoneFormat().format(temp);
response.append("MvelHelper.convertField(\"DATE\",\"");
response.append(convertedDate);
response.append("\")");
break;
default:
if (ignoreCase) {
response.append("MvelHelper.toUpperCase(");
}
if (!ignoreQuotes) {
response.append("\"");
}
response.append(value[j]);
if (!ignoreQuotes) {
response.append("\"");
}
if (ignoreCase) {
response.append(")");
}
break;
}
if (j < value.length - 1) {
response.append(",");
}
}
}
return response.toString();
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_web_rulebuilder_DataDTOToMVELTranslator.java
|
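To make the translation above concrete: a single ExpressionDTO meaning "subTotal greater than 100" on a MONEY field should, via the GREATER_THAN branch, produce an MVEL string shaped like order.?subTotal.getAmount()>100. A hedged sketch of such a call follows; the ExpressionDTO setter names are assumptions made for illustration:

public class TranslatorSketch {
    // fieldService is assumed to resolve "subTotal" to SupportedFieldType.MONEY.
    static String subTotalGreaterThan(RuleBuilderFieldService fieldService, String amount)
            throws MVELTranslationException {
        ExpressionDTO dto = new ExpressionDTO();          // setters below are assumed names
        dto.setName("subTotal");
        dto.setOperator(BLCOperator.GREATER_THAN.name());
        dto.setValue(amount);
        return new DataDTOToMVELTranslator().createMVEL("order", dto, fieldService);
        // expected shape: order.?subTotal.getAmount()>100
    }
}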
219 |
PriorityQueue<Passage> passageQueue = new PriorityQueue<Passage>(n, new Comparator<Passage>() {
@Override
public int compare(Passage left, Passage right) {
if (left.score < right.score) {
return -1;
} else if (left.score > right.score) {
return 1;
} else {
return left.startOffset - right.startOffset;
}
}
});
| 0true
|
src_main_java_org_apache_lucene_search_postingshighlight_XPostingsHighlighter.java
|
777 |
public class InventoryType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, InventoryType> TYPES = new LinkedHashMap<String, InventoryType>();
public static final InventoryType NONE = new InventoryType("NONE", "None");
public static final InventoryType BASIC = new InventoryType("BASIC", "Basic");
public static InventoryType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public InventoryType() {
//do nothing
}
public InventoryType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
} else {
throw new RuntimeException("Cannot add the type: (" + type + "). It already exists as a type via " + getInstance(type).getClass().getName());
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
InventoryType other = (InventoryType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_inventory_service_type_InventoryType.java
|
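InventoryType above follows Broadleaf's extensible-enum pattern: each constructed constant registers itself in the static TYPES map under its string key, so persisted values can be resolved back to the singleton instance. A brief usage sketch:

public class InventoryTypeExample {
    public static void main(String[] args) {
        // Touching the class initializes NONE and BASIC, which registers them in TYPES.
        InventoryType basic = InventoryType.getInstance("BASIC");
        System.out.println(basic.getFriendlyType());        // prints "Basic"
        System.out.println(basic == InventoryType.BASIC);   // prints "true"
    }
}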
167 |
static final class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
return new ForkJoinWorkerThread(pool);
}
}
| 0true
|
src_main_java_jsr166y_ForkJoinPool.java
|
414 |
private static final class ExecutionCallbackWrapper<T> implements ExecutionCallback<T> {
MultiExecutionCallbackWrapper multiExecutionCallbackWrapper;
Member member;
private ExecutionCallbackWrapper(MultiExecutionCallbackWrapper multiExecutionCallback, Member member) {
this.multiExecutionCallbackWrapper = multiExecutionCallback;
this.member = member;
}
public void onResponse(T response) {
multiExecutionCallbackWrapper.onResponse(member, response);
}
public void onFailure(Throwable t) {
}
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientExecutorServiceProxy.java
|
168 |
public interface SimpleClient {
void auth() throws IOException;
void send(Object o) throws IOException;
Object receive() throws IOException;
void close() throws IOException;
SerializationService getSerializationService();
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_client_SimpleClient.java
|
35 |
public class TitanEdgeTestSuite extends EdgeTestSuite {
public TitanEdgeTestSuite(final GraphTest graphTest) {
super(graphTest);
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_blueprints_TitanEdgeTestSuite.java
|
1,190 |
.moduleManagerFactory(new ModuleManagerFactory(){
@Override
public ModuleManager createModuleManager(Context context) {
return new JDTModuleManager(context, javaProject);
}
});
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_builder_CeylonBuilder.java
|
493 |
new ThreadLocal<DateFormat>() {
protected DateFormat initialValue() {
DateFormat df =
new SimpleDateFormat(OLD_COOKIE_PATTERN, Locale.US);
df.setTimeZone(TimeZone.getTimeZone("GMT"));
return df;
}
};
| 0true
|
common_src_main_java_org_broadleafcommerce_common_security_util_ServerCookie.java
|
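The anonymous ThreadLocal above exists because SimpleDateFormat is not thread-safe; giving every thread its own lazily built formatter avoids both synchronization and corrupted output. A generic sketch of the same idiom, with an assumed cookie date pattern standing in for OLD_COOKIE_PATTERN:

import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;

public class CookieDateFormatter {
    private static final ThreadLocal<DateFormat> FORMAT = new ThreadLocal<DateFormat>() {
        @Override
        protected DateFormat initialValue() {
            // Pattern is an assumption; the source uses the OLD_COOKIE_PATTERN constant.
            DateFormat df = new SimpleDateFormat("EEE, dd-MMM-yyyy HH:mm:ss z", Locale.US);
            df.setTimeZone(TimeZone.getTimeZone("GMT"));
            return df;
        }
    };

    // Each calling thread lazily creates and then reuses its own DateFormat.
    public static String format(Date date) {
        return FORMAT.get().format(date);
    }
}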
1,260 |
class SimpleNodeSampler extends NodeSampler {
@Override
protected void doSample() {
HashSet<DiscoveryNode> newNodes = new HashSet<DiscoveryNode>();
HashSet<DiscoveryNode> newFilteredNodes = new HashSet<DiscoveryNode>();
for (DiscoveryNode listedNode : listedNodes) {
if (!transportService.nodeConnected(listedNode)) {
try {
// it's a listed node, make a light connection to it...
logger.trace("connecting to listed node (light) [{}]", listedNode);
transportService.connectToNodeLight(listedNode);
} catch (Throwable e) {
logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode);
continue;
}
}
try {
NodesInfoResponse nodeInfo = transportService.submitRequest(listedNode, NodesInfoAction.NAME,
Requests.nodesInfoRequest("_local"),
TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withTimeout(pingTimeout),
new FutureTransportResponseHandler<NodesInfoResponse>() {
@Override
public NodesInfoResponse newInstance() {
return new NodesInfoResponse();
}
}).txGet();
if (!ignoreClusterName && !clusterName.equals(nodeInfo.getClusterName())) {
logger.warn("node {} not part of the cluster {}, ignoring...", listedNode, clusterName);
newFilteredNodes.add(listedNode);
} else if (nodeInfo.getNodes().length != 0) {
// use the discovered information but keep the original transport address, so people can control exactly which address is used.
DiscoveryNode nodeWithInfo = nodeInfo.getNodes()[0].getNode();
newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.id(), nodeWithInfo.getHostName(), nodeWithInfo.getHostAddress(), listedNode.address(), nodeWithInfo.attributes(), nodeWithInfo.version()));
} else {
// although we asked for one node, our target may not have completed initialization yet and doesn't have cluster nodes
logger.debug("node {} didn't return any discovery info, temporarily using transport discovery node", listedNode);
newNodes.add(listedNode);
}
} catch (Throwable e) {
logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
transportService.disconnectFromNode(listedNode);
}
}
nodes = validateNewNodes(newNodes);
filteredNodes = ImmutableList.copyOf(newFilteredNodes);
}
}
| 0true
|
src_main_java_org_elasticsearch_client_transport_TransportClientNodesService.java
|
15 |
public class TextCommandServiceImpl implements TextCommandService, TextCommandConstants {
private final Node node;
private final TextCommandProcessor[] textCommandProcessors = new TextCommandProcessor[100];
private final HazelcastInstance hazelcast;
private final AtomicLong sets = new AtomicLong();
private final AtomicLong touches = new AtomicLong();
private final AtomicLong getHits = new AtomicLong();
private final AtomicLong getMisses = new AtomicLong();
private final AtomicLong deleteMisses = new AtomicLong();
private final AtomicLong deleteHits = new AtomicLong();
private final AtomicLong incrementHits = new AtomicLong();
private final AtomicLong incrementMisses = new AtomicLong();
private final AtomicLong decrementHits = new AtomicLong();
private final AtomicLong decrementMisses = new AtomicLong();
private final long startTime = Clock.currentTimeMillis();
private final ILogger logger;
private volatile ResponseThreadRunnable responseThreadRunnable;
private volatile boolean running = true;
public TextCommandServiceImpl(Node node) {
this.node = node;
this.hazelcast = node.hazelcastInstance;
this.logger = node.getLogger(this.getClass().getName());
textCommandProcessors[GET.getValue()] = new GetCommandProcessor(this, true);
textCommandProcessors[PARTIAL_GET.getValue()] = new GetCommandProcessor(this, false);
textCommandProcessors[SET.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[APPEND.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[PREPEND.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[ADD.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[REPLACE.getValue()] = new SetCommandProcessor(this);
textCommandProcessors[GET_END.getValue()] = new NoOpCommandProcessor(this);
textCommandProcessors[DELETE.getValue()] = new DeleteCommandProcessor(this);
textCommandProcessors[QUIT.getValue()] = new SimpleCommandProcessor(this);
textCommandProcessors[STATS.getValue()] = new StatsCommandProcessor(this);
textCommandProcessors[UNKNOWN.getValue()] = new ErrorCommandProcessor(this);
textCommandProcessors[VERSION.getValue()] = new VersionCommandProcessor(this);
textCommandProcessors[TOUCH.getValue()] = new TouchCommandProcessor(this);
textCommandProcessors[INCREMENT.getValue()] = new IncrementCommandProcessor(this);
textCommandProcessors[DECREMENT.getValue()] = new IncrementCommandProcessor(this);
textCommandProcessors[ERROR_CLIENT.getValue()] = new ErrorCommandProcessor(this);
textCommandProcessors[ERROR_SERVER.getValue()] = new ErrorCommandProcessor(this);
textCommandProcessors[HTTP_GET.getValue()] = new HttpGetCommandProcessor(this);
textCommandProcessors[HTTP_POST.getValue()] = new HttpPostCommandProcessor(this);
textCommandProcessors[HTTP_PUT.getValue()] = new HttpPostCommandProcessor(this);
textCommandProcessors[HTTP_DELETE.getValue()] = new HttpDeleteCommandProcessor(this);
textCommandProcessors[NO_OP.getValue()] = new NoOpCommandProcessor(this);
}
@Override
public Node getNode() {
return node;
}
@Override
public byte[] toByteArray(Object value) {
Data data = node.getSerializationService().toData(value);
return data.getBuffer();
}
@Override
public Stats getStats() {
Stats stats = new Stats();
stats.uptime = (int) ((Clock.currentTimeMillis() - startTime) / 1000);
stats.cmd_get = getMisses.get() + getHits.get();
stats.cmd_set = sets.get();
stats.cmd_touch = touches.get();
stats.get_hits = getHits.get();
stats.get_misses = getMisses.get();
stats.delete_hits = deleteHits.get();
stats.delete_misses = deleteMisses.get();
stats.incr_hits = incrementHits.get();
stats.incr_misses = incrementMisses.get();
stats.decr_hits = decrementHits.get();
stats.decr_misses = decrementMisses.get();
stats.curr_connections = node.connectionManager.getCurrentClientConnections();
stats.total_connections = node.connectionManager.getAllTextConnections();
return stats;
}
@Override
public long incrementDeleteHitCount(int inc) {
return deleteHits.addAndGet(inc);
}
@Override
public long incrementDeleteMissCount() {
return deleteMisses.incrementAndGet();
}
@Override
public long incrementGetHitCount() {
return getHits.incrementAndGet();
}
@Override
public long incrementGetMissCount() {
return getMisses.incrementAndGet();
}
@Override
public long incrementSetCount() {
return sets.incrementAndGet();
}
@Override
public long incrementIncHitCount() {
return incrementHits.incrementAndGet();
}
@Override
public long incrementIncMissCount() {
return incrementMisses.incrementAndGet();
}
@Override
public long incrementDecrHitCount() {
return decrementHits.incrementAndGet();
}
@Override
public long incrementDecrMissCount() {
return decrementMisses.incrementAndGet();
}
@Override
public long incrementTouchCount() {
return touches.incrementAndGet();
}
@Override
public void processRequest(TextCommand command) {
if (responseThreadRunnable == null) {
synchronized (this) {
if (responseThreadRunnable == null) {
responseThreadRunnable = new ResponseThreadRunnable();
String threadNamePrefix = node.getThreadNamePrefix("ascii.service.response");
Thread thread = new Thread(node.threadGroup, responseThreadRunnable, threadNamePrefix);
thread.start();
}
}
}
node.nodeEngine.getExecutionService().execute("hz:text", new CommandExecutor(command));
}
@Override
public Object get(String mapName, String key) {
return hazelcast.getMap(mapName).get(key);
}
@Override
public int getAdjustedTTLSeconds(int ttl) {
if (ttl <= MONTH_SECONDS) {
return ttl;
} else {
return ttl - (int) (Clock.currentTimeMillis() / 1000);
}
}
@Override
public byte[] getByteArray(String mapName, String key) {
Object value = hazelcast.getMap(mapName).get(key);
byte[] result = null;
if (value != null) {
if (value instanceof RestValue) {
RestValue restValue = (RestValue) value;
result = restValue.getValue();
} else if (value instanceof byte[]) {
result = (byte[]) value;
} else {
result = toByteArray(value);
}
}
return result;
}
@Override
public Object put(String mapName, String key, Object value) {
return hazelcast.getMap(mapName).put(key, value);
}
@Override
public Object put(String mapName, String key, Object value, int ttlSeconds) {
return hazelcast.getMap(mapName).put(key, value, ttlSeconds, TimeUnit.SECONDS);
}
@Override
public Object putIfAbsent(String mapName, String key, Object value, int ttlSeconds) {
return hazelcast.getMap(mapName).putIfAbsent(key, value, ttlSeconds, TimeUnit.SECONDS);
}
@Override
public Object replace(String mapName, String key, Object value) {
return hazelcast.getMap(mapName).replace(key, value);
}
@Override
public void lock(String mapName, String key) throws InterruptedException {
if (!hazelcast.getMap(mapName).tryLock(key, 1, TimeUnit.MINUTES)) {
throw new RuntimeException("Memcache client could not get the lock for map:"
+ mapName + " key:" + key + " in 1 minute");
}
}
@Override
public void unlock(String mapName, String key) {
hazelcast.getMap(mapName).unlock(key);
}
@Override
public void deleteAll(String mapName) {
final IMap<Object, Object> map = hazelcast.getMap(mapName);
map.clear();
}
@Override
public Object delete(String mapName, String key) {
return hazelcast.getMap(mapName).remove(key);
}
@Override
public boolean offer(String queueName, Object value) {
return hazelcast.getQueue(queueName).offer(value);
}
@Override
public Object poll(String queueName, int seconds) {
try {
return hazelcast.getQueue(queueName).poll(seconds, TimeUnit.SECONDS);
} catch (InterruptedException e) {
return null;
}
}
@Override
public Object poll(String queueName) {
return hazelcast.getQueue(queueName).poll();
}
@Override
public int size(String queueName) {
return hazelcast.getQueue(queueName).size();
}
@Override
public void sendResponse(TextCommand textCommand) {
if (!textCommand.shouldReply() || textCommand.getRequestId() == -1) {
throw new RuntimeException("Shouldn't reply " + textCommand);
}
responseThreadRunnable.sendResponse(textCommand);
}
public void stop() {
final ResponseThreadRunnable rtr = responseThreadRunnable;
if (rtr != null) {
rtr.stop();
}
}
class CommandExecutor implements Runnable {
final TextCommand command;
CommandExecutor(TextCommand command) {
this.command = command;
}
@Override
public void run() {
try {
TextCommandType type = command.getType();
TextCommandProcessor textCommandProcessor = textCommandProcessors[type.getValue()];
textCommandProcessor.handle(command);
} catch (Throwable e) {
logger.warning(e);
}
}
}
private class ResponseThreadRunnable implements Runnable {
private final BlockingQueue<TextCommand> blockingQueue = new ArrayBlockingQueue<TextCommand>(200);
private final Object stopObject = new Object();
@edu.umd.cs.findbugs.annotations.SuppressWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
public void sendResponse(TextCommand textCommand) {
blockingQueue.offer(textCommand);
}
@Override
public void run() {
while (running) {
try {
TextCommand textCommand = blockingQueue.take();
if (TextCommandConstants.TextCommandType.STOP == textCommand.getType()) {
synchronized (stopObject) {
stopObject.notify();
}
} else {
SocketTextWriter socketTextWriter = textCommand.getSocketTextWriter();
socketTextWriter.enqueue(textCommand);
}
} catch (InterruptedException e) {
return;
} catch (OutOfMemoryError e) {
OutOfMemoryErrorDispatcher.onOutOfMemory(e);
throw e;
}
}
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings("RV_RETURN_VALUE_IGNORED_BAD_PRACTICE")
void stop() {
running = false;
synchronized (stopObject) {
try {
blockingQueue.offer(new AbstractTextCommand(TextCommandConstants.TextCommandType.STOP) {
@Override
public boolean readFrom(ByteBuffer cb) {
return true;
}
@Override
public boolean writeTo(ByteBuffer bb) {
return true;
}
});
//noinspection WaitNotInLoop
stopObject.wait(1000);
} catch (Exception ignored) {
}
}
}
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_TextCommandServiceImpl.java
|
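The getAdjustedTTLSeconds logic above follows the memcached expiration convention: values up to 30 days are relative offsets in seconds, while larger values are treated as absolute unix timestamps and converted back to a relative offset. A minimal sketch of that rule, assuming MONTH_SECONDS is 60 * 60 * 24 * 30 (the constant itself is not shown in the snippet):
// Hypothetical helper; names are illustrative and not part of the Hazelcast API.
final class TtlAdjustmentSketch {
    static final int MONTH_SECONDS = 60 * 60 * 24 * 30;
    static int adjustTtlSeconds(int ttl, long nowMillis) {
        if (ttl <= MONTH_SECONDS) {
            return ttl;                        // small values are already relative offsets in seconds
        }
        return ttl - (int) (nowMillis / 1000); // large values are absolute unix timestamps
    }
}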
308 |
new Thread() {
public void run() {
map.lock(key);
lockedLatch.countDown();
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
636 |
public class IndicesStatusResponse extends BroadcastOperationResponse implements ToXContent {
protected ShardStatus[] shards;
private Map<String, IndexStatus> indicesStatus;
IndicesStatusResponse() {
}
IndicesStatusResponse(ShardStatus[] shards, ClusterState clusterState, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.shards = shards;
}
public ShardStatus[] getShards() {
return this.shards;
}
public ShardStatus getAt(int position) {
return shards[position];
}
public IndexStatus getIndex(String index) {
return getIndices().get(index);
}
public Map<String, IndexStatus> getIndices() {
if (indicesStatus != null) {
return indicesStatus;
}
Map<String, IndexStatus> indicesStatus = newHashMap();
Set<String> indices = Sets.newHashSet();
for (ShardStatus shard : shards) {
indices.add(shard.getIndex());
}
for (String index : indices) {
List<ShardStatus> shards = newArrayList();
for (ShardStatus shard : this.shards) {
if (shard.getShardRouting().index().equals(index)) {
shards.add(shard);
}
}
indicesStatus.put(index, new IndexStatus(index, shards.toArray(new ShardStatus[shards.size()])));
}
this.indicesStatus = indicesStatus;
return indicesStatus;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(getShards().length);
for (ShardStatus status : getShards()) {
status.writeTo(out);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shards = new ShardStatus[in.readVInt()];
for (int i = 0; i < shards.length; i++) {
shards[i] = readIndexShardStatus(in);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return toXContent(builder, params, null);
}
public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nullable SettingsFilter settingsFilter) throws IOException {
builder.startObject(Fields.INDICES);
for (IndexStatus indexStatus : getIndices().values()) {
builder.startObject(indexStatus.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject(Fields.INDEX);
if (indexStatus.getStoreSize() != null) {
builder.byteSizeField(Fields.PRIMARY_SIZE_IN_BYTES, Fields.PRIMARY_SIZE, indexStatus.getPrimaryStoreSize());
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, indexStatus.getStoreSize());
}
builder.endObject();
if (indexStatus.getTranslogOperations() != -1) {
builder.startObject(Fields.TRANSLOG);
builder.field(Fields.OPERATIONS, indexStatus.getTranslogOperations());
builder.endObject();
}
if (indexStatus.getDocs() != null) {
builder.startObject(Fields.DOCS);
builder.field(Fields.NUM_DOCS, indexStatus.getDocs().getNumDocs());
builder.field(Fields.MAX_DOC, indexStatus.getDocs().getMaxDoc());
builder.field(Fields.DELETED_DOCS, indexStatus.getDocs().getDeletedDocs());
builder.endObject();
}
MergeStats mergeStats = indexStatus.getMergeStats();
if (mergeStats != null) {
mergeStats.toXContent(builder, params);
}
RefreshStats refreshStats = indexStatus.getRefreshStats();
if (refreshStats != null) {
refreshStats.toXContent(builder, params);
}
FlushStats flushStats = indexStatus.getFlushStats();
if (flushStats != null) {
flushStats.toXContent(builder, params);
}
builder.startObject(Fields.SHARDS);
for (IndexShardStatus indexShardStatus : indexStatus) {
builder.startArray(Integer.toString(indexShardStatus.getShardId().id()));
for (ShardStatus shardStatus : indexShardStatus) {
builder.startObject();
builder.startObject(Fields.ROUTING)
.field(Fields.STATE, shardStatus.getShardRouting().state())
.field(Fields.PRIMARY, shardStatus.getShardRouting().primary())
.field(Fields.NODE, shardStatus.getShardRouting().currentNodeId())
.field(Fields.RELOCATING_NODE, shardStatus.getShardRouting().relocatingNodeId())
.field(Fields.SHARD, shardStatus.getShardRouting().shardId().id())
.field(Fields.INDEX, shardStatus.getShardRouting().shardId().index().name())
.endObject();
builder.field(Fields.STATE, shardStatus.getState());
if (shardStatus.getStoreSize() != null) {
builder.startObject(Fields.INDEX);
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, shardStatus.getStoreSize());
builder.endObject();
}
if (shardStatus.getTranslogId() != -1) {
builder.startObject(Fields.TRANSLOG);
builder.field(Fields.ID, shardStatus.getTranslogId());
builder.field(Fields.OPERATIONS, shardStatus.getTranslogOperations());
builder.endObject();
}
if (shardStatus.getDocs() != null) {
builder.startObject(Fields.DOCS);
builder.field(Fields.NUM_DOCS, shardStatus.getDocs().getNumDocs());
builder.field(Fields.MAX_DOC, shardStatus.getDocs().getMaxDoc());
builder.field(Fields.DELETED_DOCS, shardStatus.getDocs().getDeletedDocs());
builder.endObject();
}
mergeStats = shardStatus.getMergeStats();
if (mergeStats != null) {
mergeStats.toXContent(builder, params);
}
refreshStats = shardStatus.getRefreshStats();
if (refreshStats != null) {
refreshStats.toXContent(builder, params);
}
flushStats = shardStatus.getFlushStats();
if (flushStats != null) {
flushStats.toXContent(builder, params);
}
if (shardStatus.getPeerRecoveryStatus() != null) {
PeerRecoveryStatus peerRecoveryStatus = shardStatus.getPeerRecoveryStatus();
builder.startObject(Fields.PEER_RECOVERY);
builder.field(Fields.STAGE, peerRecoveryStatus.getStage());
builder.field(Fields.START_TIME_IN_MILLIS, peerRecoveryStatus.getStartTime());
builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, peerRecoveryStatus.getTime());
builder.startObject(Fields.INDEX);
builder.field(Fields.PROGRESS, peerRecoveryStatus.getIndexRecoveryProgress());
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, peerRecoveryStatus.getIndexSize());
builder.byteSizeField(Fields.REUSED_SIZE_IN_BYTES, Fields.REUSED_SIZE, peerRecoveryStatus.getReusedIndexSize());
builder.byteSizeField(Fields.EXPECTED_RECOVERED_SIZE_IN_BYTES, Fields.EXPECTED_RECOVERED_SIZE, peerRecoveryStatus.getExpectedRecoveredIndexSize());
builder.byteSizeField(Fields.RECOVERED_SIZE_IN_BYTES, Fields.RECOVERED_SIZE, peerRecoveryStatus.getRecoveredIndexSize());
builder.endObject();
builder.startObject(Fields.TRANSLOG);
builder.field(Fields.RECOVERED, peerRecoveryStatus.getRecoveredTranslogOperations());
builder.endObject();
builder.endObject();
}
if (shardStatus.getGatewayRecoveryStatus() != null) {
GatewayRecoveryStatus gatewayRecoveryStatus = shardStatus.getGatewayRecoveryStatus();
builder.startObject(Fields.GATEWAY_RECOVERY);
builder.field(Fields.STAGE, gatewayRecoveryStatus.getStage());
builder.field(Fields.START_TIME_IN_MILLIS, gatewayRecoveryStatus.getStartTime());
builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, gatewayRecoveryStatus.getTime());
builder.startObject(Fields.INDEX);
builder.field(Fields.PROGRESS, gatewayRecoveryStatus.getIndexRecoveryProgress());
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, gatewayRecoveryStatus.getIndexSize());
builder.byteSizeField(Fields.REUSED_SIZE_IN_BYTES, Fields.REUSED_SIZE, gatewayRecoveryStatus.getReusedIndexSize());
builder.byteSizeField(Fields.EXPECTED_RECOVERED_SIZE_IN_BYTES, Fields.EXPECTED_RECOVERED_SIZE, gatewayRecoveryStatus.getExpectedRecoveredIndexSize());
builder.byteSizeField(Fields.RECOVERED_SIZE_IN_BYTES, Fields.RECOVERED_SIZE, gatewayRecoveryStatus.getRecoveredIndexSize());
builder.endObject();
builder.startObject(Fields.TRANSLOG);
builder.field(Fields.RECOVERED, gatewayRecoveryStatus.getRecoveredTranslogOperations());
builder.endObject();
builder.endObject();
}
if (shardStatus.getGatewaySnapshotStatus() != null) {
GatewaySnapshotStatus gatewaySnapshotStatus = shardStatus.getGatewaySnapshotStatus();
builder.startObject(Fields.GATEWAY_SNAPSHOT);
builder.field(Fields.STAGE, gatewaySnapshotStatus.getStage());
builder.field(Fields.START_TIME_IN_MILLIS, gatewaySnapshotStatus.getStartTime());
builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, gatewaySnapshotStatus.getTime());
builder.startObject(Fields.INDEX);
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, gatewaySnapshotStatus.getIndexSize());
builder.endObject();
builder.startObject(Fields.TRANSLOG);
builder.field(Fields.EXPECTED_OPERATIONS, gatewaySnapshotStatus.getExpectedNumberOfOperations());
builder.endObject();
builder.endObject();
}
builder.endObject();
}
builder.endArray();
}
builder.endObject();
builder.endObject();
}
builder.endObject();
return builder;
}
static final class Fields {
static final XContentBuilderString INDICES = new XContentBuilderString("indices");
static final XContentBuilderString INDEX = new XContentBuilderString("index");
static final XContentBuilderString PRIMARY_SIZE = new XContentBuilderString("primary_size");
static final XContentBuilderString PRIMARY_SIZE_IN_BYTES = new XContentBuilderString("primary_size_in_bytes");
static final XContentBuilderString SIZE = new XContentBuilderString("size");
static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes");
static final XContentBuilderString TRANSLOG = new XContentBuilderString("translog");
static final XContentBuilderString OPERATIONS = new XContentBuilderString("operations");
static final XContentBuilderString DOCS = new XContentBuilderString("docs");
static final XContentBuilderString NUM_DOCS = new XContentBuilderString("num_docs");
static final XContentBuilderString MAX_DOC = new XContentBuilderString("max_doc");
static final XContentBuilderString DELETED_DOCS = new XContentBuilderString("deleted_docs");
static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
static final XContentBuilderString STATE = new XContentBuilderString("state");
static final XContentBuilderString PRIMARY = new XContentBuilderString("primary");
static final XContentBuilderString NODE = new XContentBuilderString("node");
static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node");
static final XContentBuilderString SHARD = new XContentBuilderString("shard");
static final XContentBuilderString ID = new XContentBuilderString("id");
static final XContentBuilderString PEER_RECOVERY = new XContentBuilderString("peer_recovery");
static final XContentBuilderString STAGE = new XContentBuilderString("stage");
static final XContentBuilderString START_TIME_IN_MILLIS = new XContentBuilderString("start_time_in_millis");
static final XContentBuilderString TIME = new XContentBuilderString("time");
static final XContentBuilderString TIME_IN_MILLIS = new XContentBuilderString("time_in_millis");
static final XContentBuilderString PROGRESS = new XContentBuilderString("progress");
static final XContentBuilderString REUSED_SIZE = new XContentBuilderString("reused_size");
static final XContentBuilderString REUSED_SIZE_IN_BYTES = new XContentBuilderString("reused_size_in_bytes");
static final XContentBuilderString EXPECTED_RECOVERED_SIZE = new XContentBuilderString("expected_recovered_size");
static final XContentBuilderString EXPECTED_RECOVERED_SIZE_IN_BYTES = new XContentBuilderString("expected_recovered_size_in_bytes");
static final XContentBuilderString RECOVERED_SIZE = new XContentBuilderString("recovered_size");
static final XContentBuilderString RECOVERED_SIZE_IN_BYTES = new XContentBuilderString("recovered_size_in_bytes");
static final XContentBuilderString RECOVERED = new XContentBuilderString("recovered");
static final XContentBuilderString GATEWAY_RECOVERY = new XContentBuilderString("gateway_recovery");
static final XContentBuilderString GATEWAY_SNAPSHOT = new XContentBuilderString("gateway_snapshot");
static final XContentBuilderString EXPECTED_OPERATIONS = new XContentBuilderString("expected_operations");
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_status_IndicesStatusResponse.java
|
1,785 |
public abstract class ShapeBuilder implements ToXContent {
protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());
private static final boolean DEBUG;
static {
// if asserts are enabled we run the debug statements even if they are not logged
// to prevent exceptions only present if debug enabled
boolean debug = false;
assert debug = true;
DEBUG = debug;
}
public static final double DATELINE = 180;
public static final GeometryFactory FACTORY = new GeometryFactory();
public static final JtsSpatialContext SPATIAL_CONTEXT = new JtsSpatialContext(true);
protected final boolean wrapdateline = true;
protected ShapeBuilder() {
}
protected static Coordinate coordinate(double longitude, double latitude) {
return new Coordinate(longitude, latitude);
}
/**
* Create a new point
*
* @param longitude longitude of the point
* @param latitude latitude of the point
* @return a new {@link PointBuilder}
*/
public static PointBuilder newPoint(double longitude, double latitude) {
return newPoint(new Coordinate(longitude, latitude));
}
/**
* Create a new {@link PointBuilder} from a {@link Coordinate}
* @param coordinate coordinate defining the position of the point
* @return a new {@link PointBuilder}
*/
public static PointBuilder newPoint(Coordinate coordinate) {
return new PointBuilder().coordinate(coordinate);
}
/**
* Create a new set of points
* @return new {@link MultiPointBuilder}
*/
public static MultiPointBuilder newMultiPoint() {
return new MultiPointBuilder();
}
/**
* Create a new lineString
* @return a new {@link LineStringBuilder}
*/
public static LineStringBuilder newLineString() {
return new LineStringBuilder();
}
/**
* Create a new Collection of lineStrings
* @return a new {@link MultiLineStringBuilder}
*/
public static MultiLineStringBuilder newMultiLinestring() {
return new MultiLineStringBuilder();
}
/**
* Create a new Polygon
* @return a new {@link PolygonBuilder}
*/
public static PolygonBuilder newPolygon() {
return new PolygonBuilder();
}
/**
* Create a new Collection of polygons
* @return a new {@link MultiPolygonBuilder}
*/
public static MultiPolygonBuilder newMultiPolygon() {
return new MultiPolygonBuilder();
}
/**
* create a new Circle
* @return a new {@link CircleBuilder}
*/
public static CircleBuilder newCircleBuilder() {
return new CircleBuilder();
}
/**
* create a new rectangle
* @return a new {@link EnvelopeBuilder}
*/
public static EnvelopeBuilder newEnvelope() {
return new EnvelopeBuilder();
}
@Override
public String toString() {
try {
XContentBuilder xcontent = JsonXContent.contentBuilder();
return toXContent(xcontent, EMPTY_PARAMS).prettyPrint().string();
} catch (IOException e) {
return super.toString();
}
}
/**
* Create a new Shape from this builder. Since calling this method may change the
* defined shape (by inserting new coordinates or changing the position of points),
* the builder loses its validity, so this method should only be called once per builder.
* @return new {@link Shape} defined by the builder
*/
public abstract Shape build();
/**
* Recursive method which parses the arrays of coordinates used to define
* Shapes
*
* @param parser
* Parser that will be read from
* @return CoordinateNode representing the start of the coordinate tree
* @throws IOException
* Thrown if an error occurs while reading from the
* XContentParser
*/
private static CoordinateNode parseCoordinates(XContentParser parser) throws IOException {
XContentParser.Token token = parser.nextToken();
// Base case
if (token != XContentParser.Token.START_ARRAY) {
double lon = parser.doubleValue();
token = parser.nextToken();
double lat = parser.doubleValue();
token = parser.nextToken();
return new CoordinateNode(new Coordinate(lon, lat));
}
List<CoordinateNode> nodes = new ArrayList<CoordinateNode>();
while (token != XContentParser.Token.END_ARRAY) {
nodes.add(parseCoordinates(parser));
token = parser.nextToken();
}
return new CoordinateNode(nodes);
}
/**
* Create a new {@link ShapeBuilder} from {@link XContent}
* @param parser parser to read the GeoShape from
* @return {@link ShapeBuilder} read from the parser or null
* if the parser's current token is <code>null</code>
* @throws IOException if the input could not be read
*/
public static ShapeBuilder parse(XContentParser parser) throws IOException {
return GeoShapeType.parse(parser);
}
protected static XContentBuilder toXContent(XContentBuilder builder, Coordinate coordinate) throws IOException {
return builder.startArray().value(coordinate.x).value(coordinate.y).endArray();
}
protected static Coordinate shift(Coordinate coordinate, double dateline) {
if (dateline == 0) {
return coordinate;
} else {
return new Coordinate(-2 * dateline + coordinate.x, coordinate.y);
}
}
/**
* get the shapes type
* @return type of the shape
*/
public abstract GeoShapeType type();
/**
* Calculate the intersection of a line segment and a vertical dateline.
*
* @param p1
* start-point of the line segment
* @param p2
* end-point of the line segment
* @param dateline
* x-coordinate of the vertical dateline
* @return position of the intersection in the half-open range (0..1] if the line
* segment intersects the dateline. Otherwise this method
* returns {@link Double#NaN}
*/
protected static final double intersection(Coordinate p1, Coordinate p2, double dateline) {
if (p1.x == p2.x) {
return Double.NaN;
} else {
final double t = (dateline - p1.x) / (p2.x - p1.x);
if (t > 1 || t <= 0) {
return Double.NaN;
} else {
return t;
}
}
}
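    // Worked example (illustrative comment, not part of the original source):
    // for p1 = (170, 0), p2 = (190, 5) and dateline = 180 this returns
    // t = (180 - 170) / (190 - 170) = 0.5, and Edge.position(p1, p2, 0.5)
    // places the intersection at (180, 2.5) on the dateline.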
/**
* Calculate all intersections of line segments and a vertical line. The
* array of edges will be ordered ascending by the y-coordinate of the
* intersections of edges.
*
* @param dateline
* x-coordinate of the dateline
* @param edges
* set of edges that may intersect with the dateline
* @return number of intersecting edges
*/
protected static int intersections(double dateline, Edge[] edges) {
int numIntersections = 0;
assert !Double.isNaN(dateline);
for (int i = 0; i < edges.length; i++) {
Coordinate p1 = edges[i].coordinate;
Coordinate p2 = edges[i].next.coordinate;
assert !Double.isNaN(p2.x) && !Double.isNaN(p1.x);
edges[i].intersect = IntersectionOrder.SENTINEL;
double position = intersection(p1, p2, dateline);
if (!Double.isNaN(position)) {
if (position == 1) {
if (Double.compare(p1.x, dateline) == Double.compare(edges[i].next.next.coordinate.x, dateline)) {
// Ignore the ear
continue;
} else if (p2.x == dateline) {
// Ignore Linesegment on dateline
continue;
}
}
edges[i].intersection(position);
numIntersections++;
}
}
Arrays.sort(edges, INTERSECTION_ORDER);
return numIntersections;
}
/**
* Node used to represent a tree of coordinates.
* <p/>
* Can either be a leaf node consisting of a Coordinate, or a parent with
* children
*/
protected static class CoordinateNode implements ToXContent {
protected final Coordinate coordinate;
protected final List<CoordinateNode> children;
/**
* Creates a new leaf CoordinateNode
*
* @param coordinate
* Coordinate for the Node
*/
protected CoordinateNode(Coordinate coordinate) {
this.coordinate = coordinate;
this.children = null;
}
/**
* Creates a new parent CoordinateNode
*
* @param children
* Children of the Node
*/
protected CoordinateNode(List<CoordinateNode> children) {
this.children = children;
this.coordinate = null;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (children == null) {
builder.startArray().value(coordinate.x).value(coordinate.y).endArray();
} else {
builder.startArray();
for (CoordinateNode child : children) {
child.toXContent(builder, params);
}
builder.endArray();
}
return builder;
}
}
/**
* This helper class implements a linked list for {@link Coordinate}. It contains
* fields for a dateline intersection and component id
*/
protected static final class Edge {
Coordinate coordinate; // coordinate of the start point
Edge next; // next segment
Coordinate intersect; // potential intersection with dateline
int component = -1; // id of the component this edge belongs to
protected Edge(Coordinate coordinate, Edge next, Coordinate intersection) {
this.coordinate = coordinate;
this.next = next;
this.intersect = intersection;
if (next != null) {
this.component = next.component;
}
}
protected Edge(Coordinate coordinate, Edge next) {
this(coordinate, next, IntersectionOrder.SENTINEL);
}
private static final int top(Coordinate[] points, int offset, int length) {
int top = 0; // the loop below starts at 1 since top already points to index 0
for (int i = 1; i < length; i++) {
if (points[offset + i].y < points[offset + top].y) {
top = i;
} else if (points[offset + i].y == points[offset + top].y) {
if (points[offset + i].x < points[offset + top].x) {
top = i;
}
}
}
return top;
}
/**
* Concatenate a set of points to a polygon
*
* @param component
* component id of the polygon
* @param direction
* direction of the ring
* @param points
* list of points to concatenate
* @param pointOffset
* index of the first point
* @param edges
* Array of edges to write the result to
* @param edgeOffset
* index of the first edge in the result
* @param length
* number of points to use
* @return the edges created
*/
private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
int length) {
assert edges.length >= length+edgeOffset;
assert points.length >= length+pointOffset;
edges[edgeOffset] = new Edge(points[pointOffset], null);
for (int i = 1; i < length; i++) {
if (direction) {
edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
edges[edgeOffset + i].component = component;
} else {
edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
edges[edgeOffset + i - 1].component = component;
}
}
if (direction) {
edges[edgeOffset].next = edges[edgeOffset + length - 1];
edges[edgeOffset].component = component;
} else {
edges[edgeOffset + length - 1].next = edges[edgeOffset];
edges[edgeOffset + length - 1].component = component;
}
return edges;
}
/**
* Create a connected list of a list of coordinates
*
* @param points
* array of points
* @param offset
* index of the first point
* @param length
* number of points
* @return Array of edges
*/
protected static Edge[] ring(int component, boolean direction, Coordinate[] points, int offset, Edge[] edges, int toffset,
int length) {
// calculate the direction of the points:
// find the point at the top of the set and check its
// neighbors' orientation. So direction is equivalent
// to clockwise/counterclockwise
final int top = top(points, offset, length);
final int prev = (offset + ((top + length - 1) % length));
final int next = (offset + ((top + 1) % length));
final boolean orientation = points[offset + prev].x > points[offset + next].x;
return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
}
/**
* Set the intersection of this line segment to the given position
*
* @param position
* position of the intersection [0..1]
* @return the {@link Coordinate} of the intersection
*/
protected Coordinate intersection(double position) {
return intersect = position(coordinate, next.coordinate, position);
}
public static Coordinate position(Coordinate p1, Coordinate p2, double position) {
if (position == 0) {
return p1;
} else if (position == 1) {
return p2;
} else {
final double x = p1.x + position * (p2.x - p1.x);
final double y = p1.y + position * (p2.y - p1.y);
return new Coordinate(x, y);
}
}
@Override
public String toString() {
return "Edge[Component=" + component + "; start=" + coordinate + " " + "; intersection=" + intersect + "]";
}
}
protected static final IntersectionOrder INTERSECTION_ORDER = new IntersectionOrder();
private static final class IntersectionOrder implements Comparator<Edge> {
private static final Coordinate SENTINEL = new Coordinate(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY);
@Override
public int compare(Edge o1, Edge o2) {
return Double.compare(o1.intersect.y, o2.intersect.y);
}
}
public static final String FIELD_TYPE = "type";
public static final String FIELD_COORDINATES = "coordinates";
protected static final boolean debugEnabled() {
return LOGGER.isDebugEnabled() || DEBUG;
}
/**
* Enumeration that lists all {@link GeoShapeType}s that can be handled
*/
public static enum GeoShapeType {
POINT("point"),
MULTIPOINT("multipoint"),
LINESTRING("linestring"),
MULTILINESTRING("multilinestring"),
POLYGON("polygon"),
MULTIPOLYGON("multipolygon"),
ENVELOPE("envelope"),
CIRCLE("circle");
protected final String shapename;
private GeoShapeType(String shapename) {
this.shapename = shapename;
}
public static GeoShapeType forName(String geoshapename) {
String typename = geoshapename.toLowerCase(Locale.ROOT);
for (GeoShapeType type : values()) {
if(type.shapename.equals(typename)) {
return type;
}
}
throw new ElasticsearchIllegalArgumentException("unknown geo_shape ["+geoshapename+"]");
}
public static ShapeBuilder parse(XContentParser parser) throws IOException {
if (parser.currentToken() == XContentParser.Token.VALUE_NULL) {
return null;
} else if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("Shape must be an object consisting of type and coordinates");
}
GeoShapeType shapeType = null;
Distance radius = null;
CoordinateNode node = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String fieldName = parser.currentName();
if (FIELD_TYPE.equals(fieldName)) {
parser.nextToken();
shapeType = GeoShapeType.forName(parser.text());
} else if (FIELD_COORDINATES.equals(fieldName)) {
parser.nextToken();
node = parseCoordinates(parser);
} else if (CircleBuilder.FIELD_RADIUS.equals(fieldName)) {
parser.nextToken();
radius = Distance.parseDistance(parser.text());
} else {
parser.nextToken();
parser.skipChildren();
}
}
}
if (shapeType == null) {
throw new ElasticsearchParseException("Shape type not included");
} else if (node == null) {
throw new ElasticsearchParseException("Coordinates not included");
} else if (radius != null && GeoShapeType.CIRCLE != shapeType) {
throw new ElasticsearchParseException("Field [" + CircleBuilder.FIELD_RADIUS + "] is supported for [" + CircleBuilder.TYPE
+ "] only");
}
switch (shapeType) {
case POINT: return parsePoint(node);
case MULTIPOINT: return parseMultiPoint(node);
case LINESTRING: return parseLineString(node);
case MULTILINESTRING: return parseMultiLine(node);
case POLYGON: return parsePolygon(node);
case MULTIPOLYGON: return parseMultiPolygon(node);
case CIRCLE: return parseCircle(node, radius);
case ENVELOPE: return parseEnvelope(node);
default:
throw new ElasticsearchParseException("Shape type [" + shapeType + "] not included");
}
}
protected static PointBuilder parsePoint(CoordinateNode node) {
return newPoint(node.coordinate);
}
protected static CircleBuilder parseCircle(CoordinateNode coordinates, Distance radius) {
return newCircleBuilder().center(coordinates.coordinate).radius(radius);
}
protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) {
return newEnvelope().topLeft(coordinates.children.get(0).coordinate).bottomRight(coordinates.children.get(1).coordinate);
}
protected static MultiPointBuilder parseMultiPoint(CoordinateNode coordinates) {
MultiPointBuilder points = new MultiPointBuilder();
for (CoordinateNode node : coordinates.children) {
points.point(node.coordinate);
}
return points;
}
protected static LineStringBuilder parseLineString(CoordinateNode coordinates) {
LineStringBuilder line = newLineString();
for (CoordinateNode node : coordinates.children) {
line.point(node.coordinate);
}
return line;
}
protected static MultiLineStringBuilder parseMultiLine(CoordinateNode coordinates) {
MultiLineStringBuilder multiline = newMultiLinestring();
for (CoordinateNode node : coordinates.children) {
multiline.linestring(parseLineString(node));
}
return multiline;
}
protected static PolygonBuilder parsePolygon(CoordinateNode coordinates) {
LineStringBuilder shell = parseLineString(coordinates.children.get(0));
PolygonBuilder polygon = new PolygonBuilder(shell.points);
for (int i = 1; i < coordinates.children.size(); i++) {
polygon.hole(parseLineString(coordinates.children.get(i)));
}
return polygon;
}
protected static MultiPolygonBuilder parseMultiPolygon(CoordinateNode coordinates) {
MultiPolygonBuilder polygons = newMultiPolygon();
for (CoordinateNode node : coordinates.children) {
polygons.polygon(parsePolygon(node));
}
return polygons;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_geo_builders_ShapeBuilder.java
|
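The static factory methods on ShapeBuilder above can be chained into concrete shapes; a brief sketch using only the builders and calls visible in the snippet (the coordinate and radius values are arbitrary):
// Sketch only: relies on newPoint/newEnvelope/newCircleBuilder as defined above.
Shape point = ShapeBuilder.newPoint(-100.0, 45.0).build();
Shape box = ShapeBuilder.newEnvelope()
        .topLeft(new Coordinate(-105.0, 50.0))
        .bottomRight(new Coordinate(-95.0, 40.0))
        .build();
Shape circle = ShapeBuilder.newCircleBuilder()
        .center(new Coordinate(-100.0, 45.0))
        .radius(Distance.parseDistance("100km"))
        .build();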
1,298 |
public class HadoopPipeline {
private static final Logger log =
LoggerFactory.getLogger(HadoopPipeline.class);
// used to validate closure parse tree
protected static final ScriptEngine engine = new GroovyScriptEngineImpl();
public static final String PIPELINE_IS_LOCKED = "No more steps are possible as pipeline is locked";
protected final HadoopCompiler compiler;
protected final HadoopGraph graph;
protected final State state;
protected final List<String> stringRepresentation = new ArrayList<String>();
private Compare convert(final com.tinkerpop.gremlin.Tokens.T compare) {
if (compare.equals(com.tinkerpop.gremlin.Tokens.T.eq))
return Compare.EQUAL;
else if (compare.equals(com.tinkerpop.gremlin.Tokens.T.neq))
return Compare.NOT_EQUAL;
else if (compare.equals(com.tinkerpop.gremlin.Tokens.T.gt))
return Compare.GREATER_THAN;
else if (compare.equals(com.tinkerpop.gremlin.Tokens.T.gte))
return Compare.GREATER_THAN_EQUAL;
else if (compare.equals(com.tinkerpop.gremlin.Tokens.T.lt))
return Compare.LESS_THAN;
else
return Compare.LESS_THAN_EQUAL;
}
protected class State {
private Class<? extends Element> elementType;
private String propertyKey;
private Class<? extends WritableComparable> propertyType;
private int step = -1;
private boolean locked = false;
private Map<String, Integer> namedSteps = new HashMap<String, Integer>();
public State set(Class<? extends Element> elementType) {
if (!elementType.equals(Vertex.class) && !elementType.equals(Edge.class))
throw new IllegalArgumentException("The element class type must be either Vertex or Edge");
this.elementType = elementType;
return this;
}
public Class<? extends Element> getElementType() {
return this.elementType;
}
public boolean atVertex() {
if (null == this.elementType)
throw new IllegalStateException("No element type can be inferred: start vertices (or edges) set must be defined");
return this.elementType.equals(Vertex.class);
}
public State setProperty(final String key, final Class type) {
this.propertyKey = key;
this.propertyType = convertJavaToHadoop(type);
return this;
}
public Pair<String, Class<? extends WritableComparable>> popProperty() {
if (null == this.propertyKey)
return null;
Pair<String, Class<? extends WritableComparable>> pair = new Pair<String, Class<? extends WritableComparable>>(this.propertyKey, this.propertyType);
this.propertyKey = null;
this.propertyType = null;
return pair;
}
public int incrStep() {
return ++this.step;
}
public int getStep() {
return this.step;
}
public void assertNotLocked() {
if (this.locked) throw new IllegalStateException(PIPELINE_IS_LOCKED);
}
public void assertNoProperty() {
if (this.propertyKey != null)
throw new IllegalStateException("This step can not follow a property reference");
}
public void assertAtVertex() {
if (!this.atVertex())
throw new IllegalStateException("This step can not follow an edge-based step");
}
public void assertAtEdge() {
if (this.atVertex())
throw new IllegalStateException("This step can not follow a vertex-based step");
}
public boolean isLocked() {
return this.locked;
}
public void lock() {
this.locked = true;
}
public void addStep(final String name) {
if (this.step == -1)
throw new IllegalArgumentException("There is no previous step to name");
this.namedSteps.put(name, this.step);
}
public int getStep(final String name) {
final Integer i = this.namedSteps.get(name);
if (null == i)
throw new IllegalArgumentException("There is no step identified by: " + name);
else
return i;
}
}
////////////////////////////////
////////////////////////////////
////////////////////////////////
/**
* Construct a HadoopPipeline
*
* @param graph the HadoopGraph that is the source of the traversal
*/
public HadoopPipeline(final HadoopGraph graph) {
this.graph = graph;
this.compiler = HadoopCompatLoader.getCompat().newCompiler(graph);
this.state = new State();
if (MapReduceFormat.class.isAssignableFrom(this.graph.getGraphInputFormat())) {
try {
((Class<? extends MapReduceFormat>) this.graph.getGraphInputFormat()).getConstructor().newInstance().addMapReduceJobs(this.compiler);
} catch (Exception e) {
throw new RuntimeException(e.getMessage(), e);
}
}
if (graph.hasEdgeCopyDirection()) {
Direction ecDir = graph.getEdgeCopyDirection();
this.compiler.addMapReduce(EdgeCopyMapReduce.Map.class,
null,
EdgeCopyMapReduce.Reduce.class,
null,
LongWritable.class,
Holder.class,
NullWritable.class,
FaunusVertex.class,
EdgeCopyMapReduce.createConfiguration(ecDir));
}
}
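    // Usage sketch (comment only, not part of the original class): a pipeline is built
    // fluently from a HadoopGraph and then extended with the steps below, e.g.
    //   HadoopPipeline g = new HadoopPipeline(graph);
    //   g.V().out("knows").property("name", String.class);
    // where "knows" and "name" are hypothetical edge labels / property keys.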
//////// TRANSFORMS
/**
* The identity step does not alter the graph in any way.
* It has the benefit of emitting various useful graph statistic counters.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline _() {
this.state.assertNotLocked();
this.compiler.addMap(IdentityMap.Map.class,
NullWritable.class,
FaunusVertex.class,
IdentityMap.createConfiguration());
makeMapReduceString(IdentityMap.class);
return this;
}
/**
* Apply the provided closure to the current element and emit the result.
*
* @param closure the closure to apply to the element
* @return the extended HadoopPipeline
*/
public HadoopPipeline transform(final String closure) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMap(TransformMap.Map.class,
NullWritable.class,
FaunusVertex.class,
TransformMap.createConfiguration(this.state.getElementType(), this.validateClosure(closure)));
this.state.lock();
makeMapReduceString(TransformMap.class);
return this;
}
/**
* Start a traversal at all vertices in the graph.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline V() {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.set(Vertex.class);
this.compiler.addMap(VerticesMap.Map.class,
NullWritable.class,
FaunusVertex.class,
VerticesMap.createConfiguration(this.state.incrStep() != 0));
makeMapReduceString(VerticesMap.class);
return this;
}
/**
* Start a traversal at all edges in the graph.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline E() {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.set(Edge.class);
this.compiler.addMap(EdgesMap.Map.class,
NullWritable.class,
FaunusVertex.class,
EdgesMap.createConfiguration(this.state.incrStep() != 0));
makeMapReduceString(EdgesMap.class);
return this;
}
/**
* Start a traversal at the vertices identified by the provided ids.
*
* @param ids the long ids of the vertices to start the traversal from
* @return the extended HadoopPipeline
*/
public HadoopPipeline v(final long... ids) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.set(Vertex.class);
this.state.incrStep();
this.compiler.addMap(VertexMap.Map.class,
NullWritable.class,
FaunusVertex.class,
VertexMap.createConfiguration(ids));
makeMapReduceString(VertexMap.class);
return this;
}
/**
* Take outgoing labeled edges to adjacent vertices.
*
* @param labels the labels of the edges to traverse over
* @return the extended HadoopPipeline
*/
public HadoopPipeline out(final String... labels) {
return this.inOutBoth(OUT, labels);
}
/**
* Take incoming labeled edges to adjacent vertices.
*
* @param labels the labels of the edges to traverse over
* @return the extended HadoopPipeline
*/
public HadoopPipeline in(final String... labels) {
return this.inOutBoth(IN, labels);
}
/**
* Take both incoming and outgoing labeled edges to adjacent vertices.
*
* @param labels the labels of the edges to traverse over
* @return the extended HadoopPipeline
*/
public HadoopPipeline both(final String... labels) {
return this.inOutBoth(BOTH, labels);
}
private HadoopPipeline inOutBoth(final Direction direction, final String... labels) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.assertAtVertex();
this.state.incrStep();
this.compiler.addMapReduce(VerticesVerticesMapReduce.Map.class,
null,
VerticesVerticesMapReduce.Reduce.class,
null,
LongWritable.class,
Holder.class,
NullWritable.class,
FaunusVertex.class,
VerticesVerticesMapReduce.createConfiguration(direction, labels));
this.state.set(Vertex.class);
makeMapReduceString(VerticesVerticesMapReduce.class, direction.name(), Arrays.asList(labels));
return this;
}
/**
* Take outgoing labeled edges to incident edges.
*
* @param labels the labels of the edges to traverse over
* @return the extended HadoopPipeline
*/
public HadoopPipeline outE(final String... labels) {
return this.inOutBothE(OUT, labels);
}
/**
* Take incoming labeled edges to incident edges.
*
* @param labels the labels of the edges to traverse over
* @return the extended HadoopPipeline
*/
public HadoopPipeline inE(final String... labels) {
return this.inOutBothE(IN, labels);
}
/**
* Take both incoming and outgoing labeled edges to incident edges.
*
* @param labels the labels of the edges to traverse over
* @return the extended HadoopPipeline
*/
public HadoopPipeline bothE(final String... labels) {
return this.inOutBothE(BOTH, labels);
}
private HadoopPipeline inOutBothE(final Direction direction, final String... labels) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.assertAtVertex();
this.state.incrStep();
this.compiler.addMapReduce(VerticesEdgesMapReduce.Map.class,
null,
VerticesEdgesMapReduce.Reduce.class,
null,
LongWritable.class,
Holder.class,
NullWritable.class,
FaunusVertex.class,
VerticesEdgesMapReduce.createConfiguration(direction, labels));
this.state.set(Edge.class);
makeMapReduceString(VerticesEdgesMapReduce.class, direction.name(), Arrays.asList(labels));
return this;
}
/**
* Go to the outgoing/tail vertex of the edge.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline outV() {
return this.inOutBothV(OUT);
}
/**
* Go to the incoming/head vertex of the edge.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline inV() {
return this.inOutBothV(IN);
}
/**
* Go to both the incoming/head and outgoing/tail vertices of the edge.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline bothV() {
return this.inOutBothV(BOTH);
}
private HadoopPipeline inOutBothV(final Direction direction) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.assertAtEdge();
this.state.incrStep();
this.compiler.addMap(EdgesVerticesMap.Map.class,
NullWritable.class,
FaunusVertex.class,
EdgesVerticesMap.createConfiguration(direction));
this.state.set(Vertex.class);
makeMapReduceString(EdgesVerticesMap.class, direction.name());
return this;
}
/**
* Emit the property value of an element.
*
* @param key the key identifying the property
* @param type the class of the property value (so Hadoop can intelligently handle the result)
* @return the extended HadoopPipeline
*/
public HadoopPipeline property(final String key, final Class type) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.setProperty(key, type);
return this;
}
/**
* Emit the property value of an element.
*
* @param key the key identifying the property
* @return the extended HadoopPipeline
*/
public HadoopPipeline property(final String key) {
return this.property(key, String.class);
}
/**
* Emit a string representation of the property map.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline map() {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMap(PropertyMapMap.Map.class,
LongWritable.class,
Text.class,
PropertyMapMap.createConfiguration(this.state.getElementType()));
makeMapReduceString(PropertyMap.class);
this.state.lock();
return this;
}
/**
* Emit the label of the current edge.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline label() {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.assertAtEdge();
this.property(Tokens.LABEL, String.class);
return this;
}
/**
* Emit the path taken from start to current element.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline path() {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMap(PathMap.Map.class,
NullWritable.class,
Text.class,
PathMap.createConfiguration(this.state.getElementType()));
this.state.lock();
makeMapReduceString(PathMap.class);
return this;
}
/**
* Order the previous property value results and emit them with another element property value.
* It is important to emit the previous property with a provided type, else it is ordered lexicographically.
*
* @param order increasing or decreasing order
* @param elementKey the key of the element to associate it with
* @return the extended HadoopPipeline
*/
public HadoopPipeline order(final TransformPipe.Order order, final String elementKey) {
this.state.assertNotLocked();
final Pair<String, Class<? extends WritableComparable>> pair = this.state.popProperty();
if (null != pair) {
this.compiler.addMapReduce(OrderMapReduce.Map.class,
null,
OrderMapReduce.Reduce.class,
OrderMapReduce.createComparator(order, pair.getB()),
pair.getB(),
Text.class,
Text.class,
pair.getB(),
OrderMapReduce.createConfiguration(this.state.getElementType(), pair.getA(), pair.getB(), elementKey));
makeMapReduceString(OrderMapReduce.class, order.name(), elementKey);
} else {
throw new IllegalArgumentException("There is no specified property to order on");
}
this.state.lock();
return this;
}
/**
* Order the previous property value results.
*
* @param order increasing or decreasing order
* @return the extended HadoopPipeline
*/
public HadoopPipeline order(final TransformPipe.Order order) {
return this.order(order, Tokens.ID);
}
/**
* Order the previous property value results and emit them with another element property value.
* It is important to emit the previous property with a provided type, else it is ordered lexicographically.
*
* @param order increasing or decreasing order
* @param elementKey the key of the element to associate it with
* @return the extended HadoopPipeline
*/
public HadoopPipeline order(final com.tinkerpop.gremlin.Tokens.T order, final String elementKey) {
return this.order(com.tinkerpop.gremlin.Tokens.mapOrder(order), elementKey);
}
/**
* Order the previous property value results.
*
* @param order increasing or decreasing order
* @return the extended HadoopPipeline
*/
public HadoopPipeline order(final com.tinkerpop.gremlin.Tokens.T order) {
return this.order(com.tinkerpop.gremlin.Tokens.mapOrder(order));
}
//////// FILTERS
/**
* Emit or deny the current element based upon the provided boolean-based closure.
*
* @param closure return true to emit and false to remove.
* @return the extended HadoopPipeline
*/
public HadoopPipeline filter(final String closure) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMap(FilterMap.Map.class,
NullWritable.class,
FaunusVertex.class,
FilterMap.createConfiguration(this.state.getElementType(), this.validateClosure(closure)));
makeMapReduceString(FilterMap.class);
return this;
}
/**
* Emit the current element if it has a property value comparable to the provided values.
*
* @param key the property key of the element
* @param compare the comparator
* @param values the values to compare against where only one needs to succeed (or'd)
* @return the extended HadoopPipeline
*/
public HadoopPipeline has(final String key, final com.tinkerpop.gremlin.Tokens.T compare, final Object... values) {
return this.has(key, convert(compare), values);
}
/**
* Emit the current element if it does not have a property value comparable to the provided values.
*
* @param key the property key of the element
* @param compare the comparator (will be not'd)
* @param values the values to compare against where only one needs to succeed (or'd)
* @return the extended HadoopPipeline
*/
public HadoopPipeline hasNot(final String key, final com.tinkerpop.gremlin.Tokens.T compare, final Object... values) {
return this.hasNot(key, convert(compare), values);
}
/**
* Emit the current element if it has a property value comparable to the provided values.
*
* @param key the property key of the element
* @param compare the comparator
* @param values the values to compare against where only one needs to succeed (or'd)
* @return the extended HadoopPipeline
*/
public HadoopPipeline has(final String key, final Compare compare, final Object... values) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMap(PropertyFilterMap.Map.class,
NullWritable.class,
FaunusVertex.class,
PropertyFilterMap.createConfiguration(this.state.getElementType(), key, compare, values));
makeMapReduceString(PropertyFilterMap.class, compare.name(), Arrays.asList(values));
return this;
}
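    // Example (illustrative comment): keep only elements whose hypothetical "age"
    // property is greater than 30, then traverse outgoing "battled" edges:
    //   pipeline.V().has("age", Compare.GREATER_THAN, 30).out("battled");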
/**
* Emit the current element if it does not have a property value comparable to the provided values.
*
* @param key the property key of the element
* @param compare the comparator (will be not'd)
* @param values the values to compare against where only one needs to succeed (or'd)
* @return the extended HadoopPipeline
*/
public HadoopPipeline hasNot(final String key, final Compare compare, final Object... values) {
return this.has(key, compare.opposite(), values);
}
/**
* Emit the current element if it has a property value equal to the provided values.
*
* @param key the property key of the element
* @param values the values to compare against where only one needs to succeed (or'd)
* @return the extended HadoopPipeline
*/
public HadoopPipeline has(final String key, final Object... values) {
return (values.length == 0) ? this.has(key, Compare.NOT_EQUAL, new Object[]{null}) : this.has(key, Compare.EQUAL, values);
}
/**
* Emit the current element if it does not have a property value equal to the provided values.
*
* @param key the property key of the element
* @param values the values to compare against where only one needs to succeed (or'd)
* @return the extended HadoopPipeline
*/
public HadoopPipeline hasNot(final String key, final Object... values) {
return (values.length == 0) ? this.has(key, Compare.EQUAL, new Object[]{null}) : this.has(key, Compare.NOT_EQUAL, values);
}
/**
* Emit the current element if it has a property value within the provided range.
*
* @param key the property key of the element
* @param startValue the start of the range (inclusive)
* @param endValue the end of the range (exclusive)
* @return the extended HadoopPipeline
*/
public HadoopPipeline interval(final String key, final Object startValue, final Object endValue) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMap(IntervalFilterMap.Map.class,
NullWritable.class,
FaunusVertex.class,
IntervalFilterMap.createConfiguration(this.state.getElementType(), key, startValue, endValue));
makeMapReduceString(IntervalFilterMap.class, key, startValue, endValue);
return this;
}
/**
* Remove any duplicate traversers at a single element.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline dedup() {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMap(DuplicateFilterMap.Map.class,
NullWritable.class,
FaunusVertex.class,
DuplicateFilterMap.createConfiguration(this.state.getElementType()));
makeMapReduceString(DuplicateFilterMap.class);
return this;
}
/**
* Go back to an element a named step ago.
* Currently only backing up to vertices is supported.
*
* @param step the name of the step to back up to
* @return the extended HadoopPipeline
*/
public HadoopPipeline back(final String step) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMapReduce(BackFilterMapReduce.Map.class,
BackFilterMapReduce.Combiner.class,
BackFilterMapReduce.Reduce.class,
LongWritable.class,
Holder.class,
NullWritable.class,
FaunusVertex.class,
BackFilterMapReduce.createConfiguration(this.state.getElementType(), this.state.getStep(step)));
makeMapReduceString(BackFilterMapReduce.class, step);
return this;
}
/*public HadoopPipeline back(final int numberOfSteps) {
this.state.assertNotLocked();
this.compiler.backFilterMapReduce(this.state.getElementType(), this.state.getStep() - numberOfSteps);
this.compiler.setPathEnabled(true);
makeMapReduceString(BackFilterMapReduce.class, numberOfSteps);
return this;
}*/
/**
* Emit the element only if it was arrived at via a path that does not have cycles in it.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline simplePath() {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMap(CyclicPathFilterMap.Map.class,
NullWritable.class,
FaunusVertex.class,
CyclicPathFilterMap.createConfiguration(this.state.getElementType()));
makeMapReduceString(CyclicPathFilterMap.class);
return this;
}
//////// SIDEEFFECTS
/**
* Emit the element, but compute some sideeffect in the process.
* For example, mutate the properties of the element.
*
* @param closure the sideeffect closure whose results are ignored.
* @return the extended HadoopPipeline
*/
public HadoopPipeline sideEffect(final String closure) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.compiler.addMap(SideEffectMap.Map.class,
NullWritable.class,
FaunusVertex.class,
SideEffectMap.createConfiguration(this.state.getElementType(), this.validateClosure(closure)));
makeMapReduceString(SideEffectMap.class);
return this;
}
/**
* Name a step in order to reference it later in the expression.
*
* @param name the string representation of the name
* @return the extended HadoopPipeline
*/
public HadoopPipeline as(final String name) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.addStep(name);
final String string = "As(" + name + "," + this.stringRepresentation.get(this.state.getStep(name)) + ")";
this.stringRepresentation.set(this.state.getStep(name), string);
return this;
}
/**
* Have the elements from the named previous step project an edge to the current vertex with the provided label.
* If a merge weight key is provided, duplicate edges between the same two vertices are counted and the count is stored as a weight.
* Passing "_" as the merge weight key merges all duplicates without adding a weight to the resultant edge.
*
* @param step the name of the step where the source vertices were
* @param label the label of the edge to project
* @param mergeWeightKey the property key to use for weight
* @return the extended HadoopPipeline
*/
public HadoopPipeline linkIn(final String label, final String step, final String mergeWeightKey) {
return this.link(IN, label, step, mergeWeightKey);
}
/**
* Have the elements from the named previous step project an edge to the current vertex with the provided label.
*
* @param step the name of the step where the source vertices were
* @param label the label of the edge to project
* @return the extended HadoopPipeline
*/
public HadoopPipeline linkIn(final String label, final String step) {
return this.link(IN, label, step, null);
}
/**
* Have the current vertex project an edge with the provided label to the elements remembered at the named step.
* If a merge weight key is provided, duplicate edges between the same two vertices are merged into a single edge
* whose weight property counts the duplicates. Passing "_" as the merge weight key merges all duplicates
* without adding a weight to the resulting edge.
*
* @param label          the label of the edge to project
* @param step           the name of the step where the target vertices were
* @param mergeWeightKey the property key to use for the merged weight
* @return the extended HadoopPipeline
*/
public HadoopPipeline linkOut(final String label, final String step, final String mergeWeightKey) {
return link(OUT, label, step, mergeWeightKey);
}
/**
* Have the current vertex project an edge with the provided label to the elements remembered at the named step.
*
* @param label the label of the edge to project
* @param step  the name of the step where the target vertices were
* @return the extended HadoopPipeline
*/
public HadoopPipeline linkOut(final String label, final String step) {
return this.link(OUT, label, step, null);
}
private HadoopPipeline link(final Direction direction, final String label, final String step, final String mergeWeightKey) {
this.state.assertNotLocked();
this.state.assertNoProperty();
Preconditions.checkNotNull(direction);
this.compiler.addMapReduce(LinkMapReduce.Map.class,
LinkMapReduce.Combiner.class,
LinkMapReduce.Reduce.class,
null,
LongWritable.class,
Holder.class,
NullWritable.class,
FaunusVertex.class,
LinkMapReduce.createConfiguration(direction, label, this.state.getStep(step), mergeWeightKey));
log.debug("Added {} job with direction {}, label {}, step {}, merge weight key {}", LinkMapReduce.class.getSimpleName(), direction, label, step, mergeWeightKey);
if (null != mergeWeightKey)
makeMapReduceString(LinkMapReduce.class, direction.name(), label, step, mergeWeightKey);
else
makeMapReduceString(LinkMapReduce.class, direction.name(), label, step);
return this;
}
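// Illustrative usage (a sketch; "father" and "grandfather" are hypothetical edge labels):
//   pipeline.V().as("x").out("father").out("father").linkIn("grandfather", "x")
// has each vertex remembered at step "x" project a "grandfather" edge to the currently traversed vertex.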
/**
* Count the number of times each element (or property value) at the previous step has been traversed to.
* The results are stored in the job's side-effect files in HDFS.
*
* @return the extended HadoopPipeline.
*/
public HadoopPipeline groupCount() {
this.state.assertNotLocked();
final Pair<String, Class<? extends WritableComparable>> pair = this.state.popProperty();
if (null == pair) {
return this.groupCount(null, null);
} else {
this.compiler.addMapReduce(ValueGroupCountMapReduce.Map.class,
ValueGroupCountMapReduce.Combiner.class,
ValueGroupCountMapReduce.Reduce.class,
pair.getB(),
LongWritable.class,
pair.getB(),
LongWritable.class,
ValueGroupCountMapReduce.createConfiguration(this.state.getElementType(), pair.getA(), pair.getB()));
makeMapReduceString(ValueGroupCountMapReduce.class, pair.getA());
}
return this;
}
/**
* Apply the provided closure to the incoming element to determine its grouping key.
* The count for that key is incremented by 1.
* The results are stored in the job's side-effect files in HDFS.
*
* @param keyClosure the closure applied to each element to compute its grouping key
* @return the extended HadoopPipeline.
*/
public HadoopPipeline groupCount(final String keyClosure) {
return this.groupCount(keyClosure, null);
}
/**
* Apply the key closure to the incoming element to determine its grouping key.
* Then apply the value closure to the current element to determine the count increment.
* The results are stored in the job's side-effect files in HDFS.
*
* @param keyClosure   the closure applied to each element to compute its grouping key
* @param valueClosure the closure applied to each element to compute the count increment
* @return the extended HadoopPipeline.
*/
public HadoopPipeline groupCount(final String keyClosure, final String valueClosure) {
this.state.assertNotLocked();
this.compiler.addMapReduce(GroupCountMapReduce.Map.class,
GroupCountMapReduce.Combiner.class,
GroupCountMapReduce.Reduce.class,
Text.class,
LongWritable.class,
Text.class,
LongWritable.class,
GroupCountMapReduce.createConfiguration(this.state.getElementType(),
this.validateClosure(keyClosure), this.validateClosure(valueClosure)));
makeMapReduceString(GroupCountMapReduce.class);
return this;
}
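// Illustrative usage (a sketch; both closures and the 'type' property key are hypothetical):
//   pipeline.V().groupCount("{it.getProperty('type')}", "{1L}")
// tallies vertices by their 'type' property and writes the counts to the job's side-effect files in HDFS.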
private HadoopPipeline commit(final Tokens.Action action) {
this.state.assertNotLocked();
this.state.assertNoProperty();
if (this.state.atVertex()) {
this.compiler.addMapReduce(CommitVerticesMapReduce.Map.class,
CommitVerticesMapReduce.Combiner.class,
CommitVerticesMapReduce.Reduce.class,
null,
LongWritable.class,
Holder.class,
NullWritable.class,
FaunusVertex.class,
CommitVerticesMapReduce.createConfiguration(action));
makeMapReduceString(CommitVerticesMapReduce.class, action.name());
} else {
this.compiler.addMap(CommitEdgesMap.Map.class,
NullWritable.class,
FaunusVertex.class,
CommitEdgesMap.createConfiguration(action));
makeMapReduceString(CommitEdgesMap.class, action.name());
}
return this;
}
/**
* Drop all the elements of the respective type at the current step. Keep all others.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline drop() {
return this.commit(Tokens.Action.DROP);
}
/**
* Keep all the elements of the respective type at the current step. Drop all others.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline keep() {
return this.commit(Tokens.Action.KEEP);
}
public HadoopPipeline script(final String scriptUri, final String... args) {
this.state.assertNotLocked();
this.state.assertNoProperty();
this.state.assertAtVertex();
this.compiler.addMap(ScriptMap.Map.class,
NullWritable.class,
FaunusVertex.class,
ScriptMap.createConfiguration(scriptUri, args));
makeMapReduceString(ScriptMap.class, scriptUri);
// this.state.lock();
return this;
}
/////////////// UTILITIES
/**
* Count the number of traversers currently in the graph.
* The count is emitted as the final output of the pipeline, which is locked afterwards.
*
* @return the extended HadoopPipeline
*/
public HadoopPipeline count() {
this.state.assertNotLocked();
this.compiler.addMapReduce(CountMapReduce.Map.class,
CountMapReduce.Combiner.class,
CountMapReduce.Reduce.class,
NullWritable.class,
LongWritable.class,
NullWritable.class,
LongWritable.class,
CountMapReduce.createConfiguration(this.state.getElementType()));
makeMapReduceString(CountMapReduce.class);
this.state.lock();
return this;
}
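// Illustrative usage (a sketch; V() and out(...) are steps assumed from earlier in this class):
//   pipeline.V().out("knows").count()
// emits a single long value, the number of traversers at the final step, and locks the pipeline.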
public String toString() {
return this.stringRepresentation.toString();
}
private HadoopPipeline done() {
if (!this.state.isLocked()) {
final Pair<String, Class<? extends WritableComparable>> pair = this.state.popProperty();
if (null != pair) {
this.compiler.addMap(PropertyMap.Map.class,
LongWritable.class,
pair.getB(),
PropertyMap.createConfiguration(this.state.getElementType(), pair.getA(), pair.getB()));
makeMapReduceString(PropertyMap.class, pair.getA());
this.state.lock();
}
}
return this;
}
/**
* Submit the HadoopPipeline to the Hadoop cluster.
*
* @throws Exception
*/
public void submit() throws Exception {
submit(Tokens.EMPTY_STRING, false);
}
/**
* Submit the HadoopPipeline to the Hadoop cluster, optionally emitting the Titan/Hadoop header in the logs.
*
* @param script     the Gremlin script being submitted
* @param showHeader whether to emit the Titan/Hadoop header in the logs
* @throws Exception
*/
public void submit(final String script, final Boolean showHeader) throws Exception {
this.done();
if (MapReduceFormat.class.isAssignableFrom(this.graph.getGraphOutputFormat())) {
this.state.assertNotLocked();
((Class<? extends MapReduceFormat>) this.graph.getGraphOutputFormat()).getConstructor().newInstance().addMapReduceJobs(this.compiler);
}
this.compiler.completeSequence();
ToolRunner.run(this.compiler, new String[]{script, showHeader.toString()});
}
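// Illustrative usage (a sketch; assumes the script text is used only for log output):
//   HadoopPipeline pipeline = new HadoopPipeline(graph);
//   pipeline.V().out("knows").count();
//   pipeline.submit("g.V.out('knows').count()", true);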
/**
* Get a reference to the graph currently being used in this HadoopPipeline.
*
* @return the HadoopGraph
*/
public HadoopGraph getGraph() {
return this.graph;
}
public HadoopCompiler getCompiler() {
return this.compiler;
}
private String validateClosure(String closure) {
if (closure == null)
return null;
try {
engine.eval(closure);
return closure;
} catch (ScriptException e) {
// The closure may not compile as written, for example when the implicit parameter is omitted.
// Retry with an explicit "it->" parameter inserted after the opening brace so it parses as a closure.
closure = closure.trim();
closure = closure.replaceFirst("\\{", "{it->");
try {
engine.eval(closure);
return closure;
} catch (ScriptException e1) {
throw new IllegalArgumentException("The provided closure does not compile: " + e.getMessage(), e);
}
}
}
private void makeMapReduceString(final Class klass, final Object... arguments) {
String result = klass.getSimpleName();
if (arguments.length > 0) {
result = result + "(";
for (final Object arg : arguments) {
result = result + arg + ",";
}
result = result.substring(0, result.length() - 1) + ")";
}
this.stringRepresentation.add(result);
}
private Class<? extends WritableComparable> convertJavaToHadoop(final Class klass) {
if (klass.equals(String.class)) {
return Text.class;
} else if (klass.equals(Integer.class)) {
return IntWritable.class;
} else if (klass.equals(Double.class)) {
return DoubleWritable.class;
} else if (klass.equals(Long.class)) {
return LongWritable.class;
} else if (klass.equals(Float.class)) {
return FloatWritable.class;
} else if (klass.equals(Boolean.class)) {
return BooleanWritable.class;
} else {
throw new IllegalArgumentException("The provided class is not supported: " + klass.getSimpleName());
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_HadoopPipeline.java
|