Unnamed: 0
int64 0
6.45k
| func
stringlengths 37
143k
| target
class label 2
classes | project
stringlengths 33
157
|
---|---|---|---|
4 |
killerHook = new Thread() {
public void run() {
killAndUnregisterHook(stat);
}
};
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_DaemonRunner.java
|
808 |
public class TransportPercolateAction extends TransportBroadcastOperationAction<PercolateRequest, PercolateResponse, PercolateShardRequest, PercolateShardResponse> {
private final PercolatorService percolatorService;
private final TransportGetAction getAction;
@Inject
public TransportPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, PercolatorService percolatorService,
TransportGetAction getAction) {
super(settings, threadPool, clusterService, transportService);
this.percolatorService = percolatorService;
this.getAction = getAction;
}
@Override
protected void doExecute(final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
request.startTime = System.currentTimeMillis();
if (request.getRequest() != null) {
getAction.execute(request.getRequest(), new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse getResponse) {
if (!getResponse.isExists()) {
onFailure(new DocumentMissingException(null, request.getRequest().type(), request.getRequest().id()));
return;
}
BytesReference docSource = getResponse.getSourceAsBytesRef();
TransportPercolateAction.super.doExecute(new PercolateRequest(request, docSource), listener);
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
} else {
super.doExecute(request, listener);
}
}
@Override
protected String executor() {
return ThreadPool.Names.PERCOLATE;
}
@Override
protected PercolateRequest newRequest() {
return new PercolateRequest();
}
@Override
protected String transportAction() {
return PercolateAction.NAME;
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, PercolateRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, PercolateRequest request, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
}
@Override
protected PercolateResponse newResponse(PercolateRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
return reduce(request, shardsResponses, percolatorService);
}
public static PercolateResponse reduce(PercolateRequest request, AtomicReferenceArray shardsResponses, PercolatorService percolatorService) {
int successfulShards = 0;
int failedShards = 0;
List<PercolateShardResponse> shardResults = null;
List<ShardOperationFailedException> shardFailures = null;
byte percolatorTypeId = 0x00;
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// simply ignore non active shards
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = newArrayList();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
PercolateShardResponse percolateShardResponse = (PercolateShardResponse) shardResponse;
successfulShards++;
if (!percolateShardResponse.isEmpty()) {
if (shardResults == null) {
percolatorTypeId = percolateShardResponse.percolatorTypeId();
shardResults = newArrayList();
}
shardResults.add(percolateShardResponse);
}
}
}
if (shardResults == null) {
long tookInMillis = System.currentTimeMillis() - request.startTime;
PercolateResponse.Match[] matches = request.onlyCount() ? null : PercolateResponse.EMPTY;
return new PercolateResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, tookInMillis, matches);
} else {
PercolatorService.ReduceResult result = percolatorService.reduce(percolatorTypeId, shardResults);
long tookInMillis = System.currentTimeMillis() - request.startTime;
return new PercolateResponse(
shardsResponses.length(), successfulShards, failedShards, shardFailures,
result.matches(), result.count(), tookInMillis, result.reducedFacets(), result.reducedAggregations()
);
}
}
@Override
protected PercolateShardRequest newShardRequest() {
return new PercolateShardRequest();
}
@Override
protected PercolateShardRequest newShardRequest(ShardRouting shard, PercolateRequest request) {
return new PercolateShardRequest(shard.index(), shard.id(), request);
}
@Override
protected PercolateShardResponse newShardResponse() {
return new PercolateShardResponse();
}
@Override
protected GroupShardsIterator shards(ClusterState clusterState, PercolateRequest request, String[] concreteIndices) {
Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference());
}
@Override
protected PercolateShardResponse shardOperation(PercolateShardRequest request) throws ElasticsearchException {
try {
return percolatorService.percolate(request);
} catch (Throwable e) {
logger.trace("[{}][{}] failed to percolate", e, request.index(), request.shardId());
ShardId shardId = new ShardId(request.index(), request.shardId());
throw new PercolateException(shardId, "failed to percolate", e);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_percolate_TransportPercolateAction.java
|
295 |
new Thread() {
public void run() {
if (!l.tryLock()) {
latch.countDown();
}
try {
if (l.tryLock(5, TimeUnit.SECONDS)) {
latch.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientLockTest.java
|
2,820 |
nodeEngine.getExecutionService().schedule(new Runnable() {
@Override
public void run() {
resumeMigration();
}
}, migrationActivationDelay, TimeUnit.MILLISECONDS);
| 1no label
|
hazelcast_src_main_java_com_hazelcast_partition_impl_InternalPartitionServiceImpl.java
|
478 |
public class GetAliasesResponse extends ActionResponse {
private ImmutableOpenMap<String, List<AliasMetaData>> aliases = ImmutableOpenMap.of();
public GetAliasesResponse(ImmutableOpenMap<String, List<AliasMetaData>> aliases) {
this.aliases = aliases;
}
GetAliasesResponse() {
}
public ImmutableOpenMap<String, List<AliasMetaData>> getAliases() {
return aliases;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
ImmutableOpenMap.Builder<String, List<AliasMetaData>> aliasesBuilder = ImmutableOpenMap.builder();
for (int i = 0; i < size; i++) {
String key = in.readString();
int valueSize = in.readVInt();
List<AliasMetaData> value = new ArrayList<AliasMetaData>(valueSize);
for (int j = 0; j < valueSize; j++) {
value.add(AliasMetaData.Builder.readFrom(in));
}
aliasesBuilder.put(key, ImmutableList.copyOf(value));
}
aliases = aliasesBuilder.build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(aliases.size());
for (ObjectObjectCursor<String, List<AliasMetaData>> entry : aliases) {
out.writeString(entry.key);
out.writeVInt(entry.value.size());
for (AliasMetaData aliasMetaData : entry.value) {
AliasMetaData.Builder.writeTo(aliasMetaData, out);
}
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_alias_get_GetAliasesResponse.java
|
3,857 |
public class HasChildFilterParser implements FilterParser {
public static final String NAME = "has_child";
@Inject
public HasChildFilterParser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query query = null;
boolean queryFound = false;
String childType = null;
int shortCircuitParentDocSet = 8192; // Tests show a cut of point between 8192 and 16384.
String filterName = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
// TODO we need to set the type, but, `query` can come before `type`...
// since we switch types, make sure we change the context
String[] origTypes = QueryParseContext.setTypesWithPrevious(childType == null ? null : new String[]{childType});
try {
query = parseContext.parseInnerQuery();
queryFound = true;
} finally {
QueryParseContext.setTypes(origTypes);
}
} else if ("filter".equals(currentFieldName)) {
// TODO handle `filter` element before `type` element...
String[] origTypes = QueryParseContext.setTypesWithPrevious(childType == null ? null : new String[]{childType});
try {
Filter innerFilter = parseContext.parseInnerFilter();
query = new XConstantScoreQuery(innerFilter);
queryFound = true;
} finally {
QueryParseContext.setTypes(origTypes);
}
} else {
throw new QueryParsingException(parseContext.index(), "[has_child] filter does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName) || "child_type".equals(currentFieldName) || "childType".equals(currentFieldName)) {
childType = parser.text();
} else if ("_scope".equals(currentFieldName)) {
throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_child] filter has been removed, use a filter as a facet_filter in the relevant global facet");
} else if ("_name".equals(currentFieldName)) {
filterName = parser.text();
} else if ("_cache".equals(currentFieldName)) {
// noop to be backwards compatible
} else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
// noop to be backwards compatible
} else if ("short_circuit_cutoff".equals(currentFieldName)) {
shortCircuitParentDocSet = parser.intValue();
} else {
throw new QueryParsingException(parseContext.index(), "[has_child] filter does not support [" + currentFieldName + "]");
}
}
}
if (!queryFound) {
throw new QueryParsingException(parseContext.index(), "[has_child] filter requires 'query' field");
}
if (query == null) {
return null;
}
if (childType == null) {
throw new QueryParsingException(parseContext.index(), "[has_child] filter requires 'type' field");
}
DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
if (childDocMapper == null) {
throw new QueryParsingException(parseContext.index(), "No mapping for for type [" + childType + "]");
}
if (!childDocMapper.parentFieldMapper().active()) {
throw new QueryParsingException(parseContext.index(), "Type [" + childType + "] does not have parent mapping");
}
String parentType = childDocMapper.parentFieldMapper().type();
// wrap the query with type query
query = new XFilteredQuery(query, parseContext.cacheFilter(childDocMapper.typeFilter(), null));
DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
if (parentDocMapper == null) {
throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] points to a non existent parent type [" + parentType + "]");
}
Filter nonNestedDocsFilter = null;
if (parentDocMapper.hasNestedObjects()) {
nonNestedDocsFilter = parseContext.cacheFilter(NonNestedDocsFilter.INSTANCE, null);
}
Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null);
Query childrenConstantScoreQuery = new ChildrenConstantScoreQuery(query, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
if (filterName != null) {
parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(childrenConstantScoreQuery));
}
boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
if (deleteByQuery) {
return new DeleteByQueryWrappingFilter(childrenConstantScoreQuery);
} else {
return new CustomQueryWrappingFilter(childrenConstantScoreQuery);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_HasChildFilterParser.java
|
5,301 |
return new Comparator<Terms.Bucket>() {
@Override
public int compare(Terms.Bucket o1, Terms.Bucket o2) {
double v1 = ((MetricsAggregator.MultiValue) aggregator).metric(valueName, ((InternalTerms.Bucket) o1).bucketOrd);
double v2 = ((MetricsAggregator.MultiValue) aggregator).metric(valueName, ((InternalTerms.Bucket) o2).bucketOrd);
// some metrics may return NaN (eg. avg, variance, etc...) in which case we'd like to push all of those to
// the bottom
if (v1 == Double.NaN) {
return asc ? 1 : -1;
}
return asc ? Double.compare(v1, v2) : Double.compare(v2, v1);
}
};
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_bucket_terms_InternalOrder.java
|
432 |
return new EventHandler<ReplicatedMapPortableEntryEvent>() {
public void handle(ReplicatedMapPortableEntryEvent event) {
V value = (V) event.getValue();
V oldValue = (V) event.getOldValue();
K key = (K) event.getKey();
Member member = getContext().getClusterService().getMember(event.getUuid());
EntryEvent<K, V> entryEvent = new EntryEvent<K, V>(getName(), member, event.getEventType().getType(), key,
oldValue, value);
switch (event.getEventType()) {
case ADDED:
listener.entryAdded(entryEvent);
break;
case REMOVED:
listener.entryRemoved(entryEvent);
break;
case UPDATED:
listener.entryUpdated(entryEvent);
break;
case EVICTED:
listener.entryEvicted(entryEvent);
break;
default:
throw new IllegalArgumentException("Not a known event type " + event.getEventType());
}
}
@Override
public void onListenerRegister() {
}
};
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientReplicatedMapProxy.java
|
92 |
private static class GenericEvent implements Serializable {
private static final long serialVersionUID = -933111044641052844L;
private int userId;
public GenericEvent(int userId) {
this.setUserId(userId);
}
public int getUserId() {
return userId;
}
public void setUserId(int userId) {
this.userId = userId;
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientEntryListenerDisconnectTest.java
|
603 |
public class GetSettingsResponse extends ActionResponse {
private ImmutableOpenMap<String, Settings> indexToSettings = ImmutableOpenMap.of();
public GetSettingsResponse(ImmutableOpenMap<String, Settings> indexToSettings) {
this.indexToSettings = indexToSettings;
}
GetSettingsResponse() {
}
public ImmutableOpenMap<String, Settings> getIndexToSettings() {
return indexToSettings;
}
public String getSetting(String index, String setting) {
Settings settings = indexToSettings.get(index);
if (setting != null) {
return settings.get(setting);
} else {
return null;
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
ImmutableOpenMap.Builder<String, Settings> builder = ImmutableOpenMap.builder();
for (int i = 0; i < size; i++) {
builder.put(in.readString(), ImmutableSettings.readSettingsFromStream(in));
}
indexToSettings = builder.build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(indexToSettings.size());
for (ObjectObjectCursor<String, Settings> cursor : indexToSettings) {
out.writeString(cursor.key);
ImmutableSettings.writeSettingsToStream(cursor.value, out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_settings_get_GetSettingsResponse.java
|
287 |
static class TryLockThread extends LockTestThread {
public TryLockThread(ILock lock, AtomicInteger upTotal, AtomicInteger downTotal){
super(lock, upTotal, downTotal);
}
public void doRun() throws Exception{
if ( lock.tryLock() ) {
work();
lock.unlock();
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientConcurrentLockTest.java
|
6 |
public class HBaseStatus {
private static final Logger log =
LoggerFactory.getLogger(HBaseStatus.class);
private final File file;
private final String version;
private HBaseStatus(File file, String version) {
Preconditions.checkNotNull(file);
Preconditions.checkNotNull(version);
this.file = file;
this.version = version;
}
public String getVersion() {
return version;
}
public File getFile() {
return file;
}
public String getScriptDir() {
return HBaseStorageSetup.getScriptDirForHBaseVersion(version);
}
public String getConfDir() {
return HBaseStorageSetup.getConfDirForHBaseVersion(version);
}
public static HBaseStatus read(String path) {
File pid = new File(path);
if (!pid.exists()) {
log.info("HBase pidfile {} does not exist", path);
return null;
}
BufferedReader pidReader = null;
try {
pidReader = new BufferedReader(new FileReader(pid));
HBaseStatus s = parsePidFile(pid, pidReader);
log.info("Read HBase status from {}", path);
return s;
} catch (HBasePidfileParseException e) {
log.warn("Assuming HBase is not running", e);
} catch (IOException e) {
log.warn("Assuming HBase is not running", e);
} finally {
IOUtils.closeQuietly(pidReader);
}
return null;
}
public static HBaseStatus write(String path, String hbaseVersion) {
File f = new File(path);
FileOutputStream fos = null;
try {
fos = new FileOutputStream(path);
fos.write(String.format("%s", hbaseVersion).getBytes());
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
IOUtils.closeQuietly(fos);
}
return new HBaseStatus(f, hbaseVersion);
}
private static HBaseStatus parsePidFile(File f, BufferedReader br) throws HBasePidfileParseException, IOException {
String l = br.readLine();
if (null == l || "".equals(l.trim())) {
throw new HBasePidfileParseException("Empty HBase statusfile " + f);
}
HBaseStatus stat = new HBaseStatus(f, l.trim());
return stat;
}
private static class HBasePidfileParseException extends Exception {
private static final long serialVersionUID = 1L;
public HBasePidfileParseException(String message) {
super(message);
}
}
}
| 0true
|
titan-hbase-parent_titan-hbase-core_src_test_java_com_thinkaurelius_titan_HBaseStatus.java
|
528 |
public class DimensionUnitOfMeasureType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, DimensionUnitOfMeasureType> TYPES = new LinkedHashMap<String, DimensionUnitOfMeasureType>();
public static final DimensionUnitOfMeasureType CENTIMETERS = new DimensionUnitOfMeasureType("CENTIMETERS", "Centimeters");
public static final DimensionUnitOfMeasureType METERS = new DimensionUnitOfMeasureType("METERS", "Meters");
public static final DimensionUnitOfMeasureType INCHES = new DimensionUnitOfMeasureType("INCHES", "Inches");
public static final DimensionUnitOfMeasureType FEET = new DimensionUnitOfMeasureType("FEET", "Feet");
public static DimensionUnitOfMeasureType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public DimensionUnitOfMeasureType() {
//do nothing
}
public DimensionUnitOfMeasureType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)){
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
DimensionUnitOfMeasureType other = (DimensionUnitOfMeasureType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_util_DimensionUnitOfMeasureType.java
|
5,085 |
transportService.sendRequest(node, SearchScanTransportHandler.ACTION, request, new BaseTransportResponseHandler<QuerySearchResult>() {
@Override
public QuerySearchResult newInstance() {
return new QuerySearchResult();
}
@Override
public void handleResponse(QuerySearchResult response) {
listener.onResult(response);
}
@Override
public void handleException(TransportException exp) {
listener.onFailure(exp);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
| 1no label
|
src_main_java_org_elasticsearch_search_action_SearchServiceTransportAction.java
|
732 |
private static class BucketSearchResult {
private final int itemIndex;
private final ArrayList<Long> path;
private BucketSearchResult(int itemIndex, ArrayList<Long> path) {
this.itemIndex = itemIndex;
this.path = path;
}
public long getLastPathItem() {
return path.get(path.size() - 1);
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_sbtree_local_OSBTree.java
|
1,352 |
PhasedUnit lastPhasedUnit = new CeylonSourceParser<PhasedUnit>() {
@Override
protected String getCharset() {
try {
return modelPhaseUnit.getProjectResource().getDefaultCharset();
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
@SuppressWarnings("unchecked")
@Override
protected PhasedUnit createPhasedUnit(CompilationUnit cu, Package pkg, CommonTokenStream tokenStream) {
return new PhasedUnit(virtualSrcFile,
virtualSrcDir, cu, pkg,
currentModuleManager,
currentTypechecker.getContext(),
tokenStream.getTokens()) {
@Override
protected boolean reuseExistingDescriptorModels() {
return true;
}
};
}
}.parseFileToPhasedUnit(
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_model_ProjectSourceFile.java
|
171 |
public abstract class OPollerThread extends OSoftThread {
protected final long delay;
public OPollerThread(final long iDelay) {
delay = iDelay;
}
public OPollerThread(long iDelay, final ThreadGroup iThreadGroup) {
super(iThreadGroup, OPollerThread.class.getSimpleName());
delay = iDelay;
}
public OPollerThread(final long iDelay, final String name) {
super(name);
delay = iDelay;
}
public OPollerThread(final long iDelay, final ThreadGroup group, final String name) {
super(group, name);
delay = iDelay;
}
@Override
protected void afterExecution() throws InterruptedException {
pauseCurrentThread(delay);
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_thread_OPollerThread.java
|
2,828 |
private static class ReplicaSyncEntryProcessor implements ScheduledEntryProcessor<Integer, ReplicaSyncInfo> {
private InternalPartitionServiceImpl partitionService;
public ReplicaSyncEntryProcessor(InternalPartitionServiceImpl partitionService) {
this.partitionService = partitionService;
}
@Override
public void process(EntryTaskScheduler<Integer, ReplicaSyncInfo> scheduler,
Collection<ScheduledEntry<Integer, ReplicaSyncInfo>> entries) {
for (ScheduledEntry<Integer, ReplicaSyncInfo> entry : entries) {
ReplicaSyncInfo syncInfo = entry.getValue();
if (partitionService.replicaSyncRequests.compareAndSet(entry.getKey(), syncInfo, null)) {
logRendingSyncReplicaRequest(syncInfo);
partitionService.triggerPartitionReplicaSync(syncInfo.partitionId, syncInfo.replicaIndex);
}
}
}
private void logRendingSyncReplicaRequest(ReplicaSyncInfo syncInfo) {
ILogger logger = partitionService.logger;
if (logger.isFinestEnabled()) {
logger.finest("Re-sending sync replica request for partition: " + syncInfo.partitionId + ", replica: "
+ syncInfo.replicaIndex);
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_partition_impl_InternalPartitionServiceImpl.java
|
1,271 |
public class FaunusSchemaManager implements SchemaInspector {
private static final FaunusSchemaManager DEFAULT_MANAGER = new FaunusSchemaManager();
private final ConcurrentMap<String,FaunusVertexLabel> vertexLabels;
private final ConcurrentMap<String,FaunusRelationType> relationTypes;
private SchemaProvider schemaProvider;
private FaunusSchemaManager() {
this(DefaultSchemaProvider.INSTANCE);
}
public FaunusSchemaManager(SchemaProvider provider) {
vertexLabels = Maps.newConcurrentMap();
relationTypes = Maps.newConcurrentMap();
setSchemaProvider(provider);
initialize();
}
private final void initialize() {
vertexLabels.put(FaunusVertexLabel.DEFAULT_VERTEXLABEL.getName(),FaunusVertexLabel.DEFAULT_VERTEXLABEL);
relationTypes.put(FaunusPropertyKey.COUNT.getName(),FaunusPropertyKey.COUNT);
relationTypes.put(FaunusEdgeLabel.LINK.getName(),FaunusEdgeLabel.LINK);
relationTypes.put(FaunusPropertyKey.VALUE.getName(),FaunusPropertyKey.VALUE);
relationTypes.put(FaunusPropertyKey.ID.getName(),FaunusPropertyKey.ID);
relationTypes.put(FaunusPropertyKey._ID.getName(),FaunusPropertyKey._ID);
relationTypes.put(FaunusPropertyKey.LABEL.getName(),FaunusPropertyKey.LABEL);
}
public void setSchemaProvider(SchemaProvider provider) {
if (provider!=DefaultSchemaProvider.INSTANCE) {
provider = DefaultSchemaProvider.asBackupProvider(provider);
}
this.schemaProvider=provider;
}
public void clear() {
vertexLabels.clear();
relationTypes.clear();
initialize();
}
public FaunusVertexLabel getVertexLabel(String name) {
FaunusVertexLabel vl = vertexLabels.get(name);
if (vl==null) {
vertexLabels.putIfAbsent(name,new FaunusVertexLabel(schemaProvider.getVertexLabel(name)));
vl = vertexLabels.get(name);
}
assert vl!=null;
return vl;
}
@Override
public boolean containsVertexLabel(String name) {
return vertexLabels.containsKey(name) || schemaProvider.getVertexLabel(name)!=null;
}
@Override
public boolean containsRelationType(String name) {
return relationTypes.containsKey(name) || schemaProvider.getRelationType(name)!=null;
}
@Override
public FaunusRelationType getRelationType(String name) {
FaunusRelationType rt = relationTypes.get(name);
if (rt==null) {
RelationTypeDefinition def = schemaProvider.getRelationType(name);
if (def==null) return null;
if (def instanceof PropertyKeyDefinition) rt = new FaunusPropertyKey((PropertyKeyDefinition)def,false);
else rt = new FaunusEdgeLabel((EdgeLabelDefinition)def,false);
relationTypes.putIfAbsent(name,rt);
rt = relationTypes.get(name);
}
assert rt!=null;
return rt;
}
@Override
public boolean containsPropertyKey(String name) {
FaunusRelationType rt = getRelationType(name);
return rt!=null && rt.isPropertyKey();
}
@Override
public boolean containsEdgeLabel(String name) {
FaunusRelationType rt = getRelationType(name);
return rt!=null && rt.isEdgeLabel();
}
@Override
public FaunusPropertyKey getOrCreatePropertyKey(String name) {
FaunusRelationType rt = getRelationType(name);
if (rt==null) {
relationTypes.putIfAbsent(name,new FaunusPropertyKey(schemaProvider.getPropertyKey(name),false));
rt = relationTypes.get(name);
}
assert rt!=null;
if (!(rt instanceof FaunusPropertyKey)) throw new IllegalArgumentException("Not a property key: " + name);
return (FaunusPropertyKey)rt;
}
@Override
public FaunusPropertyKey getPropertyKey(String name) {
FaunusRelationType rt = getRelationType(name);
Preconditions.checkArgument(rt==null || rt.isPropertyKey(),"Name does not identify a property key: ",name);
return (FaunusPropertyKey)rt;
}
@Override
public FaunusEdgeLabel getOrCreateEdgeLabel(String name) {
FaunusRelationType rt = getRelationType(name);
if (rt==null) {
relationTypes.putIfAbsent(name,new FaunusEdgeLabel(schemaProvider.getEdgeLabel(name),false));
rt = relationTypes.get(name);
}
assert rt!=null;
if (!(rt instanceof FaunusEdgeLabel)) throw new IllegalArgumentException("Not an edge label: " + name);
return (FaunusEdgeLabel)rt;
}
@Override
public FaunusEdgeLabel getEdgeLabel(String name) {
FaunusRelationType rt = getRelationType(name);
Preconditions.checkArgument(rt==null || rt.isEdgeLabel(),"Name does not identify an edge label: ",name);
return (FaunusEdgeLabel)rt;
}
public static FaunusSchemaManager getTypeManager(Configuration config) {
return DEFAULT_MANAGER;
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_FaunusSchemaManager.java
|
1,586 |
public class Entity implements Serializable {
protected static final long serialVersionUID = 1L;
protected String[] type;
protected Property[] properties;
protected boolean isDirty = false;
protected Boolean isDeleted = false;
protected Boolean isInactive = false;
protected Boolean isActive = false;
protected Boolean isLocked = false;
protected String lockedBy;
protected String lockedDate;
protected boolean multiPartAvailableOnThread = false;
protected boolean isValidationFailure = false;
protected Map<String, List<String>> validationErrors = new HashMap<String, List<String>>();
protected Map<String, Property> pMap = null;
public String[] getType() {
return type;
}
public void setType(String[] type) {
if (type != null && type.length > 0) {
Arrays.sort(type);
}
this.type = type;
}
public Map<String, Property> getPMap() {
if (pMap == null) {
pMap = BLCMapUtils.keyedMap(properties, new TypedClosure<String, Property>() {
@Override
public String getKey(Property value) {
return value.getName();
}
});
}
return pMap;
}
public Property[] getProperties() {
return properties;
}
public void setProperties(Property[] properties) {
this.properties = properties;
pMap = null;
}
public void mergeProperties(String prefix, Entity entity) {
int j = 0;
Property[] merged = new Property[properties.length + entity.getProperties().length];
for (Property property : properties) {
merged[j] = property;
j++;
}
for (Property property : entity.getProperties()) {
property.setName(prefix!=null?prefix+"."+property.getName():""+property.getName());
merged[j] = property;
j++;
}
properties = merged;
}
/**
* Replaces all property values in this entity with the values from the given entity. This also resets the {@link #pMap}
*
* @param entity
*/
public void overridePropertyValues(Entity entity) {
for (Property property : entity.getProperties()) {
Property myProperty = findProperty(property.getName());
if (myProperty != null) {
myProperty.setValue(property.getValue());
myProperty.setRawValue(property.getRawValue());
}
}
pMap = null;
}
public Property findProperty(String name) {
Arrays.sort(properties, new Comparator<Property>() {
@Override
public int compare(Property o1, Property o2) {
if (o1 == null && o2 == null) {
return 0;
} else if (o1 == null) {
return 1;
} else if (o2 == null) {
return -1;
}
return o1.getName().compareTo(o2.getName());
}
});
Property searchProperty = new Property();
searchProperty.setName(name);
int index = Arrays.binarySearch(properties, searchProperty, new Comparator<Property>() {
@Override
public int compare(Property o1, Property o2) {
if (o1 == null && o2 == null) {
return 0;
} else if (o1 == null) {
return 1;
} else if (o2 == null) {
return -1;
}
return o1.getName().compareTo(o2.getName());
}
});
if (index >= 0) {
return properties[index];
}
return null;
}
public void addProperty(Property property) {
Property[] allProps = getProperties();
Property[] newProps = new Property[allProps.length + 1];
for (int j=0;j<allProps.length;j++) {
newProps[j] = allProps[j];
}
newProps[newProps.length - 1] = property;
setProperties(newProps);
}
/**
* Adds a single validation error to this entity. This will also set the entire
* entity in an error state by invoking {@link #setValidationFailure(boolean)}.
*
* @param fieldName - the field that is in error. This works on top-level properties (like a 'manufacturer' field on a
* Product entity) but can also work on properties gleaned from a related entity (like
* 'defaultSku.weight.weightUnitOfMeasure' on a Product entity)
* @param errorOrErrorKey - the error message to present to a user. Could be the actual error message or a key to a
* property in messages.properties to support different locales
*/
public void addValidationError(String fieldName, String errorOrErrorKey) {
Map<String, List<String>> fieldErrors = getValidationErrors();
List<String> errorMessages = fieldErrors.get(fieldName);
if (errorMessages == null) {
errorMessages = new ArrayList<String>();
fieldErrors.put(fieldName, errorMessages);
}
errorMessages.add(errorOrErrorKey);
setValidationFailure(true);
}
public boolean isDirty() {
return isDirty;
}
public void setDirty(boolean dirty) {
isDirty = dirty;
}
public boolean isMultiPartAvailableOnThread() {
return multiPartAvailableOnThread;
}
public void setMultiPartAvailableOnThread(boolean multiPartAvailableOnThread) {
this.multiPartAvailableOnThread = multiPartAvailableOnThread;
}
/**
*
* @return if this entity has failed validation. This will also check the {@link #getValidationErrors()} map if this
* boolean has not been explicitly set
*/
public boolean isValidationFailure() {
if (!getValidationErrors().isEmpty()) {
isValidationFailure = true;
}
return isValidationFailure;
}
public void setValidationFailure(boolean validationFailure) {
isValidationFailure = validationFailure;
}
/**
* Validation error map where the key corresponds to the property that failed validation (which could be dot-separated)
* and the value corresponds to a list of the error messages, in the case of multiple errors on the same field.
*
* For instance, you might have a configuration where the field is both a Required validator and a regex validator.
* The validation map in this case might contain something like:
*
* defaultSku.name => ['This field is required', 'Cannot have numbers in name']
*
* @return a map keyed by property name to the list of error messages for that property
*/
public Map<String, List<String>> getValidationErrors() {
return validationErrors;
}
/**
* Completely reset the validation errors for this Entity. In most cases it is more appropriate to use the convenience
* method for adding a single error via {@link #addValidationError(String, String)}. This will also set the entire
* entity in an error state by invoking {@link #setValidationFailure(boolean)}.
*
* @param validationErrors
* @see #addValidationError(String, String)
*/
public void setValidationErrors(Map<String, List<String>> validationErrors) {
if (MapUtils.isNotEmpty(validationErrors)) {
setValidationFailure(true);
}
this.validationErrors = validationErrors;
}
public Boolean getActive() {
return isActive;
}
public void setActive(Boolean active) {
isActive = active;
}
public Boolean getDeleted() {
return isDeleted;
}
public void setDeleted(Boolean deleted) {
isDeleted = deleted;
}
public Boolean getInactive() {
return isInactive;
}
public void setInactive(Boolean inactive) {
isInactive = inactive;
}
public Boolean getLocked() {
return isLocked;
}
public void setLocked(Boolean locked) {
isLocked = locked;
}
public String getLockedBy() {
return lockedBy;
}
public void setLockedBy(String lockedBy) {
this.lockedBy = lockedBy;
}
public String getLockedDate() {
return lockedDate;
}
public void setLockedDate(String lockedDate) {
this.lockedDate = lockedDate;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Entity)) return false;
Entity entity = (Entity) o;
if (isDirty != entity.isDirty) return false;
if (isValidationFailure != entity.isValidationFailure) return false;
if (multiPartAvailableOnThread != entity.multiPartAvailableOnThread) return false;
if (isActive != null ? !isActive.equals(entity.isActive) : entity.isActive != null) return false;
if (isDeleted != null ? !isDeleted.equals(entity.isDeleted) : entity.isDeleted != null) return false;
if (isInactive != null ? !isInactive.equals(entity.isInactive) : entity.isInactive != null) return false;
if (isLocked != null ? !isLocked.equals(entity.isLocked) : entity.isLocked != null) return false;
if (lockedBy != null ? !lockedBy.equals(entity.lockedBy) : entity.lockedBy != null) return false;
if (lockedDate != null ? !lockedDate.equals(entity.lockedDate) : entity.lockedDate != null) return false;
if (!Arrays.equals(properties, entity.properties)) return false;
if (!Arrays.equals(type, entity.type)) return false;
return true;
}
@Override
public int hashCode() {
int result = type != null ? Arrays.hashCode(type) : 0;
result = 31 * result + (properties != null ? Arrays.hashCode(properties) : 0);
result = 31 * result + (isDirty ? 1 : 0);
result = 31 * result + (isDeleted != null ? isDeleted.hashCode() : 0);
result = 31 * result + (isInactive != null ? isInactive.hashCode() : 0);
result = 31 * result + (isActive != null ? isActive.hashCode() : 0);
result = 31 * result + (isLocked != null ? isLocked.hashCode() : 0);
result = 31 * result + (lockedBy != null ? lockedBy.hashCode() : 0);
result = 31 * result + (lockedDate != null ? lockedDate.hashCode() : 0);
result = 31 * result + (multiPartAvailableOnThread ? 1 : 0);
result = 31 * result + (isValidationFailure ? 1 : 0);
return result;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_Entity.java
|
708 |
mappingUpdatedAction.execute(request, new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {
@Override
public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) {
// all is well
}
@Override
public void onFailure(Throwable e) {
logger.warn("failed to update master on updated mapping for {}", e, request);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_bulk_TransportShardBulkAction.java
|
186 |
@Component("blContentProcessor")
public class ContentProcessor extends AbstractModelVariableModifierProcessor {
protected final Log LOG = LogFactory.getLog(getClass());
public static final String REQUEST_DTO = "blRequestDTO";
public static final String BLC_RULE_MAP_PARAM = "blRuleMap";
@Resource(name = "blStructuredContentService")
protected StructuredContentService structuredContentService;
@Resource(name = "blStaticAssetService")
protected StaticAssetService staticAssetService;
/**
* Sets the name of this processor to be used in Thymeleaf template
*/
public ContentProcessor() {
super("content");
}
public ContentProcessor(String elementName) {
super(elementName);
}
@Override
public int getPrecedence() {
return 10000;
}
/**
* Returns a default name
* @param element
* @param valueName
* @return
*/
protected String getAttributeValue(Element element, String valueName, String defaultValue) {
String returnValue = element.getAttributeValue(valueName);
if (returnValue == null) {
return defaultValue;
} else {
return returnValue;
}
}
@Override
protected void modifyModelAttributes(Arguments arguments, Element element) {
String contentType = element.getAttributeValue("contentType");
String contentName = element.getAttributeValue("contentName");
String maxResultsStr = element.getAttributeValue("maxResults");
Integer maxResults = null;
if (maxResultsStr != null) {
maxResults = Ints.tryParse(maxResultsStr);
}
if (maxResults == null) {
maxResults = Integer.MAX_VALUE;
}
String contentListVar = getAttributeValue(element, "contentListVar", "contentList");
String contentItemVar = getAttributeValue(element, "contentItemVar", "contentItem");
String numResultsVar = getAttributeValue(element, "numResultsVar", "numResults");
String fieldFilters = element.getAttributeValue("fieldFilters");
String sortField = element.getAttributeValue("sortField");
IWebContext context = (IWebContext) arguments.getContext();
HttpServletRequest request = context.getHttpServletRequest();
BroadleafRequestContext blcContext = BroadleafRequestContext.getBroadleafRequestContext();
Map<String, Object> mvelParameters = buildMvelParameters(request, arguments, element);
SandBox currentSandbox = blcContext.getSandbox();
List<StructuredContentDTO> contentItems;
StructuredContentType structuredContentType = structuredContentService.findStructuredContentTypeByName(contentType);
Locale locale = blcContext.getLocale();
contentItems = getContentItems(contentName, maxResults, request, mvelParameters, currentSandbox, structuredContentType, locale, arguments, element);
if (contentItems.size() > 0) {
List<Map<String,String>> contentItemFields = new ArrayList<Map<String, String>>();
for (StructuredContentDTO item : contentItems) {
if (StringUtils.isNotEmpty(fieldFilters)) {
AssignationSequence assignments = StandardExpressionProcessor.parseAssignationSequence(arguments, fieldFilters, false);
boolean valid = true;
for (Assignation assignment : assignments) {
if (ObjectUtils.notEqual(StandardExpressionProcessor.executeExpression(arguments, assignment.getRight()),
item.getValues().get(assignment.getLeft().getValue()))) {
valid = false;
break;
}
}
if (valid) {
contentItemFields.add(item.getValues());
}
} else {
contentItemFields.add(item.getValues());
}
}
addToModel(arguments, contentItemVar, contentItemFields.get(0));
addToModel(arguments, contentListVar, contentItemFields);
addToModel(arguments, numResultsVar, contentItems.size());
} else {
if (LOG.isInfoEnabled()) {
LOG.info("**************************The contentItems is null*************************");
}
addToModel(arguments, contentItemVar, null);
addToModel(arguments, contentListVar, null);
addToModel(arguments, numResultsVar, 0);
}
}
/**
* @param contentName name of the content to be looked up (can be null)
* @param maxResults maximum results to return
* @param request servlet request
* @param mvelParameters values that should be considered when filtering the content list by rules
* @param currentSandbox current sandbox being used
* @param structuredContentType the type of content that should be returned
* @param locale current locale
* @param arguments Thymeleaf Arguments passed into the tag
* @param element element context that this Thymeleaf processor is being executed in
* @return
*/
protected List<StructuredContentDTO> getContentItems(String contentName, Integer maxResults, HttpServletRequest request,
Map<String, Object> mvelParameters,
SandBox currentSandbox,
StructuredContentType structuredContentType,
Locale locale,
Arguments arguments,
Element element) {
List<StructuredContentDTO> contentItems;
if (structuredContentType == null) {
contentItems = structuredContentService.lookupStructuredContentItemsByName(currentSandbox, contentName, locale, maxResults, mvelParameters, isSecure(request));
} else {
if (contentName == null || "".equals(contentName)) {
contentItems = structuredContentService.lookupStructuredContentItemsByType(currentSandbox, structuredContentType, locale, maxResults, mvelParameters, isSecure(request));
} else {
contentItems = structuredContentService.lookupStructuredContentItemsByName(currentSandbox, structuredContentType, contentName, locale, maxResults, mvelParameters, isSecure(request));
}
}
return contentItems;
}
/**
* MVEL is used to process the content targeting rules.
*
* @param request
* @return
*/
protected Map<String, Object> buildMvelParameters(HttpServletRequest request, Arguments arguments, Element element) {
TimeZone timeZone = BroadleafRequestContext.getBroadleafRequestContext().getTimeZone();
final TimeDTO timeDto;
if (timeZone != null) {
timeDto = new TimeDTO(SystemTime.asCalendar(timeZone));
} else {
timeDto = new TimeDTO();
}
RequestDTO requestDto = (RequestDTO) request.getAttribute(REQUEST_DTO);
Map<String, Object> mvelParameters = new HashMap<String, Object>();
mvelParameters.put("time", timeDto);
mvelParameters.put("request", requestDto);
String productString = element.getAttributeValue("product");
if (productString != null) {
Object product = StandardExpressionProcessor.processExpression(arguments, productString);
if (product != null) {
mvelParameters.put("product", product);
}
}
String categoryString = element.getAttributeValue("category");
if (categoryString != null) {
Object category = StandardExpressionProcessor.processExpression(arguments, categoryString);
if (category != null) {
mvelParameters.put("category", category);
}
}
@SuppressWarnings("unchecked")
Map<String,Object> blcRuleMap = (Map<String,Object>) request.getAttribute(BLC_RULE_MAP_PARAM);
if (blcRuleMap != null) {
for (String mapKey : blcRuleMap.keySet()) {
mvelParameters.put(mapKey, blcRuleMap.get(mapKey));
}
}
return mvelParameters;
}
public boolean isSecure(HttpServletRequest request) {
boolean secure = false;
if (request != null) {
secure = ("HTTPS".equalsIgnoreCase(request.getScheme()) || request.isSecure());
}
return secure;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_web_processor_ContentProcessor.java
|
32 |
public class Values extends AbstractCollection<V> {
@Override
public Iterator<V> iterator() {
return new ValueIterator(getFirstEntry());
}
public Iterator<V> inverseIterator() {
return new ValueInverseIterator(getLastEntry());
}
@Override
public int size() {
return OMVRBTree.this.size();
}
@Override
public boolean contains(final Object o) {
return OMVRBTree.this.containsValue(o);
}
@Override
public boolean remove(final Object o) {
for (OMVRBTreeEntry<K, V> e = getFirstEntry(); e != null; e = next(e)) {
if (valEquals(e.getValue(), o)) {
deleteEntry(e);
return true;
}
}
return false;
}
@Override
public void clear() {
OMVRBTree.this.clear();
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java
|
469 |
indexAliasesService.indicesAliases(updateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new IndicesAliasesResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable t) {
logger.debug("failed to perform aliases", t);
listener.onFailure(t);
}
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_alias_TransportIndicesAliasesAction.java
|
602 |
public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetSettingsRequest, GetSettingsResponse, GetSettingsRequestBuilder> {
public GetSettingsRequestBuilder(InternalGenericClient client, String... indices) {
super(client, new GetSettingsRequest().indices(indices));
}
public GetSettingsRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
public GetSettingsRequestBuilder addIndices(String... indices) {
request.indices(ObjectArrays.concat(request.indices(), indices, String.class));
return this;
}
/**
* Specifies what type of requested indices to ignore and wildcard indices expressions.
*
* For example indices that don't exist.
*/
public GetSettingsRequestBuilder setIndicesOptions(IndicesOptions options) {
request.indicesOptions(options);
return this;
}
public GetSettingsRequestBuilder setNames(String... names) {
request.names(names);
return this;
}
@Override
protected void doExecute(ActionListener<GetSettingsResponse> listener) {
((IndicesAdminClient) client).getSettings(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_settings_get_GetSettingsRequestBuilder.java
|
55 |
public class PaxosClusterMemberEvents implements ClusterMemberEvents, Lifecycle
{
private Cluster cluster;
private AtomicBroadcast atomicBroadcast;
private StringLogger logger;
protected AtomicBroadcastSerializer serializer;
protected Iterable<ClusterMemberListener> listeners = Listeners.newListeners();
private ClusterMembersSnapshot clusterMembersSnapshot;
private ClusterListener.Adapter clusterListener;
private Snapshot snapshot;
private AtomicBroadcastListener atomicBroadcastListener;
private ExecutorService executor;
private final Predicate<ClusterMembersSnapshot> snapshotValidator;
private final Heartbeat heartbeat;
private HeartbeatListenerImpl heartbeatListener;
private ObjectInputStreamFactory lenientObjectInputStream;
private ObjectOutputStreamFactory lenientObjectOutputStream;
public PaxosClusterMemberEvents( final Snapshot snapshot, Cluster cluster, Heartbeat heartbeat,
AtomicBroadcast atomicBroadcast, Logging logging,
Predicate<ClusterMembersSnapshot> validator,
Function2<Iterable<MemberIsAvailable>, MemberIsAvailable,
Iterable<MemberIsAvailable>> snapshotFilter,
ObjectInputStreamFactory lenientObjectInputStream,
ObjectOutputStreamFactory lenientObjectOutputStream)
{
this.snapshot = snapshot;
this.cluster = cluster;
this.heartbeat = heartbeat;
this.atomicBroadcast = atomicBroadcast;
this.lenientObjectInputStream = lenientObjectInputStream;
this.lenientObjectOutputStream = lenientObjectOutputStream;
this.logger = logging.getMessagesLog( getClass() );
clusterListener = new ClusterListenerImpl();
atomicBroadcastListener = new AtomicBroadcastListenerImpl();
this.snapshotValidator = validator;
clusterMembersSnapshot = new ClusterMembersSnapshot( snapshotFilter );
}
@Override
public void addClusterMemberListener( ClusterMemberListener listener )
{
listeners = Listeners.addListener( listener, listeners );
}
@Override
public void removeClusterMemberListener( ClusterMemberListener listener )
{
listeners = Listeners.removeListener( listener, listeners );
}
@Override
public void init()
throws Throwable
{
serializer = new AtomicBroadcastSerializer( lenientObjectInputStream, lenientObjectOutputStream );
cluster.addClusterListener( clusterListener );
atomicBroadcast.addAtomicBroadcastListener( atomicBroadcastListener );
snapshot.setSnapshotProvider( new HighAvailabilitySnapshotProvider() );
heartbeat.addHeartbeatListener( heartbeatListener = new HeartbeatListenerImpl() );
executor = Executors.newSingleThreadExecutor();
}
@Override
public void start()
throws Throwable
{
}
@Override
public void stop()
throws Throwable
{
}
@Override
public void shutdown()
throws Throwable
{
snapshot.setSnapshotProvider( null );
if ( executor != null )
{
executor.shutdown();
executor = null;
}
cluster.removeClusterListener( clusterListener );
atomicBroadcast.removeAtomicBroadcastListener( atomicBroadcastListener );
heartbeat.removeHeartbeatListener( heartbeatListener );
}
private class HighAvailabilitySnapshotProvider implements SnapshotProvider
{
@Override
public void getState( ObjectOutputStream output ) throws IOException
{
output.writeObject( clusterMembersSnapshot );
}
@Override
public void setState( ObjectInputStream input ) throws IOException, ClassNotFoundException
{
clusterMembersSnapshot = ClusterMembersSnapshot.class.cast(input.readObject());
if ( !snapshotValidator.accept( clusterMembersSnapshot ) )
{
executor.submit( new Runnable()
{
@Override
public void run()
{
cluster.leave();
}
} );
}
else
{
// Send current availability events to listeners
Listeners.notifyListeners( listeners, executor, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
for ( MemberIsAvailable memberIsAvailable : clusterMembersSnapshot.getCurrentAvailableMembers() )
{
listener.memberIsAvailable( memberIsAvailable.getRole(),
memberIsAvailable.getInstanceId(), memberIsAvailable.getRoleUri() );
}
}
} );
}
}
}
public static class UniqueRoleFilter
implements Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>>
{
private final String role;
private final Set<String> roles = new HashSet<String>();
public UniqueRoleFilter( String role )
{
this.role = role;
}
@Override
public Iterable<MemberIsAvailable> apply( Iterable<MemberIsAvailable> previousSnapshot, final MemberIsAvailable newMessage )
{
return Iterables.append( newMessage, Iterables.filter( new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return not( in( newMessage.getInstanceId() ) ).accept( item.getInstanceId() );
}
}, previousSnapshot));
}
}
private static class UniqueInstanceFilter implements Predicate<MemberIsAvailable>
{
private final Set<InstanceId> roles = new HashSet<InstanceId>();
@Override
public boolean accept( MemberIsAvailable item )
{
return roles.add( item.getInstanceId() );
}
}
public static class ClusterMembersSnapshot
implements Serializable
{
private final
Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>> nextSnapshotFunction;
private Iterable<MemberIsAvailable> availableMembers = new ArrayList<MemberIsAvailable>();
public ClusterMembersSnapshot( Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>> nextSnapshotFunction )
{
this.nextSnapshotFunction = nextSnapshotFunction;
}
public void availableMember( MemberIsAvailable memberIsAvailable )
{
availableMembers = toList( nextSnapshotFunction.apply( availableMembers, memberIsAvailable ) );
}
public void unavailableMember( final InstanceId member )
{
availableMembers = toList( filter( new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return !item.getInstanceId().equals( member );
}
}, availableMembers ) );
}
public void unavailableMember( final URI member, final String role )
{
availableMembers = toList( filter(new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return !(item.getClusterUri().equals( member ) && item.getRole().equals( role ));
}
}, availableMembers));
}
public Iterable<MemberIsAvailable> getCurrentAvailableMembers()
{
return availableMembers;
}
public Iterable<MemberIsAvailable> getCurrentAvailable( final InstanceId memberId )
{
return toList( Iterables.filter( new Predicate<MemberIsAvailable>()
{
@Override
public boolean accept( MemberIsAvailable item )
{
return item.getInstanceId().equals( memberId );
}
}, availableMembers) );
}
}
private class ClusterListenerImpl extends ClusterListener.Adapter
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
// Catch up with elections
for ( Map.Entry<String, InstanceId> memberRoles : clusterConfiguration.getRoles().entrySet() )
{
elected( memberRoles.getKey(), memberRoles.getValue(),
clusterConfiguration.getUriForId( memberRoles.getValue() ) );
}
}
@Override
public void elected( String role, final InstanceId instanceId, final URI electedMember )
{
if ( role.equals( ClusterConfiguration.COORDINATOR ) )
{
// Use the cluster coordinator as master for HA
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.coordinatorIsElected( instanceId );
}
} );
}
}
@Override
public void leftCluster( final InstanceId member )
{
// Notify unavailability of members
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
for ( MemberIsAvailable memberIsAvailable : clusterMembersSnapshot.getCurrentAvailable( member ) )
{
listener.memberIsUnavailable( memberIsAvailable.getRole(), member );
}
}
} );
clusterMembersSnapshot.unavailableMember( member );
}
}
private class AtomicBroadcastListenerImpl implements AtomicBroadcastListener
{
@Override
public void receive( Payload payload )
{
try
{
final Object value = serializer.receive( payload );
if ( value instanceof MemberIsAvailable )
{
final MemberIsAvailable memberIsAvailable = (MemberIsAvailable) value;
// Update snapshot
clusterMembersSnapshot.availableMember( memberIsAvailable );
logger.info("Snapshot:"+clusterMembersSnapshot.getCurrentAvailableMembers());
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsAvailable( memberIsAvailable.getRole(),
memberIsAvailable.getInstanceId(), memberIsAvailable.getRoleUri() );
}
} );
}
else if ( value instanceof MemberIsUnavailable )
{
final MemberIsUnavailable memberIsUnavailable = (MemberIsUnavailable) value;
// Update snapshot
clusterMembersSnapshot.unavailableMember( memberIsUnavailable.getClusterUri(),
memberIsUnavailable.getRole() );
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsUnavailable( memberIsUnavailable.getRole(),
memberIsUnavailable.getInstanceId() );
}
} );
}
}
catch ( Throwable t )
{
logger.error( "Could not handle cluster member available message", t );
}
}
}
private class HeartbeatListenerImpl implements HeartbeatListener
{
@Override
public void failed( final InstanceId server )
{
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsFailed( server );
}
} );
}
@Override
public void alive( final InstanceId server )
{
Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
{
@Override
public void notify( ClusterMemberListener listener )
{
listener.memberIsAlive( server );
}
} );
}
}
}
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_member_paxos_PaxosClusterMemberEvents.java
|
4,212 |
public class StoreModule extends AbstractModule {
private final Settings settings;
private final IndexStore indexStore;
private Class<? extends Distributor> distributor;
public StoreModule(Settings settings, IndexStore indexStore) {
this.indexStore = indexStore;
this.settings = settings;
}
public void setDistributor(Class<? extends Distributor> distributor) {
this.distributor = distributor;
}
@Override
protected void configure() {
bind(DirectoryService.class).to(indexStore.shardDirectory()).asEagerSingleton();
bind(Store.class).asEagerSingleton();
if (distributor == null) {
distributor = loadDistributor(settings);
}
bind(Distributor.class).to(distributor).asEagerSingleton();
}
private Class<? extends Distributor> loadDistributor(Settings settings) {
final Class<? extends Distributor> distributor;
final String type = settings.get("index.store.distributor");
if ("least_used".equals(type)) {
distributor = LeastUsedDistributor.class;
} else if ("random".equals(type)) {
distributor = RandomWeightedDistributor.class;
} else {
distributor = settings.getAsClass("index.store.distributor", LeastUsedDistributor.class,
"org.elasticsearch.index.store.distributor.", "Distributor");
}
return distributor;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_store_StoreModule.java
|
3,686 |
return new Comparator<Map.Entry>() {
public int compare(Map.Entry entry1, Map.Entry entry2) {
return SortingUtil.compare(comparator, iterationType, entry1, entry2);
}
};
| 1no label
|
hazelcast_src_main_java_com_hazelcast_util_SortingUtil.java
|
98 |
@SuppressWarnings("serial")
static final class SearchMappingsTask<K,V,U>
extends BulkTask<K,V,U> {
final BiFun<? super K, ? super V, ? extends U> searchFunction;
final AtomicReference<U> result;
SearchMappingsTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
BiFun<? super K, ? super V, ? extends U> searchFunction,
AtomicReference<U> result) {
super(p, b, i, f, t);
this.searchFunction = searchFunction; this.result = result;
}
public final U getRawResult() { return result.get(); }
public final void compute() {
final BiFun<? super K, ? super V, ? extends U> searchFunction;
final AtomicReference<U> result;
if ((searchFunction = this.searchFunction) != null &&
(result = this.result) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
if (result.get() != null)
return;
addToPendingCount(1);
new SearchMappingsTask<K,V,U>
(this, batch >>>= 1, baseLimit = h, f, tab,
searchFunction, result).fork();
}
while (result.get() == null) {
U u;
Node<K,V> p;
if ((p = advance()) == null) {
propagateCompletion();
break;
}
if ((u = searchFunction.apply(p.key, p.val)) != null) {
if (result.compareAndSet(null, u))
quietlyCompleteRoot();
break;
}
}
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
959 |
public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest, Response extends ActionResponse> extends TransportMasterNodeReadOperationAction<Request, Response> {
public TransportClusterInfoAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
}
@Override
protected String executor() {
// read operation, lightweight...
return ThreadPool.Names.SAME;
}
@Override
protected final void masterOperation(final Request request, final ClusterState state, final ActionListener<Response> listener) throws ElasticsearchException {
String[] concreteIndices = state.metaData().concreteIndices(request.indices(), request.indicesOptions());
request.indices(concreteIndices);
doMasterOperation(request, state, listener);
}
protected abstract void doMasterOperation(Request request, ClusterState state, final ActionListener<Response> listener) throws ElasticsearchException;
}
| 1no label
|
src_main_java_org_elasticsearch_action_support_master_info_TransportClusterInfoAction.java
|
103 |
public static class Presentation {
public static class Tab {
public static class Name {
public static final String Rules = "PageImpl_Rules_Tab";
}
public static class Order {
public static final int Rules = 1000;
}
}
public static class Group {
public static class Name {
public static final String Basic = "PageImpl_Basic";
public static final String Page = "PageImpl_Page";
public static final String Rules = "PageImpl_Rules";
}
public static class Order {
public static final int Basic = 1000;
public static final int Page = 2000;
public static final int Rules = 1000;
}
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_PageImpl.java
|
221 |
public interface OOrientListener {
public void onStorageRegistered(final OStorage iStorage);
public void onStorageUnregistered(final OStorage iStorage);
public void onShutdown();
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_OOrientListener.java
|
4,510 |
public class TransportNodesListShardStoreMetaData extends TransportNodesOperationAction<TransportNodesListShardStoreMetaData.Request, TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData, TransportNodesListShardStoreMetaData.NodeRequest, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> {
private final IndicesService indicesService;
private final NodeEnvironment nodeEnv;
@Inject
public TransportNodesListShardStoreMetaData(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, NodeEnvironment nodeEnv) {
super(settings, clusterName, threadPool, clusterService, transportService);
this.indicesService = indicesService;
this.nodeEnv = nodeEnv;
}
public ActionFuture<NodesStoreFilesMetaData> list(ShardId shardId, boolean onlyUnallocated, String[] nodesIds, @Nullable TimeValue timeout) {
return execute(new Request(shardId, onlyUnallocated, nodesIds).timeout(timeout));
}
@Override
protected String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
protected String transportAction() {
return "/cluster/nodes/indices/shard/store";
}
@Override
protected Request newRequest() {
return new Request();
}
@Override
protected NodeRequest newNodeRequest() {
return new NodeRequest();
}
@Override
protected NodeRequest newNodeRequest(String nodeId, Request request) {
return new NodeRequest(nodeId, request);
}
@Override
protected NodeStoreFilesMetaData newNodeResponse() {
return new NodeStoreFilesMetaData();
}
@Override
protected NodesStoreFilesMetaData newResponse(Request request, AtomicReferenceArray responses) {
final List<NodeStoreFilesMetaData> nodeStoreFilesMetaDatas = Lists.newArrayList();
final List<FailedNodeException> failures = Lists.newArrayList();
for (int i = 0; i < responses.length(); i++) {
Object resp = responses.get(i);
if (resp instanceof NodeStoreFilesMetaData) { // will also filter out null response for unallocated ones
nodeStoreFilesMetaDatas.add((NodeStoreFilesMetaData) resp);
} else if (resp instanceof FailedNodeException) {
failures.add((FailedNodeException) resp);
}
}
return new NodesStoreFilesMetaData(clusterName, nodeStoreFilesMetaDatas.toArray(new NodeStoreFilesMetaData[nodeStoreFilesMetaDatas.size()]),
failures.toArray(new FailedNodeException[failures.size()]));
}
@Override
protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) throws ElasticsearchException {
if (request.unallocated) {
IndexService indexService = indicesService.indexService(request.shardId.index().name());
if (indexService == null) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
if (!indexService.hasShard(request.shardId.id())) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
}
IndexMetaData metaData = clusterService.state().metaData().index(request.shardId.index().name());
if (metaData == null) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
try {
return new NodeStoreFilesMetaData(clusterService.localNode(), listStoreMetaData(request.shardId));
} catch (IOException e) {
throw new ElasticsearchException("Failed to list store metadata for shard [" + request.shardId + "]", e);
}
}
private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException {
IndexService indexService = indicesService.indexService(shardId.index().name());
if (indexService != null) {
InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId.id());
if (indexShard != null) {
return new StoreFilesMetaData(true, shardId, indexShard.store().list());
}
}
// try and see if we an list unallocated
IndexMetaData metaData = clusterService.state().metaData().index(shardId.index().name());
if (metaData == null) {
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
}
String storeType = metaData.settings().get("index.store.type", "fs");
if (!storeType.contains("fs")) {
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
}
File[] shardLocations = nodeEnv.shardLocations(shardId);
File[] shardIndexLocations = new File[shardLocations.length];
for (int i = 0; i < shardLocations.length; i++) {
shardIndexLocations[i] = new File(shardLocations[i], "index");
}
boolean exists = false;
for (File shardIndexLocation : shardIndexLocations) {
if (shardIndexLocation.exists()) {
exists = true;
break;
}
}
if (!exists) {
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
}
Map<String, String> checksums = Store.readChecksums(shardIndexLocations);
if (checksums == null) {
checksums = ImmutableMap.of();
}
Map<String, StoreFileMetaData> files = Maps.newHashMap();
for (File shardIndexLocation : shardIndexLocations) {
File[] listedFiles = shardIndexLocation.listFiles();
if (listedFiles == null) {
continue;
}
for (File file : listedFiles) {
// BACKWARD CKS SUPPORT
if (file.getName().endsWith(".cks")) {
continue;
}
if (Store.isChecksum(file.getName())) {
continue;
}
files.put(file.getName(), new StoreFileMetaData(file.getName(), file.length(), checksums.get(file.getName())));
}
}
return new StoreFilesMetaData(false, shardId, files);
}
@Override
protected boolean accumulateExceptions() {
return true;
}
public static class StoreFilesMetaData implements Iterable<StoreFileMetaData>, Streamable {
private boolean allocated;
private ShardId shardId;
private Map<String, StoreFileMetaData> files;
StoreFilesMetaData() {
}
public StoreFilesMetaData(boolean allocated, ShardId shardId, Map<String, StoreFileMetaData> files) {
this.allocated = allocated;
this.shardId = shardId;
this.files = files;
}
public boolean allocated() {
return allocated;
}
public ShardId shardId() {
return this.shardId;
}
public long totalSizeInBytes() {
long totalSizeInBytes = 0;
for (StoreFileMetaData file : this) {
totalSizeInBytes += file.length();
}
return totalSizeInBytes;
}
@Override
public Iterator<StoreFileMetaData> iterator() {
return files.values().iterator();
}
public boolean fileExists(String name) {
return files.containsKey(name);
}
public StoreFileMetaData file(String name) {
return files.get(name);
}
public static StoreFilesMetaData readStoreFilesMetaData(StreamInput in) throws IOException {
StoreFilesMetaData md = new StoreFilesMetaData();
md.readFrom(in);
return md;
}
@Override
public void readFrom(StreamInput in) throws IOException {
allocated = in.readBoolean();
shardId = ShardId.readShardId(in);
int size = in.readVInt();
files = Maps.newHashMapWithExpectedSize(size);
for (int i = 0; i < size; i++) {
StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
files.put(md.name(), md);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(allocated);
shardId.writeTo(out);
out.writeVInt(files.size());
for (StoreFileMetaData md : files.values()) {
md.writeTo(out);
}
}
}
static class Request extends NodesOperationRequest<Request> {
private ShardId shardId;
private boolean unallocated;
public Request() {
}
public Request(ShardId shardId, boolean unallocated, Set<String> nodesIds) {
super(nodesIds.toArray(new String[nodesIds.size()]));
this.shardId = shardId;
this.unallocated = unallocated;
}
public Request(ShardId shardId, boolean unallocated, String... nodesIds) {
super(nodesIds);
this.shardId = shardId;
this.unallocated = unallocated;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = ShardId.readShardId(in);
unallocated = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
out.writeBoolean(unallocated);
}
}
public static class NodesStoreFilesMetaData extends NodesOperationResponse<NodeStoreFilesMetaData> {
private FailedNodeException[] failures;
NodesStoreFilesMetaData() {
}
public NodesStoreFilesMetaData(ClusterName clusterName, NodeStoreFilesMetaData[] nodes, FailedNodeException[] failures) {
super(clusterName, nodes);
this.failures = failures;
}
public FailedNodeException[] failures() {
return failures;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodes = new NodeStoreFilesMetaData[in.readVInt()];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeStoreFilesMetaData.readListShardStoreNodeOperationResponse(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(nodes.length);
for (NodeStoreFilesMetaData response : nodes) {
response.writeTo(out);
}
}
}
static class NodeRequest extends NodeOperationRequest {
private ShardId shardId;
private boolean unallocated;
NodeRequest() {
}
NodeRequest(String nodeId, TransportNodesListShardStoreMetaData.Request request) {
super(request, nodeId);
this.shardId = request.shardId;
this.unallocated = request.unallocated;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = ShardId.readShardId(in);
unallocated = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
out.writeBoolean(unallocated);
}
}
public static class NodeStoreFilesMetaData extends NodeOperationResponse {
private StoreFilesMetaData storeFilesMetaData;
NodeStoreFilesMetaData() {
}
public NodeStoreFilesMetaData(DiscoveryNode node, StoreFilesMetaData storeFilesMetaData) {
super(node);
this.storeFilesMetaData = storeFilesMetaData;
}
public StoreFilesMetaData storeFilesMetaData() {
return storeFilesMetaData;
}
public static NodeStoreFilesMetaData readListShardStoreNodeOperationResponse(StreamInput in) throws IOException {
NodeStoreFilesMetaData resp = new NodeStoreFilesMetaData();
resp.readFrom(in);
return resp;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.readBoolean()) {
storeFilesMetaData = StoreFilesMetaData.readStoreFilesMetaData(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (storeFilesMetaData == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
storeFilesMetaData.writeTo(out);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_store_TransportNodesListShardStoreMetaData.java
|
5,960 |
public static abstract class SuggestionBuilder<T> implements ToXContent {
private String name;
private String suggester;
private String text;
private String field;
private String analyzer;
private Integer size;
private Integer shardSize;
public SuggestionBuilder(String name, String suggester) {
this.name = name;
this.suggester = suggester;
}
/**
* Same as in {@link SuggestBuilder#setText(String)}, but in the suggestion scope.
*/
@SuppressWarnings("unchecked")
public T text(String text) {
this.text = text;
return (T) this;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
if (text != null) {
builder.field("text", text);
}
builder.startObject(suggester);
if (analyzer != null) {
builder.field("analyzer", analyzer);
}
if (field != null) {
builder.field("field", field);
}
if (size != null) {
builder.field("size", size);
}
if (shardSize != null) {
builder.field("shard_size", shardSize);
}
builder = innerToXContent(builder, params);
builder.endObject();
builder.endObject();
return builder;
}
protected abstract XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException;
/**
* Sets from what field to fetch the candidate suggestions from. This is an
* required option and needs to be set via this setter or
* {@link org.elasticsearch.search.suggest.SuggestBuilder.TermSuggestionBuilder#setField(String)}
* method
*/
@SuppressWarnings("unchecked")
public T field(String field) {
this.field = field;
return (T)this;
}
/**
* Sets the analyzer to analyse to suggest text with. Defaults to the search
* analyzer of the suggest field.
*/
@SuppressWarnings("unchecked")
public T analyzer(String analyzer) {
this.analyzer = analyzer;
return (T)this;
}
/**
* Sets the maximum suggestions to be returned per suggest text term.
*/
@SuppressWarnings("unchecked")
public T size(int size) {
if (size <= 0) {
throw new ElasticsearchIllegalArgumentException("Size must be positive");
}
this.size = size;
return (T)this;
}
/**
* Sets the maximum number of suggested term to be retrieved from each
* individual shard. During the reduce phase the only the top N suggestions
* are returned based on the <code>size</code> option. Defaults to the
* <code>size</code> option.
* <p/>
* Setting this to a value higher than the `size` can be useful in order to
* get a more accurate document frequency for suggested terms. Due to the
* fact that terms are partitioned amongst shards, the shard level document
* frequencies of suggestions may not be precise. Increasing this will make
* these document frequencies more precise.
*/
@SuppressWarnings("unchecked")
public T shardSize(Integer shardSize) {
this.shardSize = shardSize;
return (T)this;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_suggest_SuggestBuilder.java
|
557 |
public class TransportGetFieldMappingsAction extends TransportClusterInfoAction<GetFieldMappingsRequest, GetFieldMappingsResponse> {
private final IndicesService indicesService;
@Inject
public TransportGetFieldMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
this.indicesService = indicesService;
}
@Override
protected String transportAction() {
return GetFieldMappingsAction.NAME;
}
@Override
protected GetFieldMappingsRequest newRequest() {
return new GetFieldMappingsRequest();
}
@Override
protected GetFieldMappingsResponse newResponse() {
return new GetFieldMappingsResponse();
}
@Override
protected void doMasterOperation(final GetFieldMappingsRequest request, final ClusterState state, final ActionListener<GetFieldMappingsResponse> listener) throws ElasticsearchException {
listener.onResponse(new GetFieldMappingsResponse(findMappings(request.indices(), request.types(), request.fields(), request.includeDefaults())));
}
private ImmutableMap<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> findMappings(String[] concreteIndices,
final String[] types,
final String[] fields,
boolean includeDefaults) {
assert types != null;
assert concreteIndices != null;
if (concreteIndices.length == 0) {
return ImmutableMap.of();
}
boolean isProbablySingleFieldRequest = concreteIndices.length == 1 && types.length == 1 && fields.length == 1;
ImmutableMap.Builder<String, ImmutableMap<String, ImmutableMap<String, FieldMappingMetaData>>> indexMapBuilder = ImmutableMap.builder();
Sets.SetView<String> intersection = Sets.intersection(Sets.newHashSet(concreteIndices), indicesService.indices());
for (String index : intersection) {
IndexService indexService = indicesService.indexService(index);
Collection<String> typeIntersection;
if (types.length == 0) {
typeIntersection = indexService.mapperService().types();
} else {
typeIntersection = Collections2.filter(indexService.mapperService().types(), new Predicate<String>() {
@Override
public boolean apply(String type) {
return Regex.simpleMatch(types, type);
}
});
}
MapBuilder<String, ImmutableMap<String, FieldMappingMetaData>> typeMappings = new MapBuilder<String, ImmutableMap<String, FieldMappingMetaData>>();
for (String type : typeIntersection) {
DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
ImmutableMap<String, FieldMappingMetaData> fieldMapping = findFieldMappingsByType(documentMapper, fields, includeDefaults, isProbablySingleFieldRequest);
if (!fieldMapping.isEmpty()) {
typeMappings.put(type, fieldMapping);
}
}
if (!typeMappings.isEmpty()) {
indexMapBuilder.put(index, typeMappings.immutableMap());
}
}
return indexMapBuilder.build();
}
private static final ToXContent.Params includeDefaultsParams = new ToXContent.Params() {
final static String INCLUDE_DEFAULTS = "include_defaults";
@Override
public String param(String key) {
if (INCLUDE_DEFAULTS.equals(key)) {
return "true";
}
return null;
}
@Override
public String param(String key, String defaultValue) {
if (INCLUDE_DEFAULTS.equals(key)) {
return "true";
}
return defaultValue;
}
@Override
public boolean paramAsBoolean(String key, boolean defaultValue) {
if (INCLUDE_DEFAULTS.equals(key)) {
return true;
}
return defaultValue;
}
public Boolean paramAsBoolean(String key, Boolean defaultValue) {
if (INCLUDE_DEFAULTS.equals(key)) {
return true;
}
return defaultValue;
}
@Override @Deprecated
public Boolean paramAsBooleanOptional(String key, Boolean defaultValue) {
return paramAsBoolean(key, defaultValue);
}
};
private ImmutableMap<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, String[] fields,
boolean includeDefaults, boolean isProbablySingleFieldRequest) throws ElasticsearchException {
MapBuilder<String, FieldMappingMetaData> fieldMappings = new MapBuilder<String, FieldMappingMetaData>();
ImmutableList<FieldMapper> allFieldMappers = documentMapper.mappers().mappers();
for (String field : fields) {
if (Regex.isMatchAllPattern(field)) {
for (FieldMapper fieldMapper : allFieldMappers) {
addFieldMapper(fieldMapper.names().fullName(), fieldMapper, fieldMappings, includeDefaults);
}
} else if (Regex.isSimpleMatchPattern(field)) {
// go through the field mappers 3 times, to make sure we give preference to the resolve order: full name, index name, name.
// also make sure we only store each mapper once.
boolean[] resolved = new boolean[allFieldMappers.size()];
for (int i = 0; i < allFieldMappers.size(); i++) {
FieldMapper fieldMapper = allFieldMappers.get(i);
if (Regex.simpleMatch(field, fieldMapper.names().fullName())) {
addFieldMapper(fieldMapper.names().fullName(), fieldMapper, fieldMappings, includeDefaults);
resolved[i] = true;
}
}
for (int i = 0; i < allFieldMappers.size(); i++) {
if (resolved[i]) {
continue;
}
FieldMapper fieldMapper = allFieldMappers.get(i);
if (Regex.simpleMatch(field, fieldMapper.names().indexName())) {
addFieldMapper(fieldMapper.names().indexName(), fieldMapper, fieldMappings, includeDefaults);
resolved[i] = true;
}
}
for (int i = 0; i < allFieldMappers.size(); i++) {
if (resolved[i]) {
continue;
}
FieldMapper fieldMapper = allFieldMappers.get(i);
if (Regex.simpleMatch(field, fieldMapper.names().name())) {
addFieldMapper(fieldMapper.names().name(), fieldMapper, fieldMappings, includeDefaults);
resolved[i] = true;
}
}
} else {
// not a pattern
FieldMapper fieldMapper = documentMapper.mappers().smartNameFieldMapper(field);
if (fieldMapper != null) {
addFieldMapper(field, fieldMapper, fieldMappings, includeDefaults);
} else if (isProbablySingleFieldRequest) {
fieldMappings.put(field, FieldMappingMetaData.NULL);
}
}
}
return fieldMappings.immutableMap();
}
private void addFieldMapper(String field, FieldMapper fieldMapper, MapBuilder<String, FieldMappingMetaData> fieldMappings, boolean includeDefaults) {
if (fieldMappings.containsKey(field)) {
return;
}
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.startObject();
fieldMapper.toXContent(builder, includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS);
builder.endObject();
fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.names().fullName(), builder.bytes()));
} catch (IOException e) {
throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_mapping_get_TransportGetFieldMappingsAction.java
|
93 |
@SuppressWarnings("serial")
static final class ReduceKeysTask<K,V>
extends BulkTask<K,V,K> {
final BiFun<? super K, ? super K, ? extends K> reducer;
K result;
ReduceKeysTask<K,V> rights, nextRight;
ReduceKeysTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
ReduceKeysTask<K,V> nextRight,
BiFun<? super K, ? super K, ? extends K> reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.reducer = reducer;
}
public final K getRawResult() { return result; }
public final void compute() {
final BiFun<? super K, ? super K, ? extends K> reducer;
if ((reducer = this.reducer) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new ReduceKeysTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, reducer)).fork();
}
K r = null;
for (Node<K,V> p; (p = advance()) != null; ) {
K u = p.key;
r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
}
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") ReduceKeysTask<K,V>
t = (ReduceKeysTask<K,V>)c,
s = t.rights;
while (s != null) {
K tr, sr;
if ((sr = s.result) != null)
t.result = (((tr = t.result) == null) ? sr :
reducer.apply(tr, sr));
s = t.rights = s.nextRight;
}
}
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
79 |
EQUAL {
@Override
public boolean isValidValueType(Class<?> clazz) {
return true;
}
@Override
public boolean isValidCondition(Object condition) {
return true;
}
@Override
public boolean evaluate(Object value, Object condition) {
if (condition==null) {
return value==null;
} else {
return condition.equals(value);
}
}
@Override
public String toString() {
return "=";
}
@Override
public TitanPredicate negate() {
return NOT_EQUAL;
}
},
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Cmp.java
|
0 |
public final class StandaloneRandomizedContext {
private StandaloneRandomizedContext() {
}
/**
* Creates a new {@link RandomizedContext} associated to the current thread
*/
public static void createRandomizedContext(Class<?> testClass, Randomness runnerRandomness) {
//the randomized runner is passed in as null, which is fine as long as we don't try to access it afterwards
RandomizedContext randomizedContext = RandomizedContext.create(Thread.currentThread().getThreadGroup(), testClass, null);
randomizedContext.push(runnerRandomness.clone(Thread.currentThread()));
}
/**
* Destroys the {@link RandomizedContext} associated to the current thread
*/
public static void disposeRandomizedContext() {
RandomizedContext.current().dispose();
}
public static void pushRandomness(Randomness randomness) {
RandomizedContext.current().push(randomness);
}
public static void popAndDestroy() {
RandomizedContext.current().popAndDestroy();
}
/**
* Returns the string formatted seed associated to the current thread's randomized context
*/
public static String getSeedAsString() {
return SeedUtils.formatSeed(RandomizedContext.current().getRandomness().getSeed());
}
/**
* Util method to extract the seed out of a {@link Randomness} instance
*/
public static long getSeed(Randomness randomness) {
return randomness.getSeed();
}
}
| 0true
|
src_test_java_com_carrotsearch_randomizedtesting_StandaloneRandomizedContext.java
|
41 |
public class Stats {
public int waiting_requests;
public int threads;
public int uptime; //seconds
public long cmd_get;
public long cmd_set;
public long cmd_touch;
public long get_hits;
public long get_misses;
public long delete_hits;
public long delete_misses;
public long incr_hits;
public long incr_misses;
public long decr_hits;
public long decr_misses;
public long bytes;
public int curr_connections;
public int total_connections;
// public Stats(int uptime, int threads, long get_misses, long get_hits, long cmd_set, long cmd_get, long bytes) {
// this.uptime = uptime;
// this.threads = threads;
// this.get_misses = get_misses;
// this.get_hits = get_hits;
// this.cmd_set = cmd_set;
// this.cmd_get = cmd_get;
// this.bytes = bytes;
// }
public Stats() {
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_memcache_Stats.java
|
2,453 |
public class MultiMapService implements ManagedService, RemoteService,
MigrationAwareService, EventPublishingService<MultiMapEvent, EventListener>, TransactionalService {
public static final String SERVICE_NAME = "hz:impl:multiMapService";
private static final int STATS_MAP_INITIAL_CAPACITY = 1000;
private static final int REPLICA_ADDRESS_TRY_COUNT = 3;
private static final int REPLICA_ADDRESS_SLEEP_WAIT_MILLIS = 1000;
private final NodeEngine nodeEngine;
private final MultiMapPartitionContainer[] partitionContainers;
private final ConcurrentMap<String, LocalMultiMapStatsImpl> statsMap
= new ConcurrentHashMap<String, LocalMultiMapStatsImpl>(STATS_MAP_INITIAL_CAPACITY);
private final ConstructorFunction<String, LocalMultiMapStatsImpl> localMultiMapStatsConstructorFunction
= new ConstructorFunction<String, LocalMultiMapStatsImpl>() {
public LocalMultiMapStatsImpl createNew(String key) {
return new LocalMultiMapStatsImpl();
}
};
public MultiMapService(NodeEngine nodeEngine) {
this.nodeEngine = nodeEngine;
int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
partitionContainers = new MultiMapPartitionContainer[partitionCount];
}
public void init(final NodeEngine nodeEngine, Properties properties) {
int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
for (int partition = 0; partition < partitionCount; partition++) {
partitionContainers[partition] = new MultiMapPartitionContainer(this, partition);
}
final LockService lockService = nodeEngine.getSharedService(LockService.SERVICE_NAME);
if (lockService != null) {
lockService.registerLockStoreConstructor(SERVICE_NAME, new ConstructorFunction<ObjectNamespace, LockStoreInfo>() {
public LockStoreInfo createNew(final ObjectNamespace key) {
String name = key.getObjectName();
final MultiMapConfig multiMapConfig = nodeEngine.getConfig().findMultiMapConfig(name);
return new LockStoreInfo() {
public int getBackupCount() {
return multiMapConfig.getSyncBackupCount();
}
public int getAsyncBackupCount() {
return multiMapConfig.getAsyncBackupCount();
}
};
}
});
}
}
public void reset() {
for (MultiMapPartitionContainer container : partitionContainers) {
if (container != null) {
container.destroy();
}
}
}
public void shutdown(boolean terminate) {
reset();
for (int i = 0; i < partitionContainers.length; i++) {
partitionContainers[i] = null;
}
}
public MultiMapContainer getOrCreateCollectionContainer(int partitionId, String name) {
return partitionContainers[partitionId].getOrCreateMultiMapContainer(name);
}
public MultiMapPartitionContainer getPartitionContainer(int partitionId) {
return partitionContainers[partitionId];
}
public DistributedObject createDistributedObject(String name) {
return new ObjectMultiMapProxy(this, nodeEngine, name);
}
public void destroyDistributedObject(String name) {
for (MultiMapPartitionContainer container : partitionContainers) {
if (container != null) {
container.destroyCollection(name);
}
}
nodeEngine.getEventService().deregisterAllListeners(SERVICE_NAME, name);
}
public Set<Data> localKeySet(String name) {
Set<Data> keySet = new HashSet<Data>();
ClusterServiceImpl clusterService = (ClusterServiceImpl) nodeEngine.getClusterService();
Address thisAddress = clusterService.getThisAddress();
for (int i = 0; i < nodeEngine.getPartitionService().getPartitionCount(); i++) {
InternalPartition partition = nodeEngine.getPartitionService().getPartition(i);
MultiMapPartitionContainer partitionContainer = getPartitionContainer(i);
MultiMapContainer multiMapContainer = partitionContainer.getCollectionContainer(name);
if (multiMapContainer == null) {
continue;
}
if (thisAddress.equals(partition.getOwnerOrNull())) {
keySet.addAll(multiMapContainer.keySet());
}
}
getLocalMultiMapStatsImpl(name).incrementOtherOperations();
return keySet;
}
public SerializationService getSerializationService() {
return nodeEngine.getSerializationService();
}
public NodeEngine getNodeEngine() {
return nodeEngine;
}
public String addListener(String name, EventListener listener, Data key, boolean includeValue, boolean local) {
EventService eventService = nodeEngine.getEventService();
EventRegistration registration;
final MultiMapEventFilter filter = new MultiMapEventFilter(includeValue, key);
if (local) {
registration = eventService.registerLocalListener(SERVICE_NAME, name, filter, listener);
} else {
registration = eventService.registerListener(SERVICE_NAME, name, filter, listener);
}
return registration.getId();
}
public boolean removeListener(String name, String registrationId) {
EventService eventService = nodeEngine.getEventService();
return eventService.deregisterListener(SERVICE_NAME, name, registrationId);
}
public void dispatchEvent(MultiMapEvent event, EventListener listener) {
EntryListener entryListener = (EntryListener) listener;
EntryEvent entryEvent = new EntryEvent(event.getName(), nodeEngine.getClusterService().getMember(event.getCaller()),
event.getEventType().getType(), nodeEngine.toObject(event.getKey()), nodeEngine.toObject(event.getValue()));
if (event.getEventType().equals(EntryEventType.ADDED)) {
entryListener.entryAdded(entryEvent);
} else if (event.getEventType().equals(EntryEventType.REMOVED)) {
entryListener.entryRemoved(entryEvent);
}
getLocalMultiMapStatsImpl(event.getName()).incrementReceivedEvents();
}
public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
}
public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
int replicaIndex = event.getReplicaIndex();
final MultiMapPartitionContainer partitionContainer = partitionContainers[event.getPartitionId()];
if (partitionContainer == null) {
return null;
}
Map<String, Map> map = new HashMap<String, Map>(partitionContainer.containerMap.size());
for (Map.Entry<String, MultiMapContainer> entry : partitionContainer.containerMap.entrySet()) {
String name = entry.getKey();
MultiMapContainer container = entry.getValue();
if (container.config.getTotalBackupCount() < replicaIndex) {
continue;
}
map.put(name, container.multiMapWrappers);
}
if (map.isEmpty()) {
return null;
}
return new MultiMapMigrationOperation(map);
}
public void insertMigratedData(int partitionId, Map<String, Map> map) {
for (Map.Entry<String, Map> entry : map.entrySet()) {
String name = entry.getKey();
MultiMapContainer container = getOrCreateCollectionContainer(partitionId, name);
Map<Data, MultiMapWrapper> collections = entry.getValue();
container.multiMapWrappers.putAll(collections);
}
}
private void clearMigrationData(int partitionId) {
final MultiMapPartitionContainer partitionContainer = partitionContainers[partitionId];
if (partitionContainer != null) {
partitionContainer.containerMap.clear();
}
}
public void commitMigration(PartitionMigrationEvent event) {
if (event.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
clearMigrationData(event.getPartitionId());
}
}
public void rollbackMigration(PartitionMigrationEvent event) {
clearMigrationData(event.getPartitionId());
}
public void clearPartitionReplica(int partitionId) {
clearMigrationData(partitionId);
}
public LocalMapStats createStats(String name) {
LocalMultiMapStatsImpl stats = getLocalMultiMapStatsImpl(name);
long ownedEntryCount = 0;
long backupEntryCount = 0;
long hits = 0;
long lockedEntryCount = 0;
ClusterServiceImpl clusterService = (ClusterServiceImpl) nodeEngine.getClusterService();
Address thisAddress = clusterService.getThisAddress();
for (int i = 0; i < nodeEngine.getPartitionService().getPartitionCount(); i++) {
InternalPartition partition = nodeEngine.getPartitionService().getPartition(i);
MultiMapPartitionContainer partitionContainer = getPartitionContainer(i);
MultiMapContainer multiMapContainer = partitionContainer.getCollectionContainer(name);
if (multiMapContainer == null) {
continue;
}
Address owner = partition.getOwnerOrNull();
if (owner != null) {
if (owner.equals(thisAddress)) {
lockedEntryCount += multiMapContainer.getLockedCount();
for (MultiMapWrapper wrapper : multiMapContainer.multiMapWrappers.values()) {
hits += wrapper.getHits();
ownedEntryCount += wrapper.getCollection(false).size();
}
} else {
int backupCount = multiMapContainer.config.getTotalBackupCount();
for (int j = 1; j <= backupCount; j++) {
Address replicaAddress = partition.getReplicaAddress(j);
int memberSize = nodeEngine.getClusterService().getMembers().size();
int tryCount = REPLICA_ADDRESS_TRY_COUNT;
// wait if the partition table is not updated yet
while (memberSize > backupCount && replicaAddress == null && tryCount-- > 0) {
try {
Thread.sleep(REPLICA_ADDRESS_SLEEP_WAIT_MILLIS);
} catch (InterruptedException e) {
throw ExceptionUtil.rethrow(e);
}
replicaAddress = partition.getReplicaAddress(j);
}
if (replicaAddress != null && replicaAddress.equals(thisAddress)) {
for (MultiMapWrapper wrapper : multiMapContainer.multiMapWrappers.values()) {
backupEntryCount += wrapper.getCollection(false).size();
}
}
}
}
}
}
stats.setOwnedEntryCount(ownedEntryCount);
stats.setBackupEntryCount(backupEntryCount);
stats.setHits(hits);
stats.setLockedEntryCount(lockedEntryCount);
return stats;
}
public LocalMultiMapStatsImpl getLocalMultiMapStatsImpl(String name) {
return ConcurrencyUtil.getOrPutIfAbsent(statsMap, name, localMultiMapStatsConstructorFunction);
}
public <T extends TransactionalObject> T createTransactionalObject(String name, TransactionSupport transaction) {
return (T) new TransactionalMultiMapProxy(nodeEngine, this, name, transaction);
}
public void rollbackTransaction(String transactionId) {
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_multimap_MultiMapService.java
|
210 |
@Deprecated
public class HydratedCacheElement extends Hashtable<String, Object> {
private static final long serialVersionUID = 1L;
public Object getCacheElementItem(String elementItemName, Serializable parentKey) {
return get(elementItemName + "_" + parentKey);
}
public Object putCacheElementItem(String elementItemName, Serializable parentKey, Object value) {
return put(elementItemName +"_"+parentKey, value);
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_cache_engine_HydratedCacheElement.java
|
147 |
public class AtomicDoubleArray implements java.io.Serializable {
private static final long serialVersionUID = -2308431214976778248L;
private final transient long[] array;
private long checkedByteOffset(int i) {
if (i < 0 || i >= array.length)
throw new IndexOutOfBoundsException("index " + i);
return byteOffset(i);
}
private static long byteOffset(int i) {
return ((long) i << shift) + base;
}
/**
* Creates a new {@code AtomicDoubleArray} of the given length,
* with all elements initially zero.
*
* @param length the length of the array
*/
public AtomicDoubleArray(int length) {
array = new long[length];
}
/**
* Creates a new {@code AtomicDoubleArray} with the same length
* as, and all elements copied from, the given array.
*
* @param array the array to copy elements from
* @throws NullPointerException if array is null
*/
public AtomicDoubleArray(double[] array) {
// Visibility guaranteed by final field guarantees
final int len = array.length;
final long[] a = new long[len];
for (int i = 0; i < len; i++)
a[i] = doubleToRawLongBits(array[i]);
this.array = a;
}
/**
* Returns the length of the array.
*
* @return the length of the array
*/
public final int length() {
return array.length;
}
/**
* Gets the current value at position {@code i}.
*
* @param i the index
* @return the current value
*/
public final double get(int i) {
return longBitsToDouble(getRaw(checkedByteOffset(i)));
}
private long getRaw(long offset) {
return unsafe.getLongVolatile(array, offset);
}
/**
* Sets the element at position {@code i} to the given value.
*
* @param i the index
* @param newValue the new value
*/
public final void set(int i, double newValue) {
long next = doubleToRawLongBits(newValue);
unsafe.putLongVolatile(array, checkedByteOffset(i), next);
}
/**
* Eventually sets the element at position {@code i} to the given value.
*
* @param i the index
* @param newValue the new value
*/
public final void lazySet(int i, double newValue) {
long next = doubleToRawLongBits(newValue);
unsafe.putOrderedLong(array, checkedByteOffset(i), next);
}
/**
* Atomically sets the element at position {@code i} to the given value
* and returns the old value.
*
* @param i the index
* @param newValue the new value
* @return the previous value
*/
public final double getAndSet(int i, double newValue) {
long next = doubleToRawLongBits(newValue);
long offset = checkedByteOffset(i);
while (true) {
long current = getRaw(offset);
if (compareAndSetRaw(offset, current, next))
return longBitsToDouble(current);
}
}
/**
* Atomically sets the element at position {@code i} to the given
* updated value
* if the current value is <a href="#bitEquals">bitwise equal</a>
* to the expected value.
*
* @param i the index
* @param expect the expected value
* @param update the new value
* @return true if successful. False return indicates that
* the actual value was not equal to the expected value.
*/
public final boolean compareAndSet(int i, double expect, double update) {
return compareAndSetRaw(checkedByteOffset(i),
doubleToRawLongBits(expect),
doubleToRawLongBits(update));
}
private boolean compareAndSetRaw(long offset, long expect, long update) {
return unsafe.compareAndSwapLong(array, offset, expect, update);
}
/**
* Atomically sets the element at position {@code i} to the given
* updated value
* if the current value is <a href="#bitEquals">bitwise equal</a>
* to the expected value.
*
* <p><a
* href="http://download.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/package-summary.html#Spurious">
* May fail spuriously and does not provide ordering guarantees</a>,
* so is only rarely an appropriate alternative to {@code compareAndSet}.
*
* @param i the index
* @param expect the expected value
* @param update the new value
* @return true if successful
*/
public final boolean weakCompareAndSet(int i, double expect, double update) {
return compareAndSet(i, expect, update);
}
/**
* Atomically adds the given value to the element at index {@code i}.
*
* @param i the index
* @param delta the value to add
* @return the previous value
*/
public final double getAndAdd(int i, double delta) {
long offset = checkedByteOffset(i);
while (true) {
long current = getRaw(offset);
double currentVal = longBitsToDouble(current);
double nextVal = currentVal + delta;
long next = doubleToRawLongBits(nextVal);
if (compareAndSetRaw(offset, current, next))
return currentVal;
}
}
/**
* Atomically adds the given value to the element at index {@code i}.
*
* @param i the index
* @param delta the value to add
* @return the updated value
*/
public double addAndGet(int i, double delta) {
long offset = checkedByteOffset(i);
while (true) {
long current = getRaw(offset);
double currentVal = longBitsToDouble(current);
double nextVal = currentVal + delta;
long next = doubleToRawLongBits(nextVal);
if (compareAndSetRaw(offset, current, next))
return nextVal;
}
}
/**
* Returns the String representation of the current values of array.
* @return the String representation of the current values of array
*/
public String toString() {
int iMax = array.length - 1;
if (iMax == -1)
return "[]";
// Double.toString(Math.PI).length() == 17
StringBuilder b = new StringBuilder((17 + 2) * (iMax + 1));
b.append('[');
for (int i = 0;; i++) {
b.append(longBitsToDouble(getRaw(byteOffset(i))));
if (i == iMax)
return b.append(']').toString();
b.append(',').append(' ');
}
}
/**
* Saves the state to a stream (that is, serializes it).
*
* @param s the stream
* @throws java.io.IOException if an I/O error occurs
* @serialData The length of the array is emitted (int), followed by all
* of its elements (each a {@code double}) in the proper order.
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
s.defaultWriteObject();
// Write out array length
int length = length();
s.writeInt(length);
// Write out all elements in the proper order.
for (int i = 0; i < length; i++)
s.writeDouble(get(i));
}
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
* @throws ClassNotFoundException if the class of a serialized object
* could not be found
* @throws java.io.IOException if an I/O error occurs
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
// Read in array length and allocate array
int length = s.readInt();
unsafe.putObjectVolatile(this, arrayOffset, new long[length]);
// Read in all elements in the proper order.
for (int i = 0; i < length; i++)
set(i, s.readDouble());
}
// Unsafe mechanics
private static final sun.misc.Unsafe unsafe = getUnsafe();
private static final long arrayOffset;
private static final int base = unsafe.arrayBaseOffset(long[].class);
private static final int shift;
static {
try {
Class<?> k = AtomicDoubleArray.class;
arrayOffset = unsafe.objectFieldOffset
(k.getDeclaredField("array"));
int scale = unsafe.arrayIndexScale(long[].class);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
shift = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
private static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
}
| 0true
|
src_main_java_jsr166e_extra_AtomicDoubleArray.java
|
629 |
public class GatewaySnapshotStatus {
public static enum Stage {
NONE((byte) 0),
INDEX((byte) 1),
TRANSLOG((byte) 2),
FINALIZE((byte) 3),
DONE((byte) 4),
FAILURE((byte) 5);
private final byte value;
Stage(byte value) {
this.value = value;
}
public byte value() {
return this.value;
}
public static Stage fromValue(byte value) {
if (value == 0) {
return Stage.NONE;
} else if (value == 1) {
return Stage.INDEX;
} else if (value == 2) {
return Stage.TRANSLOG;
} else if (value == 3) {
return Stage.FINALIZE;
} else if (value == 4) {
return Stage.DONE;
} else if (value == 5) {
return Stage.FAILURE;
}
throw new ElasticsearchIllegalArgumentException("No stage found for [" + value + "]");
}
}
final Stage stage;
final long startTime;
final long time;
final long indexSize;
final int expectedNumberOfOperations;
public GatewaySnapshotStatus(Stage stage, long startTime, long time, long indexSize, int expectedNumberOfOperations) {
this.stage = stage;
this.startTime = startTime;
this.time = time;
this.indexSize = indexSize;
this.expectedNumberOfOperations = expectedNumberOfOperations;
}
public Stage getStage() {
return this.stage;
}
public long getStartTime() {
return this.startTime;
}
public TimeValue getTime() {
return TimeValue.timeValueMillis(time);
}
public ByteSizeValue getIndexSize() {
return new ByteSizeValue(indexSize);
}
public int getExpectedNumberOfOperations() {
return expectedNumberOfOperations;
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_status_GatewaySnapshotStatus.java
|
5 |
public class NetworkReceiver
implements MessageSource, Lifecycle
{
public interface Configuration
{
HostnamePort clusterServer();
int defaultPort();
String name(); // Name of this cluster instance. Null in most cases, but tools may use e.g. "Backup"
}
public interface NetworkChannelsListener
{
void listeningAt( URI me );
void channelOpened( URI to );
void channelClosed( URI to );
}
public static final String CLUSTER_SCHEME = "cluster";
public static final String INADDR_ANY = "0.0.0.0";
private ChannelGroup channels;
// Receiving
private NioServerSocketChannelFactory nioChannelFactory;
private ServerBootstrap serverBootstrap;
private Iterable<MessageProcessor> processors = Listeners.newListeners();
private Configuration config;
private StringLogger msgLog;
private Map<URI, Channel> connections = new ConcurrentHashMap<URI, Channel>();
private Iterable<NetworkChannelsListener> listeners = Listeners.newListeners();
volatile boolean bindingDetected = false;
public NetworkReceiver( Configuration config, Logging logging )
{
this.config = config;
this.msgLog = logging.getMessagesLog( getClass() );
}
@Override
public void init()
throws Throwable
{
ThreadRenamingRunnable.setThreadNameDeterminer( ThreadNameDeterminer.CURRENT );
}
@Override
public void start()
throws Throwable
{
channels = new DefaultChannelGroup();
// Listen for incoming connections
nioChannelFactory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool( new NamedThreadFactory( "Cluster boss" ) ),
Executors.newFixedThreadPool( 2, new NamedThreadFactory( "Cluster worker" ) ), 2 );
serverBootstrap = new ServerBootstrap( nioChannelFactory );
serverBootstrap.setOption("child.tcpNoDelay", true);
serverBootstrap.setPipelineFactory( new NetworkNodePipelineFactory() );
int[] ports = config.clusterServer().getPorts();
int minPort = ports[0];
int maxPort = ports.length == 2 ? ports[1] : minPort;
// Try all ports in the given range
listen( minPort, maxPort );
}
@Override
public void stop()
throws Throwable
{
msgLog.debug( "Shutting down NetworkReceiver" );
channels.close().awaitUninterruptibly();
serverBootstrap.releaseExternalResources();
msgLog.debug( "Shutting down NetworkReceiver complete" );
}
@Override
public void shutdown()
throws Throwable
{
}
private void listen( int minPort, int maxPort )
throws URISyntaxException, ChannelException, UnknownHostException
{
ChannelException ex = null;
for ( int checkPort = minPort; checkPort <= maxPort; checkPort++ )
{
try
{
InetAddress host;
String address = config.clusterServer().getHost();
InetSocketAddress localAddress;
if ( address == null || address.equals( INADDR_ANY ))
{
localAddress = new InetSocketAddress( checkPort );
}
else
{
host = InetAddress.getByName( address );
localAddress = new InetSocketAddress( host, checkPort );
}
Channel listenChannel = serverBootstrap.bind( localAddress );
listeningAt( getURI( localAddress ) );
channels.add( listenChannel );
return;
}
catch ( ChannelException e )
{
ex = e;
}
}
nioChannelFactory.releaseExternalResources();
throw ex;
}
// MessageSource implementation
public void addMessageProcessor( MessageProcessor processor )
{
processors = Listeners.addListener( processor, processors );
}
public void receive( Message message )
{
for ( MessageProcessor processor : processors )
{
try
{
if ( !processor.process( message ) )
{
break;
}
}
catch ( Exception e )
{
// Ignore
}
}
}
private URI getURI( InetSocketAddress address ) throws URISyntaxException
{
String uri;
if (address.getAddress().getHostAddress().startsWith( "0" ))
uri = CLUSTER_SCHEME + "://0.0.0.0:"+address.getPort(); // Socket.toString() already prepends a /
else
uri = CLUSTER_SCHEME + "://" + address.getAddress().getHostAddress()+":"+address.getPort(); // Socket.toString() already prepends a /
// Add name if given
if (config.name() != null)
uri += "/?name="+config.name();
return URI.create( uri );
}
public void listeningAt( final URI me )
{
Listeners.notifyListeners( listeners, new Listeners.Notification<NetworkChannelsListener>()
{
@Override
public void notify( NetworkChannelsListener listener )
{
listener.listeningAt( me );
}
} );
}
protected void openedChannel( final URI uri, Channel ctxChannel )
{
connections.put( uri, ctxChannel );
Listeners.notifyListeners( listeners, new Listeners.Notification<NetworkChannelsListener>()
{
@Override
public void notify( NetworkChannelsListener listener )
{
listener.channelOpened( uri );
}
} );
}
protected void closedChannel( final URI uri )
{
Channel channel = connections.remove( uri );
if ( channel != null )
{
channel.close();
}
Listeners.notifyListeners( listeners, new Listeners.Notification<NetworkChannelsListener>()
{
@Override
public void notify( NetworkChannelsListener listener )
{
listener.channelClosed( uri );
}
} );
}
public void addNetworkChannelsListener( NetworkChannelsListener listener )
{
listeners = Listeners.addListener( listener, listeners );
}
private class NetworkNodePipelineFactory
implements ChannelPipelineFactory
{
@Override
public ChannelPipeline getPipeline() throws Exception
{
ChannelPipeline pipeline = Channels.pipeline();
pipeline.addLast( "frameDecoder",new ObjectDecoder( 1024 * 1000, NetworkNodePipelineFactory.this.getClass().getClassLoader() ) );
pipeline.addLast( "serverHandler", new MessageReceiver() );
return pipeline;
}
}
private class MessageReceiver
extends SimpleChannelHandler
{
@Override
public void channelOpen( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
{
Channel ctxChannel = ctx.getChannel();
openedChannel( getURI( (InetSocketAddress) ctxChannel.getRemoteAddress() ), ctxChannel );
channels.add( ctxChannel );
}
@Override
public void messageReceived( ChannelHandlerContext ctx, MessageEvent event ) throws Exception
{
if (!bindingDetected)
{
InetSocketAddress local = ((InetSocketAddress)event.getChannel().getLocalAddress());
bindingDetected = true;
listeningAt( getURI( local ) );
}
final Message message = (Message) event.getMessage();
// Fix FROM header since sender cannot know it's correct IP/hostname
InetSocketAddress remote = (InetSocketAddress) ctx.getChannel().getRemoteAddress();
String remoteAddress = remote.getAddress().getHostAddress();
URI fromHeader = URI.create( message.getHeader( Message.FROM ) );
fromHeader = URI.create(fromHeader.getScheme()+"://"+remoteAddress + ":" + fromHeader.getPort());
message.setHeader( Message.FROM, fromHeader.toASCIIString() );
msgLog.debug( "Received:" + message );
receive( message );
}
@Override
public void channelDisconnected( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
{
closedChannel( getURI( (InetSocketAddress) ctx.getChannel().getRemoteAddress() ) );
}
@Override
public void channelClosed( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
{
closedChannel( getURI( (InetSocketAddress) ctx.getChannel().getRemoteAddress() ) );
channels.remove( ctx.getChannel() );
}
@Override
public void exceptionCaught( ChannelHandlerContext ctx, ExceptionEvent e ) throws Exception
{
if ( !(e.getCause() instanceof ConnectException) )
{
msgLog.error( "Receive exception:", e.getCause() );
}
}
}
}
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_com_NetworkReceiver.java
|
304 |
public class MergeEhCacheManagerFactoryBean extends EhCacheManagerFactoryBean implements ApplicationContextAware {
private ApplicationContext applicationContext;
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
@javax.annotation.Resource(name="blMergedCacheConfigLocations")
protected Set<String> mergedCacheConfigLocations;
protected List<Resource> configLocations;
@Override
public void destroy() {
super.destroy();
try {
CacheManager cacheManager = getObject();
Field cacheManagerTimer = CacheManager.class.getDeclaredField("cacheManagerTimer");
cacheManagerTimer.setAccessible(true);
Object failSafeTimer = cacheManagerTimer.get(cacheManager);
Field timer = failSafeTimer.getClass().getDeclaredField("timer");
timer.setAccessible(true);
Object time = timer.get(failSafeTimer);
Field thread = time.getClass().getDeclaredField("thread");
thread.setAccessible(true);
Thread item = (Thread) thread.get(time);
item.setContextClassLoader(Thread.currentThread().getContextClassLoader().getParent());
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@PostConstruct
public void configureMergedItems() {
List<Resource> temp = new ArrayList<Resource>();
if (mergedCacheConfigLocations != null && !mergedCacheConfigLocations.isEmpty()) {
for (String location : mergedCacheConfigLocations) {
temp.add(applicationContext.getResource(location));
}
}
if (configLocations != null && !configLocations.isEmpty()) {
for (Resource resource : configLocations) {
temp.add(resource);
}
}
try {
MergeXmlConfigResource merge = new MergeXmlConfigResource();
ResourceInputStream[] sources = new ResourceInputStream[temp.size()];
int j=0;
for (Resource resource : temp) {
sources[j] = new ResourceInputStream(resource.getInputStream(), resource.getURL().toString());
j++;
}
setConfigLocation(merge.getMergedConfigResource(sources));
} catch (Exception e) {
throw new FatalBeanException("Unable to merge cache locations", e);
}
}
public void setConfigLocations(List<Resource> configLocations) throws BeansException {
this.configLocations = configLocations;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_cache_ehcache_MergeEhCacheManagerFactoryBean.java
|
268 |
public class OCommandManager {
private Map<String, Class<? extends OCommandRequest>> commandRequesters = new HashMap<String, Class<? extends OCommandRequest>>();
private Map<Class<? extends OCommandRequest>, Class<? extends OCommandExecutor>> commandReqExecMap = new HashMap<Class<? extends OCommandRequest>, Class<? extends OCommandExecutor>>();
private static OCommandManager instance = new OCommandManager();
protected OCommandManager() {
registerRequester("sql", OCommandSQL.class);
registerRequester("script", OCommandScript.class);
registerExecutor(OSQLAsynchQuery.class, OCommandExecutorSQLDelegate.class);
registerExecutor(OSQLSynchQuery.class, OCommandExecutorSQLDelegate.class);
registerExecutor(OCommandSQL.class, OCommandExecutorSQLDelegate.class);
registerExecutor(OCommandSQLResultset.class, OCommandExecutorSQLResultsetDelegate.class);
}
public OCommandManager registerRequester(final String iType, final Class<? extends OCommandRequest> iRequest) {
commandRequesters.put(iType, iRequest);
return this;
}
public boolean existsRequester(final String iType) {
return commandRequesters.containsKey(iType);
}
public OCommandRequest getRequester(final String iType) {
final Class<? extends OCommandRequest> reqClass = commandRequesters.get(iType);
if (reqClass == null)
throw new IllegalArgumentException("Cannot find a command requester for type: " + iType);
try {
return reqClass.newInstance();
} catch (Exception e) {
throw new IllegalArgumentException("Cannot create the command requester of class " + reqClass + " for type: " + iType, e);
}
}
public OCommandManager registerExecutor(final Class<? extends OCommandRequest> iRequest,
final Class<? extends OCommandExecutor> iExecutor) {
commandReqExecMap.put(iRequest, iExecutor);
return this;
}
public OCommandManager unregisterExecutor(final Class<? extends OCommandRequest> iRequest) {
commandReqExecMap.remove(iRequest);
return this;
}
public OCommandExecutor getExecutor(OCommandRequestInternal iCommand) {
final Class<? extends OCommandExecutor> executorClass = commandReqExecMap.get(iCommand.getClass());
if (executorClass == null)
throw new OCommandExecutorNotFoundException("Cannot find a command executor for the command request: " + iCommand);
try {
return executorClass.newInstance();
} catch (Exception e) {
throw new OCommandExecutionException("Cannot create the command executor of class " + executorClass
+ " for the command request: " + iCommand, e);
}
}
public static OCommandManager instance() {
return instance;
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_OCommandManager.java
|
3,248 |
public class AtomicLongPermission extends InstancePermission {
private static final int READ = 0x4;
private static final int MODIFY = 0x8;
private static final int ALL = READ | MODIFY | CREATE | DESTROY;
public AtomicLongPermission(String name, String... actions) {
super(name, actions);
}
@Override
protected int initMask(String[] actions) {
int mask = NONE;
for (String action : actions) {
if (ActionConstants.ACTION_ALL.equals(action)) {
return ALL;
}
if (ActionConstants.ACTION_CREATE.equals(action)) {
mask |= CREATE;
} else if (ActionConstants.ACTION_READ.equals(action)) {
mask |= READ;
} else if (ActionConstants.ACTION_MODIFY.equals(action)) {
mask |= MODIFY;
} else if (ActionConstants.ACTION_DESTROY.equals(action)) {
mask |= DESTROY;
}
}
return mask;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_security_permission_AtomicLongPermission.java
|
2,854 |
final class PromoteFromBackupOperation extends AbstractOperation
implements PartitionAwareOperation, MigrationCycleOperation {
@Override
public void run() throws Exception {
logPromotingPartition();
PartitionMigrationEvent event = createPartitionMigrationEvent();
sendToAllMigrationAwareServices(event);
}
private void sendToAllMigrationAwareServices(PartitionMigrationEvent event) {
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
for (MigrationAwareService service : nodeEngine.getServices(MigrationAwareService.class)) {
try {
service.beforeMigration(event);
service.commitMigration(event);
} catch (Throwable e) {
logMigrationError(e);
}
}
}
private PartitionMigrationEvent createPartitionMigrationEvent() {
int partitionId = getPartitionId();
return new PartitionMigrationEvent(MigrationEndpoint.DESTINATION, partitionId);
}
private void logMigrationError(Throwable e) {
ILogger logger = getLogger();
logger.warning("While promoting partition " + getPartitionId(), e);
}
private void logPromotingPartition() {
ILogger logger = getLogger();
if (logger.isFinestEnabled()) {
logger.finest("Promoting partition " + getPartitionId());
}
}
@Override
public boolean returnsResponse() {
return false;
}
@Override
public boolean validatesTarget() {
return false;
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
throw new UnsupportedOperationException();
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
throw new UnsupportedOperationException();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_partition_impl_PromoteFromBackupOperation.java
|
87 |
public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
149 |
public interface StructuredContentType extends Serializable {
/**
* Gets the primary key.
*
* @return the primary key
*/
@Nullable
public Long getId();
/**
* Sets the primary key.
*
* @param id the new primary key
*/
public void setId(@Nullable Long id);
/**
* Gets the name.
*
* @return the name
*/
@Nonnull
String getName();
/**
* Sets the name.
*/
void setName(@Nonnull String name);
/**
* Gets the description.
* @return
*/
@Nullable
String getDescription();
/**
* Sets the description.
*/
void setDescription(@Nullable String description);
/**
* Returns the template associated with this content type.
* @return
*/
@Nonnull
StructuredContentFieldTemplate getStructuredContentFieldTemplate();
/**
* Sets the template associated with this content type.
* @param scft
*/
void setStructuredContentFieldTemplate(@Nonnull StructuredContentFieldTemplate scft);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentType.java
|
15 |
exe.submit(new Runnable() {
private final int number = atomicInt.incrementAndGet();
@Override
public void run() {
try {
Thread.sleep(150);
} catch (InterruptedException e) {
e.printStackTrace();
}
System.out.println(number);
}
});
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestBed.java
|
160 |
private class DescendingItr extends AbstractItr {
Node<E> startNode() { return last(); }
Node<E> nextNode(Node<E> p) { return pred(p); }
}
| 0true
|
src_main_java_jsr166y_ConcurrentLinkedDeque.java
|
51 |
@Controller("blAdminAssetController")
@RequestMapping("/" + AdminAssetController.SECTION_KEY)
public class AdminAssetController extends AdminBasicEntityController {
protected static final String SECTION_KEY = "assets";
@Resource(name = "blAssetFormBuilderService")
protected AssetFormBuilderService formService;
@Resource(name = "blStaticAssetService")
protected StaticAssetService staticAssetService;
@Override
protected String getSectionKey(Map<String, String> pathVars) {
//allow external links to work for ToOne items
if (super.getSectionKey(pathVars) != null) {
return super.getSectionKey(pathVars);
}
return SECTION_KEY;
}
@Override
@SuppressWarnings("unchecked")
@RequestMapping(value = "", method = RequestMethod.GET)
public String viewEntityList(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@RequestParam MultiValueMap<String, String> requestParams) throws Exception {
String returnPath = super.viewEntityList(request, response, model, pathVars, requestParams);
// Remove the default add button and replace it with an upload asset button
List<EntityFormAction> mainActions = (List<EntityFormAction>) model.asMap().get("mainActions");
Iterator<EntityFormAction> actions = mainActions.iterator();
while (actions.hasNext()) {
EntityFormAction action = actions.next();
if (EntityFormAction.ADD.equals(action.getId())) {
actions.remove();
break;
}
}
mainActions.add(0, new EntityFormAction("UPLOAD_ASSET")
.withButtonClass("upload-asset")
.withIconClass("icon-camera")
.withDisplayText("Upload_Asset"));
// Change the listGrid view to one that has a hidden form for uploading the image.
model.addAttribute("viewType", "entityListWithUploadForm");
ListGrid listGrid = (ListGrid) model.asMap().get("listGrid");
formService.addImageThumbnailField(listGrid, "fullUrl");
return returnPath;
}
@Override
@RequestMapping(value = "/{id}", method = RequestMethod.GET)
public String viewEntityForm(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@PathVariable(value="id") String id) throws Exception {
model.addAttribute("cmsUrlPrefix", staticAssetService.getStaticAssetUrlPrefix());
return super.viewEntityForm(request, response, model, pathVars, id);
}
@Override
@RequestMapping(value = "/{id}", method = RequestMethod.POST)
public String saveEntity(HttpServletRequest request, HttpServletResponse response, Model model,
@PathVariable Map<String, String> pathVars,
@PathVariable(value="id") String id,
@ModelAttribute(value="entityForm") EntityForm entityForm, BindingResult result,
RedirectAttributes ra) throws Exception {
String templatePath = super.saveEntity(request, response, model, pathVars, id, entityForm, result, ra);
if (result.hasErrors()) {
model.addAttribute("cmsUrlPrefix", staticAssetService.getStaticAssetUrlPrefix());
}
return templatePath;
}
@Override
protected String getDefaultEntityType() {
return StaticAssetImpl.class.getName();
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_web_controller_AdminAssetController.java
|
442 |
static final class Fields {
static final XContentBuilderString VERSIONS = new XContentBuilderString("versions");
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString VM_NAME = new XContentBuilderString("vm_name");
static final XContentBuilderString VM_VERSION = new XContentBuilderString("vm_version");
static final XContentBuilderString VM_VENDOR = new XContentBuilderString("vm_vendor");
static final XContentBuilderString COUNT = new XContentBuilderString("count");
static final XContentBuilderString THREADS = new XContentBuilderString("threads");
static final XContentBuilderString MAX_UPTIME = new XContentBuilderString("max_uptime");
static final XContentBuilderString MAX_UPTIME_IN_MILLIS = new XContentBuilderString("max_uptime_in_millis");
static final XContentBuilderString MEM = new XContentBuilderString("mem");
static final XContentBuilderString HEAP_USED = new XContentBuilderString("heap_used");
static final XContentBuilderString HEAP_USED_IN_BYTES = new XContentBuilderString("heap_used_in_bytes");
static final XContentBuilderString HEAP_MAX = new XContentBuilderString("heap_max");
static final XContentBuilderString HEAP_MAX_IN_BYTES = new XContentBuilderString("heap_max_in_bytes");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsNodes.java
|
1,111 |
public class OSQLFunctionDistance extends OSQLFunctionAbstract {
public static final String NAME = "distance";
private final static double EARTH_RADIUS = 6371;
public OSQLFunctionDistance() {
super(NAME, 4, 5);
}
public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters, OCommandContext iContext) {
try {
double distance;
final double[] values = new double[4];
for (int i = 0; i < iParameters.length; ++i) {
if (iParameters[i] == null)
return null;
values[i] = ((Double) OType.convert(iParameters[i], Double.class)).doubleValue();
}
final double deltaLat = Math.toRadians(values[2] - values[0]);
final double deltaLon = Math.toRadians(values[3] - values[1]);
final double a = Math.pow(Math.sin(deltaLat / 2), 2) + Math.cos(Math.toRadians(values[0]))
* Math.cos(Math.toRadians(values[2])) * Math.pow(Math.sin(deltaLon / 2), 2);
distance = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a)) * EARTH_RADIUS;
return distance;
} catch (Exception e) {
return null;
}
}
public String getSyntax() {
return "Syntax error: distance(<field-x>,<field-y>,<x-value>,<y-value>[,<unit>])";
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_geo_OSQLFunctionDistance.java
|
2,550 |
private static final class ConstructorCache {
private final ConcurrentMap<ClassLoader, ConcurrentMap<String, WeakReference<Constructor>>> cache;
private ConstructorCache() {
// Guess 16 classloaders to not waste to much memory (16 is default concurrency level)
cache = new ConcurrentReferenceHashMap<ClassLoader, ConcurrentMap<String, WeakReference<Constructor>>>(16);
}
private <T> Constructor put(ClassLoader classLoader, String className, Constructor<T> constructor) {
ClassLoader cl = classLoader == null ? ClassLoaderUtil.class.getClassLoader() : classLoader;
ConcurrentMap<String, WeakReference<Constructor>> innerCache = cache.get(cl);
if (innerCache == null) {
// Let's guess a start of 100 classes per classloader
innerCache = new ConcurrentHashMap<String, WeakReference<Constructor>>(100);
ConcurrentMap<String, WeakReference<Constructor>> old = cache.putIfAbsent(cl, innerCache);
if (old != null) {
innerCache = old;
}
}
innerCache.put(className, new WeakReference<Constructor>(constructor));
return constructor;
}
public <T> Constructor<T> get(ClassLoader classLoader, String className) {
ConcurrentMap<String, WeakReference<Constructor>> innerCache = cache.get(classLoader);
if (innerCache == null) {
return null;
}
WeakReference<Constructor> reference = innerCache.get(className);
Constructor constructor = reference == null ? null : reference.get();
if (reference != null && constructor == null) {
innerCache.remove(className);
}
return (Constructor<T>) constructor;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_ClassLoaderUtil.java
|
626 |
public class DocsStatus {
long numDocs = 0;
long maxDoc = 0;
long deletedDocs = 0;
/**
* The number of docs.
*/
public long getNumDocs() {
return numDocs;
}
/**
* The max doc.
*/
public long getMaxDoc() {
return maxDoc;
}
/**
* The number of deleted docs in the index.
*/
public long getDeletedDocs() {
return deletedDocs;
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_status_DocsStatus.java
|
3,859 |
public class HasChildQueryParser implements QueryParser {
public static final String NAME = "has_child";
@Inject
public HasChildQueryParser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query innerQuery = null;
boolean queryFound = false;
float boost = 1.0f;
String childType = null;
ScoreType scoreType = null;
int shortCircuitParentDocSet = 8192;
String queryName = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
// TODO we need to set the type, but, `query` can come before `type`... (see HasChildFilterParser)
// since we switch types, make sure we change the context
String[] origTypes = QueryParseContext.setTypesWithPrevious(childType == null ? null : new String[]{childType});
try {
innerQuery = parseContext.parseInnerQuery();
queryFound = true;
} finally {
QueryParseContext.setTypes(origTypes);
}
} else {
throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName) || "child_type".equals(currentFieldName) || "childType".equals(currentFieldName)) {
childType = parser.text();
} else if ("_scope".equals(currentFieldName)) {
throw new QueryParsingException(parseContext.index(), "the [_scope] support in [has_child] query has been removed, use a filter as a facet_filter in the relevant global facet");
} else if ("score_type".equals(currentFieldName) || "scoreType".equals(currentFieldName)) {
String scoreTypeValue = parser.text();
if (!"none".equals(scoreTypeValue)) {
scoreType = ScoreType.fromString(scoreTypeValue);
}
} else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) {
String scoreModeValue = parser.text();
if (!"none".equals(scoreModeValue)) {
scoreType = ScoreType.fromString(scoreModeValue);
}
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("short_circuit_cutoff".equals(currentFieldName)) {
shortCircuitParentDocSet = parser.intValue();
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]");
}
}
}
if (!queryFound) {
throw new QueryParsingException(parseContext.index(), "[has_child] requires 'query' field");
}
if (innerQuery == null) {
return null;
}
if (childType == null) {
throw new QueryParsingException(parseContext.index(), "[has_child] requires 'type' field");
}
innerQuery.setBoost(boost);
DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
if (childDocMapper == null) {
throw new QueryParsingException(parseContext.index(), "[has_child] No mapping for for type [" + childType + "]");
}
if (!childDocMapper.parentFieldMapper().active()) {
throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] does not have parent mapping");
}
String parentType = childDocMapper.parentFieldMapper().type();
DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
if (parentDocMapper == null) {
throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] points to a non existent parent type [" + parentType + "]");
}
Filter nonNestedDocsFilter = null;
if (parentDocMapper.hasNestedObjects()) {
nonNestedDocsFilter = parseContext.cacheFilter(NonNestedDocsFilter.INSTANCE, null);
}
// wrap the query with type query
innerQuery = new XFilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null));
boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
Query query;
Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null);
if (!deleteByQuery && scoreType != null) {
query = new ChildrenQuery(parentType, childType, parentFilter, innerQuery, scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
} else {
query = new ChildrenConstantScoreQuery(innerQuery, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
if (deleteByQuery) {
query = new XConstantScoreQuery(new DeleteByQueryWrappingFilter(query));
}
}
if (queryName != null) {
parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query));
}
query.setBoost(boost);
return query;
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_HasChildQueryParser.java
|
32 |
static final class ParameterContextInformation
implements IContextInformation {
private final Declaration declaration;
private final ProducedReference producedReference;
private final ParameterList parameterList;
private final int argumentListOffset;
private final Unit unit;
private final boolean includeDefaulted;
// private final boolean inLinkedMode;
private final boolean namedInvocation;
private ParameterContextInformation(Declaration declaration,
ProducedReference producedReference, Unit unit,
ParameterList parameterList, int argumentListOffset,
boolean includeDefaulted, boolean namedInvocation) {
// boolean inLinkedMode
this.declaration = declaration;
this.producedReference = producedReference;
this.unit = unit;
this.parameterList = parameterList;
this.argumentListOffset = argumentListOffset;
this.includeDefaulted = includeDefaulted;
// this.inLinkedMode = inLinkedMode;
this.namedInvocation = namedInvocation;
}
@Override
public String getContextDisplayString() {
return "Parameters of '" + declaration.getName() + "'";
}
@Override
public Image getImage() {
return getImageForDeclaration(declaration);
}
@Override
public String getInformationDisplayString() {
List<Parameter> ps = getParameters(parameterList,
includeDefaulted, namedInvocation);
if (ps.isEmpty()) {
return "no parameters";
}
StringBuilder result = new StringBuilder();
for (Parameter p: ps) {
boolean isListedValues = namedInvocation &&
p==ps.get(ps.size()-1) &&
p.getModel() instanceof Value &&
p.getType()!=null &&
unit.isIterableParameterType(p.getType());
if (includeDefaulted || !p.isDefaulted() ||
isListedValues) {
if (producedReference==null) {
result.append(p.getName());
}
else {
ProducedTypedReference pr =
producedReference.getTypedParameter(p);
appendParameterContextInfo(result, pr, p, unit,
namedInvocation, isListedValues);
}
if (!isListedValues) {
result.append(namedInvocation ? "; " : ", ");
}
}
}
if (!namedInvocation && result.length()>0) {
result.setLength(result.length()-2);
}
return result.toString();
}
@Override
public boolean equals(Object that) {
if (that instanceof ParameterContextInformation) {
return ((ParameterContextInformation) that).declaration
.equals(declaration);
}
else {
return false;
}
}
int getArgumentListOffset() {
return argumentListOffset;
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_InvocationCompletionProposal.java
|
154 |
public class JMSArchivedStructuredContentPublisher implements ArchivedStructuredContentPublisher {
private JmsTemplate archiveStructuredContentTemplate;
private Destination archiveStructuredContentDestination;
@Override
public void processStructuredContentArchive(final StructuredContent sc, final String baseNameKey, final String baseTypeKey) {
archiveStructuredContentTemplate.send(archiveStructuredContentDestination, new MessageCreator() {
public Message createMessage(Session session) throws JMSException {
HashMap<String, String> objectMap = new HashMap<String,String>(2);
objectMap.put("nameKey", baseNameKey);
objectMap.put("typeKey", baseTypeKey);
return session.createObjectMessage(objectMap);
}
});
}
public JmsTemplate getArchiveStructuredContentTemplate() {
return archiveStructuredContentTemplate;
}
public void setArchiveStructuredContentTemplate(JmsTemplate archiveStructuredContentTemplate) {
this.archiveStructuredContentTemplate = archiveStructuredContentTemplate;
}
public Destination getArchiveStructuredContentDestination() {
return archiveStructuredContentDestination;
}
public void setArchiveStructuredContentDestination(Destination archiveStructuredContentDestination) {
this.archiveStructuredContentDestination = archiveStructuredContentDestination;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_message_jms_JMSArchivedStructuredContentPublisher.java
|
319 |
new Thread() {
public void run() {
map.lock(key);
map.lock(key);
lockedLatch.countDown();
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
93 |
private static class ResourceElement
{
private Xid xid = null;
private XAResource resource = null;
private int status;
ResourceElement( Xid xid, XAResource resource )
{
this.xid = xid;
this.resource = resource;
status = RS_ENLISTED;
}
Xid getXid()
{
return xid;
}
XAResource getResource()
{
return resource;
}
int getStatus()
{
return status;
}
void setStatus( int status )
{
this.status = status;
}
@Override
public String toString()
{
String statusString;
switch ( status )
{
case RS_ENLISTED:
statusString = "ENLISTED";
break;
case RS_DELISTED:
statusString = "DELISTED";
break;
case RS_SUSPENDED:
statusString = "SUSPENDED";
break;
case RS_READONLY:
statusString = "READONLY";
break;
default:
statusString = "UNKNOWN";
}
return "Xid[" + xid + "] XAResource[" + resource + "] Status["
+ statusString + "]";
}
}
| 0true
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_ReadOnlyTransactionImpl.java
|
39 |
@SuppressWarnings("unchecked")
public class OMVRBTreeSet<E> extends AbstractSet<E> implements ONavigableSet<E>, Cloneable, java.io.Serializable {
/**
* The backing map.
*/
private transient ONavigableMap<E, Object> m;
// Dummy value to associate with an Object in the backing Map
private static final Object PRESENT = new Object();
/**
* Constructs a set backed by the specified navigable map.
*/
OMVRBTreeSet(ONavigableMap<E, Object> m) {
this.m = m;
}
/**
* Constructs a new, empty tree set, sorted according to the natural ordering of its elements. All elements inserted into the set
* must implement the {@link Comparable} interface. Furthermore, all such elements must be <i>mutually comparable</i>:
* {@code e1.compareTo(e2)} must not throw a {@code ClassCastException} for any elements {@code e1} and {@code e2} in the set. If
* the user attempts to add an element to the set that violates this constraint (for example, the user attempts to add a string
* element to a set whose elements are integers), the {@code add} call will throw a {@code ClassCastException}.
*/
public OMVRBTreeSet() {
this(new OMVRBTreeMemory<E, Object>());
}
/**
* Constructs a new, empty tree set, sorted according to the specified comparator. All elements inserted into the set must be
* <i>mutually comparable</i> by the specified comparator: {@code comparator.compare(e1, e2)} must not throw a
* {@code ClassCastException} for any elements {@code e1} and {@code e2} in the set. If the user attempts to add an element to the
* set that violates this constraint, the {@code add} call will throw a {@code ClassCastException}.
*
* @param comparator
* the comparator that will be used to order this set. If {@code null}, the {@linkplain Comparable natural ordering} of
* the elements will be used.
*/
public OMVRBTreeSet(Comparator<? super E> comparator) {
this(new OMVRBTreeMemory<E, Object>(comparator));
}
/**
* Constructs a new tree set containing the elements in the specified collection, sorted according to the <i>natural ordering</i>
* of its elements. All elements inserted into the set must implement the {@link Comparable} interface. Furthermore, all such
* elements must be <i>mutually comparable</i>: {@code e1.compareTo(e2)} must not throw a {@code ClassCastException} for any
* elements {@code e1} and {@code e2} in the set.
*
* @param c
* collection whose elements will comprise the new set
* @throws ClassCastException
* if the elements in {@code c} are not {@link Comparable}, or are not mutually comparable
* @throws NullPointerException
* if the specified collection is null
*/
public OMVRBTreeSet(Collection<? extends E> c) {
this();
addAll(c);
}
/**
* Constructs a new tree set containing the same elements and using the same ordering as the specified sorted set.
*
* @param s
* sorted set whose elements will comprise the new set
* @throws NullPointerException
* if the specified sorted set is null
*/
public OMVRBTreeSet(SortedSet<E> s) {
this(s.comparator());
addAll(s);
}
/**
* Returns an iterator over the elements in this set in ascending order.
*
* @return an iterator over the elements in this set in ascending order
*/
@Override
public OLazyIterator<E> iterator() {
return m.navigableKeySet().iterator();
}
/**
* Returns an iterator over the elements in this set in descending order.
*
* @return an iterator over the elements in this set in descending order
* @since 1.6
*/
public OLazyIterator<E> descendingIterator() {
return m.descendingKeySet().iterator();
}
/**
* @since 1.6
*/
public ONavigableSet<E> descendingSet() {
return new OMVRBTreeSet<E>(m.descendingMap());
}
/**
* Returns the number of elements in this set (its cardinality).
*
* @return the number of elements in this set (its cardinality)
*/
@Override
public int size() {
return m.size();
}
/**
* Returns {@code true} if this set contains no elements.
*
* @return {@code true} if this set contains no elements
*/
@Override
public boolean isEmpty() {
return m.isEmpty();
}
/**
* Returns {@code true} if this set contains the specified element. More formally, returns {@code true} if and only if this set
* contains an element {@code e} such that <tt>(o==null ? e==null : o.equals(e))</tt>.
*
* @param o
* object to be checked for containment in this set
* @return {@code true} if this set contains the specified element
* @throws ClassCastException
* if the specified object cannot be compared with the elements currently in the set
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
*/
@Override
public boolean contains(Object o) {
return m.containsKey(o);
}
/**
* Adds the specified element to this set if it is not already present. More formally, adds the specified element {@code e} to
* this set if the set contains no element {@code e2} such that <tt>(e==null ? e2==null : e.equals(e2))</tt>.
* If this set already contains the element, the call leaves the set unchanged and returns {@code false}.
*
* @param e
* element to be added to this set
* @return {@code true} if this set did not already contain the specified element
* @throws ClassCastException
* if the specified object cannot be compared with the elements currently in this set
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
*/
@Override
public boolean add(E e) {
return m.put(e, PRESENT) == null;
}
/**
* Removes the specified element from this set if it is present. More formally, removes an element {@code e} such that
* <tt>(o==null ? e==null : o.equals(e))</tt>, if this set contains such an element. Returns {@code true} if
* this set contained the element (or equivalently, if this set changed as a result of the call). (This set will not contain the
* element once the call returns.)
*
* @param o
* object to be removed from this set, if present
* @return {@code true} if this set contained the specified element
* @throws ClassCastException
* if the specified object cannot be compared with the elements currently in this set
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
*/
@Override
public boolean remove(Object o) {
return m.remove(o) == PRESENT;
}
/**
* Removes all of the elements from this set. The set will be empty after this call returns.
*/
@Override
public void clear() {
m.clear();
}
/**
* Adds all of the elements in the specified collection to this set.
*
* @param c
* collection containing elements to be added to this set
* @return {@code true} if this set changed as a result of the call
* @throws ClassCastException
* if the elements provided cannot be compared with the elements currently in the set
* @throws NullPointerException
* if the specified collection is null or if any element is null and this set uses natural ordering, or its comparator
* does not permit null elements
*/
@Override
public boolean addAll(Collection<? extends E> c) {
// Use linear-time version if applicable
if (m.size() == 0 && c.size() > 0 && c instanceof SortedSet && m instanceof OMVRBTree) {
SortedSet<? extends E> set = (SortedSet<? extends E>) c;
OMVRBTree<E, Object> map = (OMVRBTree<E, Object>) m;
Comparator<? super E> cc = (Comparator<? super E>) set.comparator();
Comparator<? super E> mc = map.comparator();
if (cc == mc || (cc != null && cc.equals(mc))) {
map.addAllForOTreeSet(set, PRESENT);
return true;
}
}
return super.addAll(c);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code fromElement} or {@code toElement} is null and this set uses natural ordering, or its comparator does not
* permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
* @since 1.6
*/
public ONavigableSet<E> subSet(E fromElement, boolean fromInclusive, E toElement, boolean toInclusive) {
return new OMVRBTreeSet<E>(m.subMap(fromElement, fromInclusive, toElement, toInclusive));
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code toElement} is null and this set uses natural ordering, or its comparator does not permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
* @since 1.6
*/
public ONavigableSet<E> headSet(E toElement, boolean inclusive) {
return new OMVRBTreeSet<E>(m.headMap(toElement, inclusive));
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code fromElement} is null and this set uses natural ordering, or its comparator does not permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
* @since 1.6
*/
public ONavigableSet<E> tailSet(E fromElement, boolean inclusive) {
return new OMVRBTreeSet<E>(m.tailMap(fromElement, inclusive));
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code fromElement} or {@code toElement} is null and this set uses natural ordering, or its comparator does not
* permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
*/
public SortedSet<E> subSet(E fromElement, E toElement) {
return subSet(fromElement, true, toElement, false);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code toElement} is null and this set uses natural ordering, or its comparator does not permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
*/
public SortedSet<E> headSet(E toElement) {
return headSet(toElement, false);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if {@code fromElement} is null and this set uses natural ordering, or its comparator does not permit null elements
* @throws IllegalArgumentException
* {@inheritDoc}
*/
public SortedSet<E> tailSet(E fromElement) {
return tailSet(fromElement, true);
}
public Comparator<? super E> comparator() {
return m.comparator();
}
/**
* @throws NoSuchElementException
* {@inheritDoc}
*/
public E first() {
return m.firstKey();
}
/**
* @throws NoSuchElementException
* {@inheritDoc}
*/
public E last() {
return m.lastKey();
}
// ONavigableSet API methods
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
* @since 1.6
*/
public E lower(E e) {
return m.lowerKey(e);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
* @since 1.6
*/
public E floor(E e) {
return m.floorKey(e);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
* @since 1.6
*/
public E ceiling(E e) {
return m.ceilingKey(e);
}
/**
* @throws ClassCastException
* {@inheritDoc}
* @throws NullPointerException
* if the specified element is null and this set uses natural ordering, or its comparator does not permit null elements
* @since 1.6
*/
public E higher(E e) {
return m.higherKey(e);
}
/**
* @since 1.6
*/
public E pollFirst() {
Map.Entry<E, ?> e = m.pollFirstEntry();
return (e == null) ? null : e.getKey();
}
/**
* @since 1.6
*/
public E pollLast() {
Map.Entry<E, ?> e = m.pollLastEntry();
return (e == null) ? null : e.getKey();
}
/**
* Returns a shallow copy of this {@code OTreeSet} instance. (The elements themselves are not cloned.)
*
* @return a shallow copy of this set
*/
@Override
public Object clone() {
OMVRBTreeSet<E> clone = null;
try {
clone = (OMVRBTreeSet<E>) super.clone();
} catch (CloneNotSupportedException e) {
throw new InternalError();
}
clone.m = new OMVRBTreeMemory<E, Object>(m);
return clone;
}
/**
* Save the state of the {@code OTreeSet} instance to a stream (that is, serialize it).
*
* @serialData Emits the comparator used to order this set, or {@code null} if it obeys its elements' natural ordering (Object),
* followed by the size of the set (the number of elements it contains) (int), followed by all of its elements (each
* an Object) in order (as determined by the set's Comparator, or by the elements' natural ordering if the set has no
* Comparator).
*/
private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException {
// Write out any hidden stuff
s.defaultWriteObject();
// Write out Comparator
s.writeObject(m.comparator());
// Write out size
s.writeInt(m.size());
// Write out all elements in the proper order.
for (Iterator<E> i = m.keySet().iterator(); i.hasNext();)
s.writeObject(i.next());
}
/**
* Reconstitute the {@code OTreeSet} instance from a stream (that is, deserialize it).
*/
private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException {
// Read in any hidden stuff
s.defaultReadObject();
// Read in Comparator
Comparator<? super E> c = (Comparator<? super E>) s.readObject();
// Create backing OMVRBTree
OMVRBTree<E, Object> tm;
if (c == null)
tm = new OMVRBTreeMemory<E, Object>();
else
tm = new OMVRBTreeMemory<E, Object>(c);
m = tm;
// Read in size
int size = s.readInt();
tm.readOTreeSet(size, s, PRESENT);
}
private static final long serialVersionUID = -2479143000061671589L;
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTreeSet.java
|
256 |
public interface OCollateFactory {
/**
* @return Set of supported collate names of this factory
*/
Set<String> getNames();
/**
* Returns the requested collate
*
* @param name
*/
OCollate getCollate(String name);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_collate_OCollateFactory.java
|
921 |
public final class LockServiceImpl implements LockService, ManagedService, RemoteService, MembershipAwareService,
MigrationAwareService, ClientAwareService {
private final NodeEngine nodeEngine;
private final LockStoreContainer[] containers;
private final ConcurrentMap<ObjectNamespace, EntryTaskScheduler> evictionProcessors
= new ConcurrentHashMap<ObjectNamespace, EntryTaskScheduler>();
private final ConcurrentMap<String, ConstructorFunction<ObjectNamespace, LockStoreInfo>> constructors
= new ConcurrentHashMap<String, ConstructorFunction<ObjectNamespace, LockStoreInfo>>();
private final ConstructorFunction<ObjectNamespace, EntryTaskScheduler> schedulerConstructor =
new ConstructorFunction<ObjectNamespace, EntryTaskScheduler>() {
@Override
public EntryTaskScheduler createNew(ObjectNamespace namespace) {
LockEvictionProcessor entryProcessor = new LockEvictionProcessor(nodeEngine, namespace);
ScheduledExecutorService scheduledExecutor =
nodeEngine.getExecutionService().getDefaultScheduledExecutor();
return EntryTaskSchedulerFactory
.newScheduler(scheduledExecutor, entryProcessor, ScheduleType.POSTPONE);
}
};
public LockServiceImpl(NodeEngine nodeEngine) {
this.nodeEngine = nodeEngine;
this.containers = new LockStoreContainer[nodeEngine.getPartitionService().getPartitionCount()];
for (int i = 0; i < containers.length; i++) {
containers[i] = new LockStoreContainer(this, i);
}
}
@Override
public void init(NodeEngine nodeEngine, Properties properties) {
registerLockStoreConstructor(SERVICE_NAME, new ConstructorFunction<ObjectNamespace, LockStoreInfo>() {
public LockStoreInfo createNew(ObjectNamespace key) {
return new LockStoreInfo() {
@Override
public int getBackupCount() {
return 1;
}
@Override
public int getAsyncBackupCount() {
return 0;
}
};
}
});
}
@Override
public void reset() {
for (LockStoreContainer container : containers) {
for (LockStoreImpl lockStore : container.getLockStores()) {
lockStore.clear();
}
}
}
@Override
public void shutdown(boolean terminate) {
for (LockStoreContainer container : containers) {
container.clear();
}
}
@Override
public void registerLockStoreConstructor(String serviceName,
ConstructorFunction<ObjectNamespace, LockStoreInfo> constructorFunction) {
boolean put = constructors.putIfAbsent(serviceName, constructorFunction) == null;
if (!put) {
throw new IllegalArgumentException("LockStore constructor for service[" + serviceName + "] "
+ "is already registered!");
}
}
/**
* Gets the constructor for the given service, or null if the constructor doesn't exist.
*
* @param serviceName the name of the constructor to look up.
* @return the found ConstructorFunction.
*/
ConstructorFunction<ObjectNamespace, LockStoreInfo> getConstructor(String serviceName) {
return constructors.get(serviceName);
}
@Override
public LockStore createLockStore(int partitionId, ObjectNamespace namespace) {
final LockStoreContainer container = getLockContainer(partitionId);
container.getOrCreateLockStore(namespace);
return new LockStoreProxy(container, namespace);
}
@Override
public void clearLockStore(int partitionId, ObjectNamespace namespace) {
LockStoreContainer container = getLockContainer(partitionId);
container.clearLockStore(namespace);
}
void scheduleEviction(ObjectNamespace namespace, Data key, long delay) {
EntryTaskScheduler scheduler = getOrPutSynchronized(
evictionProcessors, namespace, evictionProcessors, schedulerConstructor);
scheduler.schedule(delay, key, null);
}
void cancelEviction(ObjectNamespace namespace, Data key) {
EntryTaskScheduler scheduler = getOrPutSynchronized(
evictionProcessors, namespace, evictionProcessors, schedulerConstructor);
scheduler.cancel(key);
}
public LockStoreContainer getLockContainer(int partitionId) {
return containers[partitionId];
}
public LockStoreImpl getLockStore(int partitionId, ObjectNamespace namespace) {
return getLockContainer(partitionId).getOrCreateLockStore(namespace);
}
@Override
public void memberAdded(MembershipServiceEvent event) {
}
@Override
public void memberRemoved(MembershipServiceEvent event) {
final MemberImpl member = event.getMember();
final String uuid = member.getUuid();
releaseLocksOf(uuid);
}
@Override
public void memberAttributeChanged(MemberAttributeServiceEvent event) {
}
private void releaseLocksOf(String uuid) {
for (LockStoreContainer container : containers) {
for (LockStoreImpl lockStore : container.getLockStores()) {
releaseLock(uuid, container, lockStore);
}
}
}
private void releaseLock(String uuid, LockStoreContainer container, LockStoreImpl lockStore) {
Collection<LockResource> locks = lockStore.getLocks();
for (LockResource lock : locks) {
Data key = lock.getKey();
if (uuid.equals(lock.getOwner()) && !lock.isTransactional()) {
sendUnlockOperation(container, lockStore, key);
}
}
}
private void sendUnlockOperation(LockStoreContainer container, LockStoreImpl lockStore, Data key) {
UnlockOperation op = new UnlockOperation(lockStore.getNamespace(), key, -1, true);
op.setAsyncBackup(true);
op.setNodeEngine(nodeEngine);
op.setServiceName(SERVICE_NAME);
op.setService(LockServiceImpl.this);
op.setResponseHandler(ResponseHandlerFactory.createEmptyResponseHandler());
op.setPartitionId(container.getPartitionId());
nodeEngine.getOperationService().executeOperation(op);
}
@Override
public Collection<LockResource> getAllLocks() {
final Collection<LockResource> locks = new LinkedList<LockResource>();
for (LockStoreContainer container : containers) {
for (LockStoreImpl lockStore : container.getLockStores()) {
locks.addAll(lockStore.getLocks());
}
}
return locks;
}
@Override
public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
}
@Override
public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
int partitionId = event.getPartitionId();
LockStoreContainer container = containers[partitionId];
int replicaIndex = event.getReplicaIndex();
LockReplicationOperation op = new LockReplicationOperation(container, partitionId, replicaIndex);
if (op.isEmpty()) {
return null;
} else {
return op;
}
}
@Override
public void commitMigration(PartitionMigrationEvent event) {
if (event.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
clearPartition(event.getPartitionId());
}
}
private void clearPartition(int partitionId) {
final LockStoreContainer container = containers[partitionId];
for (LockStoreImpl ls : container.getLockStores()) {
ls.clear();
}
}
@Override
public void rollbackMigration(PartitionMigrationEvent event) {
if (event.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
clearPartition(event.getPartitionId());
}
}
@Override
public void clearPartitionReplica(int partitionId) {
clearPartition(partitionId);
}
@Override
public DistributedObject createDistributedObject(String objectId) {
return new LockProxy(nodeEngine, this, objectId);
}
@Override
public void destroyDistributedObject(String objectId) {
Data key = nodeEngine.getSerializationService().toData(objectId);
for (LockStoreContainer container : containers) {
InternalLockNamespace namespace = new InternalLockNamespace(objectId);
LockStoreImpl lockStore = container.getOrCreateLockStore(namespace);
lockStore.forceUnlock(key);
}
}
@Override
public void clientDisconnected(String clientUuid) {
releaseLocksOf(clientUuid);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_concurrent_lock_LockServiceImpl.java
|
2,362 |
private class PartitionProcessor
implements Runnable {
@Override
public void run() {
KeyValueSource<KeyIn, ValueIn> delegate = keyValueSource;
if (supervisor.getConfiguration().isCommunicateStats()) {
delegate = new KeyValueSourceFacade<KeyIn, ValueIn>(keyValueSource, supervisor);
}
while (true) {
if (cancelled.get()) {
return;
}
Integer partitionId = findNewPartitionProcessing();
if (partitionId == null) {
// Job's done
return;
}
// Migration event occurred, just retry
if (partitionId == -1) {
continue;
}
try {
// This call cannot be delegated
((PartitionIdAware) keyValueSource).setPartitionId(partitionId);
delegate.reset();
if (delegate.open(nodeEngine)) {
DefaultContext<KeyOut, ValueOut> context = supervisor.getOrCreateContext(MapCombineTask.this);
processMapping(partitionId, context, delegate);
delegate.close();
finalizeMapping(partitionId, context);
} else {
// Partition assignment might not be ready yet, postpone the processing and retry later
postponePartitionProcessing(partitionId);
}
} catch (Throwable t) {
handleProcessorThrowable(t);
}
}
}
private Integer findNewPartitionProcessing() {
try {
RequestPartitionResult result = mapReduceService
.processRequest(supervisor.getJobOwner(), new RequestPartitionMapping(name, jobId), name);
// JobSupervisor doesn't exists anymore on jobOwner, job done?
if (result.getResultState() == NO_SUPERVISOR) {
return null;
} else if (result.getResultState() == CHECK_STATE_FAILED) {
// retry
return -1;
} else if (result.getResultState() == NO_MORE_PARTITIONS) {
return null;
} else {
return result.getPartitionId();
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_MapCombineTask.java
|
3,615 |
@Beta
public class XAResourceImpl implements XAResource {
private final TransactionManagerServiceImpl transactionManager;
private final TransactionContextImpl transactionContext;
private final ILogger logger;
private int transactionTimeoutSeconds;
public XAResourceImpl(TransactionManagerServiceImpl transactionManager,
TransactionContextImpl transactionContext, NodeEngineImpl nodeEngine) {
this.transactionManager = transactionManager;
this.transactionContext = transactionContext;
this.logger = nodeEngine.getLogger(XAResourceImpl.class);
}
//XAResource --START
@Override
public synchronized void start(Xid xid, int flags) throws XAException {
nullCheck(xid);
switch (flags) {
case TMNOFLAGS:
if (getTransaction(xid) != null) {
final XAException xaException = new XAException(XAException.XAER_DUPID);
logger.severe("Duplicate xid: " + xid, xaException);
throw xaException;
}
try {
final Transaction transaction = getTransaction();
transactionManager.addManagedTransaction(xid, transaction);
transaction.begin();
} catch (IllegalStateException e) {
logger.severe(e);
throw new XAException(XAException.XAER_INVAL);
}
break;
case TMRESUME:
case TMJOIN:
break;
default:
throw new XAException(XAException.XAER_INVAL);
}
}
@Override
public synchronized void end(Xid xid, int flags) throws XAException {
nullCheck(xid);
final TransactionImpl transaction = (TransactionImpl) getTransaction();
final SerializableXID sXid = transaction.getXid();
if (sXid == null || !sXid.equals(xid)) {
logger.severe("started xid: " + sXid + " and given xid : " + xid + " not equal!!!");
}
validateTx(transaction, State.ACTIVE);
switch (flags) {
case XAResource.TMSUCCESS:
//successfully end.
break;
case XAResource.TMFAIL:
transaction.setRollbackOnly();
throw new XAException(XAException.XA_RBROLLBACK);
// break;
case XAResource.TMSUSPEND:
break;
default:
throw new XAException(XAException.XAER_INVAL);
}
}
@Override
public synchronized int prepare(Xid xid) throws XAException {
nullCheck(xid);
final TransactionImpl transaction = (TransactionImpl) getTransaction();
final SerializableXID sXid = transaction.getXid();
if (sXid == null || !sXid.equals(xid)) {
logger.severe("started xid: " + sXid + " and given xid : " + xid + " not equal!!!");
}
validateTx(transaction, State.ACTIVE);
try {
transaction.prepare();
} catch (TransactionException e) {
throw new XAException(XAException.XAER_RMERR);
}
return XAResource.XA_OK;
}
@Override
public synchronized void commit(Xid xid, boolean onePhase) throws XAException {
nullCheck(xid);
final Transaction transaction = getTransaction(xid);
if (onePhase) {
validateTx(transaction, State.ACTIVE);
transaction.prepare();
}
validateTx(transaction, State.PREPARED);
try {
transaction.commit();
transactionManager.removeManagedTransaction(xid);
} catch (TransactionException e) {
throw new XAException(XAException.XAER_RMERR);
}
}
@Override
public synchronized void rollback(Xid xid) throws XAException {
nullCheck(xid);
final Transaction transaction = getTransaction(xid);
//NO_TXN means do not validate state
validateTx(transaction, State.NO_TXN);
try {
transaction.rollback();
transactionManager.removeManagedTransaction(xid);
} catch (TransactionException e) {
throw new XAException(XAException.XAER_RMERR);
}
}
@Override
public synchronized void forget(Xid xid) throws XAException {
throw new XAException(XAException.XAER_PROTO);
}
@Override
public synchronized boolean isSameRM(XAResource xaResource) throws XAException {
if (xaResource instanceof XAResourceImpl) {
XAResourceImpl other = (XAResourceImpl) xaResource;
return transactionManager.equals(other.transactionManager);
}
return false;
}
@Override
public synchronized Xid[] recover(int flag) throws XAException {
return transactionManager.recover();
}
@Override
public synchronized int getTransactionTimeout() throws XAException {
return transactionTimeoutSeconds;
}
@Override
public synchronized boolean setTransactionTimeout(int seconds) throws XAException {
this.transactionTimeoutSeconds = seconds;
return false;
}
//XAResource --END
private void nullCheck(Xid xid) throws XAException {
if (xid == null) {
final XAException xaException = new XAException(XAException.XAER_INVAL);
logger.severe("Xid cannot be null!!!", xaException);
throw xaException;
}
}
private void validateTx(Transaction tx, State state) throws XAException {
if (tx == null) {
final XAException xaException = new XAException(XAException.XAER_NOTA);
logger.severe("Transaction is not available!!!", xaException);
throw xaException;
}
final State txState = tx.getState();
switch (state) {
case ACTIVE:
if (txState != State.ACTIVE) {
final XAException xaException = new XAException(XAException.XAER_NOTA);
logger.severe("Transaction is not active!!! state: " + txState, xaException);
throw xaException;
}
break;
case PREPARED:
if (txState != State.PREPARED) {
final XAException xaException = new XAException(XAException.XAER_INVAL);
logger.severe("Transaction is not prepared!!! state: " + txState, xaException);
throw xaException;
}
break;
default:
break;
}
}
private Transaction getTransaction(Xid xid) {
return transactionManager.getManagedTransaction(xid);
}
private Transaction getTransaction() {
return transactionContext.getTransaction();
}
@Override
public String toString() {
final String txnId = transactionContext.getTxnId();
final StringBuilder sb = new StringBuilder("XAResourceImpl{");
sb.append("txdId=").append(txnId);
sb.append(", transactionTimeoutSeconds=").append(transactionTimeoutSeconds);
sb.append('}');
return sb.toString();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_transaction_impl_XAResourceImpl.java
|
2,773 |
public class SSLSocketChannelWrapper extends DefaultSocketChannelWrapper {
private static final boolean DEBUG = false;
private final ByteBuffer in;
private final ByteBuffer emptyBuffer;
private final ByteBuffer netOutBuffer;
// "reliable" write transport
private final ByteBuffer netInBuffer;
// "reliable" read transport
private final SSLEngine sslEngine;
private volatile boolean handshakeCompleted;
private SSLEngineResult sslEngineResult;
public SSLSocketChannelWrapper(SSLContext sslContext, SocketChannel sc, boolean client) throws Exception {
super(sc);
sslEngine = sslContext.createSSLEngine();
sslEngine.setUseClientMode(client);
sslEngine.setEnableSessionCreation(true);
SSLSession session = sslEngine.getSession();
in = ByteBuffer.allocate(64 * 1024);
emptyBuffer = ByteBuffer.allocate(0);
int netBufferMax = session.getPacketBufferSize();
netOutBuffer = ByteBuffer.allocate(netBufferMax);
netInBuffer = ByteBuffer.allocate(netBufferMax);
}
private void handshake() throws IOException {
if (handshakeCompleted) {
return;
}
if (DEBUG) {
log("Starting handshake...");
}
synchronized (this) {
if (handshakeCompleted) {
if (DEBUG) {
log("Handshake already completed...");
}
return;
}
int counter = 0;
if (DEBUG) {
log("Begin handshake");
}
sslEngine.beginHandshake();
writeInternal(emptyBuffer);
while (counter++ < 250 && sslEngineResult.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.FINISHED) {
if (DEBUG) {
log("Handshake status: " + sslEngineResult.getHandshakeStatus());
}
if (sslEngineResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_UNWRAP) {
if (DEBUG) {
log("Begin UNWRAP");
}
netInBuffer.clear();
while (socketChannel.read(netInBuffer) < 1) {
try {
if (DEBUG) {
log("Spinning on channel read...");
}
Thread.sleep(50);
} catch (InterruptedException e) {
throw new IOException(e);
}
}
netInBuffer.flip();
unwrap(netInBuffer);
if (DEBUG) {
log("Done UNWRAP: " + sslEngineResult.getHandshakeStatus());
}
if (sslEngineResult.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.FINISHED) {
emptyBuffer.clear();
writeInternal(emptyBuffer);
if (DEBUG) {
log("Done WRAP after UNWRAP: " + sslEngineResult.getHandshakeStatus());
}
}
} else if (sslEngineResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_WRAP) {
if (DEBUG) {
log("Begin WRAP");
}
emptyBuffer.clear();
writeInternal(emptyBuffer);
if (DEBUG) {
log("Done WRAP: " + sslEngineResult.getHandshakeStatus());
}
} else {
try {
if (DEBUG) {
log("Sleeping... Status: " + sslEngineResult.getHandshakeStatus());
}
Thread.sleep(500);
} catch (InterruptedException e) {
throw new IOException(e);
}
}
}
if (sslEngineResult.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.FINISHED) {
throw new SSLHandshakeException("SSL handshake failed after " + counter
+ " trials! -> " + sslEngineResult.getHandshakeStatus());
}
if (DEBUG) {
log("Handshake completed!");
}
in.clear();
in.flip();
handshakeCompleted = true;
}
}
private void log(String log) {
if (DEBUG) {
System.err.println(getClass().getSimpleName() + "[" + socketChannel.socket().getLocalSocketAddress() + "]: " + log);
}
}
private ByteBuffer unwrap(ByteBuffer b) throws SSLException {
in.clear();
while (b.hasRemaining()) {
sslEngineResult = sslEngine.unwrap(b, in);
if (sslEngineResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_TASK) {
if (DEBUG) {
log("Handshake NEED TASK");
}
Runnable task;
while ((task = sslEngine.getDelegatedTask()) != null) {
if (DEBUG) {
log("Running task: " + task);
}
task.run();
}
} else if (sslEngineResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.FINISHED
|| sslEngineResult.getStatus() == SSLEngineResult.Status.BUFFER_UNDERFLOW) {
return in;
}
}
return in;
}
public int write(ByteBuffer input) throws IOException {
if (!handshakeCompleted) {
handshake();
}
return writeInternal(input);
}
private int writeInternal(ByteBuffer input) throws IOException {
sslEngineResult = sslEngine.wrap(input, netOutBuffer);
netOutBuffer.flip();
int written = socketChannel.write(netOutBuffer);
if (netOutBuffer.hasRemaining()) {
netOutBuffer.compact();
} else {
netOutBuffer.clear();
}
return written;
}
public int read(ByteBuffer output) throws IOException {
if (!handshakeCompleted) {
handshake();
}
int readBytesCount = 0;
int limit;
if (in.hasRemaining()) {
limit = Math.min(in.remaining(), output.remaining());
for (int i = 0; i < limit; i++) {
output.put(in.get());
readBytesCount++;
}
return readBytesCount;
}
if (netInBuffer.hasRemaining()) {
unwrap(netInBuffer);
in.flip();
limit = Math.min(in.remaining(), output.remaining());
for (int i = 0; i < limit; i++) {
output.put(in.get());
readBytesCount++;
}
if (sslEngineResult.getStatus() != SSLEngineResult.Status.BUFFER_UNDERFLOW) {
netInBuffer.clear();
netInBuffer.flip();
return readBytesCount;
}
}
if (netInBuffer.hasRemaining()) {
netInBuffer.compact();
} else {
netInBuffer.clear();
}
if (socketChannel.read(netInBuffer) == -1) {
netInBuffer.clear();
netInBuffer.flip();
return -1;
}
netInBuffer.flip();
unwrap(netInBuffer);
in.flip();
limit = Math.min(in.remaining(), output.remaining());
for (int i = 0; i < limit; i++) {
output.put(in.get());
readBytesCount++;
}
return readBytesCount;
}
public void close() throws IOException {
sslEngine.closeOutbound();
try {
writeInternal(emptyBuffer);
} catch (Exception ignored) {
}
socketChannel.close();
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("SSLSocketChannelWrapper{");
sb.append("socketChannel=").append(socketChannel);
sb.append('}');
return sb.toString();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_ssl_SSLSocketChannelWrapper.java
|
215 |
Collections.sort(classes, new Comparator<OClass>() {
public int compare(OClass o1, OClass o2) {
return o1.getName().compareToIgnoreCase(o2.getName());
}
});
| 0true
|
tools_src_main_java_com_orientechnologies_orient_console_OConsoleDatabaseApp.java
|
681 |
public class OHashIndexFactory implements OIndexFactory {
private static final Set<String> TYPES;
static {
final Set<String> types = new HashSet<String>();
types.add(OClass.INDEX_TYPE.UNIQUE_HASH_INDEX.toString());
types.add(OClass.INDEX_TYPE.NOTUNIQUE_HASH_INDEX.toString());
types.add(OClass.INDEX_TYPE.FULLTEXT_HASH_INDEX.toString());
types.add(OClass.INDEX_TYPE.DICTIONARY_HASH_INDEX.toString());
TYPES = Collections.unmodifiableSet(types);
}
/**
* Index types :
* <ul>
* <li>UNIQUE</li>
* <li>NOTUNIQUE</li>
* <li>FULLTEXT</li>
* <li>DICTIONARY</li>
* </ul>
*/
public Set<String> getTypes() {
return TYPES;
}
public OIndexInternal<?> createIndex(ODatabaseRecord database, String indexType, String algorithm, String valueContainerAlgorithm)
throws OConfigurationException {
if (valueContainerAlgorithm == null) {
if (OClass.INDEX_TYPE.NOTUNIQUE.toString().equals(indexType)
|| OClass.INDEX_TYPE.NOTUNIQUE_HASH_INDEX.toString().equals(indexType)
|| OClass.INDEX_TYPE.FULLTEXT_HASH_INDEX.toString().equals(indexType)
|| OClass.INDEX_TYPE.FULLTEXT.toString().equals(indexType))
valueContainerAlgorithm = ODefaultIndexFactory.MVRBTREE_VALUE_CONTAINER;
else
valueContainerAlgorithm = ODefaultIndexFactory.NONE_VALUE_CONTAINER;
}
if ((database.getStorage().getType().equals(OEngineLocalPaginated.NAME) || database.getStorage().getType()
.equals(OEngineLocal.NAME))
&& valueContainerAlgorithm.equals(ODefaultIndexFactory.MVRBTREE_VALUE_CONTAINER)
&& OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean()) {
OLogManager
.instance()
.warn(
this,
"Index was created using %s as values container. "
+ "This container is deprecated and is not supported any more. To avoid this message please drop and recreate indexes or perform DB export/import.",
valueContainerAlgorithm);
}
OStorage storage = database.getStorage();
OIndexEngine indexEngine;
final String storageType = storage.getType();
if (storageType.equals("memory"))
indexEngine = new OMemoryHashMapIndexEngine();
else if (storageType.equals("local") || storageType.equals("plocal"))
indexEngine = new OLocalHashTableIndexEngine();
else if (storageType.equals("distributed"))
// DISTRIBUTED CASE: HANDLE IT AS FOR LOCAL
indexEngine = new OLocalHashTableIndexEngine();
else if (storageType.equals("remote"))
indexEngine = new ORemoteIndexEngine();
else
throw new OIndexException("Unsupported storage type : " + storageType);
if (OClass.INDEX_TYPE.UNIQUE_HASH_INDEX.toString().equals(indexType))
return new OIndexUnique(indexType, algorithm, indexEngine, valueContainerAlgorithm);
else if (OClass.INDEX_TYPE.NOTUNIQUE_HASH_INDEX.toString().equals(indexType))
return new OIndexNotUnique(indexType, algorithm, indexEngine, valueContainerAlgorithm);
else if (OClass.INDEX_TYPE.FULLTEXT_HASH_INDEX.toString().equals(indexType))
return new OIndexFullText(indexType, algorithm, indexEngine, valueContainerAlgorithm);
else if (OClass.INDEX_TYPE.DICTIONARY_HASH_INDEX.toString().equals(indexType))
return new OIndexDictionary(indexType, algorithm, indexEngine, valueContainerAlgorithm);
throw new OConfigurationException("Unsupported type : " + indexType);
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_hashindex_local_OHashIndexFactory.java
|
750 |
public class GetResponse extends ActionResponse implements Iterable<GetField>, ToXContent {
private GetResult getResult;
GetResponse() {
}
GetResponse(GetResult getResult) {
this.getResult = getResult;
}
/**
* Does the document exists.
*/
public boolean isExists() {
return getResult.isExists();
}
/**
* The index the document was fetched from.
*/
public String getIndex() {
return getResult.getIndex();
}
/**
* The type of the document.
*/
public String getType() {
return getResult.getType();
}
/**
* The id of the document.
*/
public String getId() {
return getResult.getId();
}
/**
* The version of the doc.
*/
public long getVersion() {
return getResult.getVersion();
}
/**
* The source of the document if exists.
*/
public byte[] getSourceAsBytes() {
return getResult.source();
}
/**
* Returns the internal source bytes, as they are returned without munging (for example,
* might still be compressed).
*/
public BytesReference getSourceInternal() {
return getResult.internalSourceRef();
}
/**
* Returns bytes reference, also un compress the source if needed.
*/
public BytesReference getSourceAsBytesRef() {
return getResult.sourceRef();
}
/**
* Is the source empty (not available) or not.
*/
public boolean isSourceEmpty() {
return getResult.isSourceEmpty();
}
/**
* The source of the document (as a string).
*/
public String getSourceAsString() {
return getResult.sourceAsString();
}
/**
* The source of the document (As a map).
*/
@SuppressWarnings({"unchecked"})
public Map<String, Object> getSourceAsMap() throws ElasticsearchParseException {
return getResult.sourceAsMap();
}
public Map<String, Object> getSource() {
return getResult.getSource();
}
public Map<String, GetField> getFields() {
return getResult.getFields();
}
public GetField getField(String name) {
return getResult.field(name);
}
@Override
public Iterator<GetField> iterator() {
return getResult.iterator();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return getResult.toXContent(builder, params);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
getResult = GetResult.readGetResult(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
getResult.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_get_GetResponse.java
|
1,573 |
public class CountMapReduce {
public static final String CLASS = Tokens.makeNamespace(CountMapReduce.class) + ".class";
public enum Counters {
VERTICES_COUNTED,
EDGES_COUNTED
}
public static Configuration createConfiguration(final Class<? extends Element> klass) {
final Configuration configuration = new EmptyConfiguration();
configuration.setClass(CLASS, klass, Element.class);
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, LongWritable> {
private boolean isVertex;
private final LongWritable longWritable = new LongWritable();
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.outputs = new SafeMapperOutputs(context);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException {
if (this.isVertex) {
final long pathCount = value.pathCount();
this.longWritable.set(pathCount);
context.write(NullWritable.get(), this.longWritable);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_COUNTED, pathCount > 0 ? 1 : 0);
} else {
long edgesCounted = 0;
long pathCount = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
edgesCounted++;
pathCount = pathCount + edge.pathCount();
}
}
this.longWritable.set(pathCount);
context.write(NullWritable.get(), this.longWritable);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.EDGES_COUNTED, edgesCounted);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
public static class Combiner extends Reducer<NullWritable, LongWritable, NullWritable, LongWritable> {
private final LongWritable longWritable = new LongWritable();
@Override
public void reduce(final NullWritable key, final Iterable<LongWritable> values, final Reducer<NullWritable, LongWritable, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException {
long totalCount = 0;
for (final LongWritable temp : values) {
totalCount = totalCount + temp.get();
}
this.longWritable.set(totalCount);
context.write(key, this.longWritable);
}
}
public static class Reduce extends Reducer<NullWritable, LongWritable, NullWritable, LongWritable> {
private SafeReducerOutputs outputs;
private LongWritable longWritable = new LongWritable();
@Override
public void setup(final Reducer<NullWritable, LongWritable, NullWritable, LongWritable>.Context context) {
this.outputs = new SafeReducerOutputs(context);
}
@Override
public void reduce(final NullWritable key, final Iterable<LongWritable> values, final Reducer<NullWritable, LongWritable, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException {
long totalCount = 0;
for (final LongWritable temp : values) {
totalCount = totalCount + temp.get();
}
this.longWritable.set(totalCount);
this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.longWritable);
}
@Override
public void cleanup(final Reducer<NullWritable, LongWritable, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_util_CountMapReduce.java
|
1,643 |
public class OHazelcastPlugin extends ODistributedAbstractPlugin implements MembershipListener, EntryListener<String, Object> {
protected static final String CONFIG_NODE_PREFIX = "node.";
protected static final String CONFIG_DATABASE_PREFIX = "database.";
protected String nodeId;
protected String hazelcastConfigFile = "hazelcast.xml";
protected Map<String, Member> cachedClusterNodes = new ConcurrentHashMap<String, Member>();
protected OHazelcastDistributedMessageService messageService;
protected long timeOffset = 0;
protected Date startedOn = new Date();
protected volatile STATUS status = STATUS.OFFLINE;
protected String membershipListenerRegistration;
protected volatile HazelcastInstance hazelcastInstance;
protected Object installDatabaseLock = new Object();
public OHazelcastPlugin() {
}
@Override
public void config(final OServer iServer, final OServerParameterConfiguration[] iParams) {
super.config(iServer, iParams);
if (nodeName == null) {
// GENERATE NODE NAME
nodeName = "node" + System.currentTimeMillis();
OLogManager.instance().warn(this, "Generating new node name for current node: %s", nodeName);
// SALVE THE NODE NAME IN CONFIGURATION
boolean found = false;
final OServerConfiguration cfg = iServer.getConfiguration();
for (OServerHandlerConfiguration h : cfg.handlers) {
if (h.clazz.equals(getClass().getName())) {
for (OServerParameterConfiguration p : h.parameters) {
if (p.name.equals("nodeName")) {
found = true;
p.value = nodeName;
break;
}
}
if (!found) {
h.parameters = OArrays.copyOf(h.parameters, h.parameters.length + 1);
h.parameters[h.parameters.length - 1] = new OServerParameterConfiguration("nodeName", nodeName);
}
try {
iServer.saveConfiguration();
} catch (IOException e) {
throw new OConfigurationException("Cannot save server configuration", e);
}
break;
}
}
}
for (OServerParameterConfiguration param : iParams) {
if (param.name.equalsIgnoreCase("configuration.hazelcast"))
hazelcastConfigFile = OSystemVariableResolver.resolveSystemVariables(param.value);
}
}
@Override
public void startup() {
if (!enabled)
return;
super.startup();
status = STATUS.STARTING;
OLogManager.instance().info(this, "Starting distributed server '%s'...", getLocalNodeName());
cachedClusterNodes.clear();
try {
hazelcastInstance = Hazelcast.newHazelcastInstance(new FileSystemXmlConfig(hazelcastConfigFile));
nodeId = hazelcastInstance.getCluster().getLocalMember().getUuid();
timeOffset = System.currentTimeMillis() - hazelcastInstance.getCluster().getClusterTime();
cachedClusterNodes.put(getLocalNodeName(), hazelcastInstance.getCluster().getLocalMember());
membershipListenerRegistration = hazelcastInstance.getCluster().addMembershipListener(this);
OServer.registerServerInstance(getLocalNodeName(), serverInstance);
final IMap<String, Object> configurationMap = getConfigurationMap();
configurationMap.addEntryListener(this, true);
// REGISTER CURRENT NODES
for (Member m : hazelcastInstance.getCluster().getMembers()) {
final String memberName = getNodeName(m);
if (memberName != null)
cachedClusterNodes.put(memberName, m);
}
messageService = new OHazelcastDistributedMessageService(this);
// PUBLISH LOCAL NODE CFG
getConfigurationMap().put(CONFIG_NODE_PREFIX + getLocalNodeId(), getLocalNodeConfiguration());
installNewDatabases(true);
loadDistributedDatabases();
// REGISTER CURRENT MEMBERS
setStatus(STATUS.ONLINE);
} catch (FileNotFoundException e) {
throw new OConfigurationException("Error on creating Hazelcast instance", e);
}
}
@Override
public long getDistributedTime(final long iTime) {
return iTime - timeOffset;
}
@Override
public void sendShutdown() {
shutdown();
}
@Override
public void shutdown() {
if (!enabled)
return;
setStatus(STATUS.SHUTDOWNING);
messageService.shutdown();
super.shutdown();
cachedClusterNodes.clear();
if (membershipListenerRegistration != null) {
hazelcastInstance.getCluster().removeMembershipListener(membershipListenerRegistration);
}
try {
hazelcastInstance.shutdown();
} catch (Exception e) {
OLogManager.instance().error(this, "Error on shutting down Hazelcast instance", e);
} finally {
hazelcastInstance = null;
}
setStatus(STATUS.OFFLINE);
getConfigurationMap().remove(CONFIG_NODE_PREFIX + getLocalNodeId());
}
@Override
public ODocument getClusterConfiguration() {
if (!enabled)
return null;
final ODocument cluster = new ODocument();
final HazelcastInstance instance = getHazelcastInstance();
cluster.field("localName", instance.getName());
cluster.field("localId", instance.getCluster().getLocalMember().getUuid());
// INSERT MEMBERS
final List<ODocument> members = new ArrayList<ODocument>();
cluster.field("members", members, OType.EMBEDDEDLIST);
// members.add(getLocalNodeConfiguration());
for (Member member : cachedClusterNodes.values()) {
members.add(getNodeConfigurationById(member.getUuid()));
}
return cluster;
}
public ODocument getNodeConfigurationById(final String iNodeId) {
return (ODocument) getConfigurationMap().get(CONFIG_NODE_PREFIX + iNodeId);
}
@Override
public ODocument getLocalNodeConfiguration() {
final ODocument nodeCfg = new ODocument();
nodeCfg.field("id", getLocalNodeId());
nodeCfg.field("name", getLocalNodeName());
nodeCfg.field("startedOn", startedOn);
List<Map<String, Object>> listeners = new ArrayList<Map<String, Object>>();
nodeCfg.field("listeners", listeners, OType.EMBEDDEDLIST);
for (OServerNetworkListener listener : serverInstance.getNetworkListeners()) {
final Map<String, Object> listenerCfg = new HashMap<String, Object>();
listeners.add(listenerCfg);
listenerCfg.put("protocol", listener.getProtocolType().getSimpleName());
listenerCfg.put("listen", listener.getListeningAddress());
}
nodeCfg.field("databases", getManagedDatabases());
return nodeCfg;
}
public boolean isEnabled() {
return enabled;
}
public STATUS getStatus() {
return status;
}
public boolean checkStatus(final STATUS iStatus2Check) {
return status.equals(iStatus2Check);
}
@Override
public void setStatus(final STATUS iStatus) {
if (status.equals(iStatus))
// NO CHANGE
return;
status = iStatus;
// DON'T PUT THE STATUS IN CFG ANYMORE
// getConfigurationMap().put(CONFIG_NODE_PREFIX + getLocalNodeId(), getLocalNodeConfiguration());
ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.NONE, "updated node status to '%s'", status);
}
@Override
public Object sendRequest(final String iDatabaseName, final String iClusterName, final OAbstractRemoteTask iTask,
final EXECUTION_MODE iExecutionMode) {
final OHazelcastDistributedRequest req = new OHazelcastDistributedRequest(getLocalNodeName(), iDatabaseName, iClusterName,
iTask, iExecutionMode);
final OHazelcastDistributedDatabase db = messageService.getDatabase(iDatabaseName);
final ODistributedResponse response = db.send(req);
if (response != null)
return response.getPayload();
return null;
}
@Override
public void sendRequest2Node(final String iDatabaseName, final String iTargetNodeName, final OAbstractRemoteTask iTask) {
final OHazelcastDistributedRequest req = new OHazelcastDistributedRequest(getLocalNodeName(), iDatabaseName, null, iTask,
EXECUTION_MODE.NO_RESPONSE);
final OHazelcastDistributedDatabase db = messageService.getDatabase(iDatabaseName);
db.send2Node(req, iTargetNodeName);
}
public Set<String> getManagedDatabases() {
return messageService.getDatabases();
}
public String getLocalNodeName() {
return nodeName;
}
@Override
public String getLocalNodeId() {
return nodeId;
}
@Override
public void onCreate(final ODatabase iDatabase) {
final OHazelcastDistributedDatabase distribDatabase = messageService.registerDatabase(iDatabase.getName());
distribDatabase.configureDatabase((ODatabaseDocumentTx) ((ODatabaseComplex<?>) iDatabase).getDatabaseOwner(), false, false)
.setOnline();
onOpen(iDatabase);
}
@SuppressWarnings("unchecked")
public ODocument getStats() {
final ODocument doc = new ODocument();
final Map<String, HashMap<String, Object>> nodes = new HashMap<String, HashMap<String, Object>>();
doc.field("nodes", nodes);
Map<String, Object> localNode = new HashMap<String, Object>();
doc.field("localNode", localNode);
localNode.put("name", getLocalNodeName());
Map<String, Object> databases = new HashMap<String, Object>();
localNode.put("databases", databases);
for (String dbName : messageService.getDatabases()) {
Map<String, Object> db = new HashMap<String, Object>();
databases.put(dbName, db);
final OProfilerEntry chrono = Orient.instance().getProfiler().getChrono("distributed.replication." + dbName + ".resynch");
if (chrono != null)
db.put("resync", new ODocument().fromJSON(chrono.toJSON()));
}
for (Entry<String, QueueConfig> entry : hazelcastInstance.getConfig().getQueueConfigs().entrySet()) {
final String queueName = entry.getKey();
if (!queueName.startsWith(OHazelcastDistributedMessageService.NODE_QUEUE_PREFIX))
continue;
final IQueue<Object> queue = hazelcastInstance.getQueue(queueName);
final String[] names = queueName.split("\\.");
HashMap<String, Object> node = nodes.get(names[2]);
if (node == null) {
node = new HashMap<String, Object>();
nodes.put(names[2], node);
}
if (names[3].equals("response")) {
node.put("responses", queue.size());
} else {
final String dbName = names[3];
Map<String, Object> db = (HashMap<String, Object>) node.get(dbName);
if (db == null) {
db = new HashMap<String, Object>(2);
node.put(dbName, db);
}
db.put("requests", queue.size());
final Object lastMessage = queue.peek();
if (lastMessage != null)
db.put("lastMessage", lastMessage.toString());
}
}
return doc;
}
public String getNodeName(final Member iMember) {
final ODocument cfg = getNodeConfigurationById(iMember.getUuid());
return (String) (cfg != null ? cfg.field("name") : null);
}
public Set<String> getRemoteNodeIds() {
return cachedClusterNodes.keySet();
}
@Override
public void memberAdded(final MembershipEvent iEvent) {
ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.NONE, "added new node id=%s name=%s", iEvent.getMember(),
getNodeName(iEvent.getMember()));
}
/**
* Removes the node map entry.
*/
@Override
public void memberRemoved(final MembershipEvent iEvent) {
ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.NONE, "node removed id=%s name=%s", iEvent.getMember(),
getNodeName(iEvent.getMember()));
final Member member = iEvent.getMember();
final String nodeName = getNodeName(member);
if (nodeName != null) {
cachedClusterNodes.remove(nodeName);
for (String dbName : messageService.getDatabases()) {
final OHazelcastDistributedDatabase db = messageService.getDatabase(dbName);
db.removeNodeInConfiguration(nodeName, false);
}
}
OClientConnectionManager.instance().pushDistribCfg2Clients(getClusterConfiguration());
}
@Override
public void entryAdded(EntryEvent<String, Object> iEvent) {
final String key = iEvent.getKey();
if (key.startsWith(CONFIG_NODE_PREFIX)) {
if (!iEvent.getMember().equals(hazelcastInstance.getCluster().getLocalMember())) {
final ODocument cfg = (ODocument) iEvent.getValue();
cachedClusterNodes.put((String) cfg.field("name"), (Member) iEvent.getMember());
ODistributedServerLog.info(this, getLocalNodeName(), null, DIRECTION.NONE,
"added node configuration id=%s name=%s, now %d nodes are configured", iEvent.getMember(),
getNodeName(iEvent.getMember()), cachedClusterNodes.size());
installNewDatabases(false);
}
} else if (key.startsWith(CONFIG_DATABASE_PREFIX)) {
saveDatabaseConfiguration(key.substring(CONFIG_DATABASE_PREFIX.length()), (ODocument) iEvent.getValue());
OClientConnectionManager.instance().pushDistribCfg2Clients(getClusterConfiguration());
}
}
@Override
public void entryUpdated(EntryEvent<String, Object> iEvent) {
final String key = iEvent.getKey();
if (key.startsWith(CONFIG_NODE_PREFIX)) {
ODistributedServerLog.info(this, getLocalNodeName(), null, DIRECTION.NONE, "updated node configuration id=%s name=%s",
iEvent.getMember(), getNodeName(iEvent.getMember()));
final ODocument cfg = (ODocument) iEvent.getValue();
cachedClusterNodes.put((String) cfg.field("name"), (Member) iEvent.getMember());
} else if (key.startsWith(CONFIG_DATABASE_PREFIX)) {
if (!iEvent.getMember().equals(hazelcastInstance.getCluster().getLocalMember())) {
final String dbName = key.substring(CONFIG_DATABASE_PREFIX.length());
ODistributedServerLog.info(this, getLocalNodeName(), null, DIRECTION.NONE, "update configuration db=%s from=%s", dbName,
getNodeName(iEvent.getMember()));
installNewDatabases(false);
saveDatabaseConfiguration(dbName, (ODocument) iEvent.getValue());
OClientConnectionManager.instance().pushDistribCfg2Clients(getClusterConfiguration());
}
}
}
@Override
public void entryRemoved(EntryEvent<String, Object> iEvent) {
final String key = iEvent.getKey();
if (key.startsWith(CONFIG_NODE_PREFIX)) {
final String nName = getNodeName(iEvent.getMember());
ODistributedServerLog.info(this, getLocalNodeName(), null, DIRECTION.NONE, "removed node configuration id=%s name=%s",
iEvent.getMember(), nName);
cachedClusterNodes.remove(nName);
} else if (key.startsWith(CONFIG_DATABASE_PREFIX)) {
synchronized (cachedDatabaseConfiguration) {
cachedDatabaseConfiguration.remove(key.substring(CONFIG_DATABASE_PREFIX.length()));
}
}
}
@Override
public void entryEvicted(EntryEvent<String, Object> iEvent) {
}
@Override
public boolean isNodeAvailable(final String iNodeName) {
return cachedClusterNodes.containsKey(iNodeName);
}
public boolean isOffline() {
return status != STATUS.ONLINE;
}
public void waitUntilOnline() throws InterruptedException {
while (!status.equals(STATUS.ONLINE))
Thread.sleep(100);
}
public HazelcastInstance getHazelcastInstance() {
while (hazelcastInstance == null) {
// WAIT UNTIL THE INSTANCE IS READY
try {
Thread.sleep(100);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
}
}
return hazelcastInstance;
}
public Lock getLock(final String iName) {
return getHazelcastInstance().getLock(iName);
}
public Class<? extends OReplicationConflictResolver> getConfictResolverClass() {
return confictResolverClass;
}
@Override
public String toString() {
return getLocalNodeName();
}
/**
* Executes the request on local node. In case of error returns the Exception itself
*
* @param database
*/
public Serializable executeOnLocalNode(final ODistributedRequest req, ODatabaseDocumentTx database) {
final OAbstractRemoteTask task = req.getTask();
try {
return (Serializable) task.execute(serverInstance, this, database);
} catch (Throwable e) {
return e;
}
}
@Override
public ODistributedPartition newPartition(final List<String> partition) {
return new OHazelcastDistributionPartition(partition);
}
protected IMap<String, Object> getConfigurationMap() {
return getHazelcastInstance().getMap("orientdb");
}
/**
* Initializes all the available server's databases as distributed.
*/
protected void loadDistributedDatabases() {
for (Entry<String, String> storageEntry : serverInstance.getAvailableStorageNames().entrySet()) {
final String databaseName = storageEntry.getKey();
if (messageService.getDatabase(databaseName) == null) {
ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.NONE, "opening database '%s'...", databaseName);
final ODistributedConfiguration cfg = getDatabaseConfiguration(databaseName);
if (!getConfigurationMap().containsKey(CONFIG_DATABASE_PREFIX + databaseName)) {
// PUBLISH CFG FIRST TIME
getConfigurationMap().put(CONFIG_DATABASE_PREFIX + databaseName, cfg.serialize());
}
final boolean hotAlignment = cfg.isHotAlignment();
messageService.registerDatabase(databaseName).configureDatabase(null, hotAlignment, hotAlignment).setOnline();
}
}
}
@SuppressWarnings("unchecked")
protected void installNewDatabases(final boolean iStartup) {
if (cachedClusterNodes.size() <= 1)
// NO OTHER NODES WHERE ALIGN
return;
// LOCKING THIS RESOURCE PREVENT CONCURRENT INSTALL OF THE SAME DB
synchronized (installDatabaseLock) {
for (Entry<String, Object> entry : getConfigurationMap().entrySet()) {
if (entry.getKey().startsWith(CONFIG_DATABASE_PREFIX)) {
final String databaseName = entry.getKey().substring(CONFIG_DATABASE_PREFIX.length());
final ODocument config = (ODocument) entry.getValue();
final Boolean autoDeploy = config.field("autoDeploy");
if (autoDeploy != null && autoDeploy) {
final Boolean hotAlignment = config.field("hotAlignment");
final String dbPath = serverInstance.getDatabaseDirectory() + databaseName;
final Set<String> configuredDatabases = serverInstance.getAvailableStorageNames().keySet();
if (configuredDatabases.contains(databaseName)) {
if (iStartup && hotAlignment != null && !hotAlignment) {
// DROP THE DATABASE ON CURRENT NODE
ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.NONE,
"dropping local database %s in %s and get a fresh copy from a remote node...", databaseName, dbPath);
Orient.instance().unregisterStorageByName(databaseName);
OFileUtils.deleteRecursively(new File(dbPath));
} else
// HOT ALIGNMENT RUNNING, DON'T INSTALL THE DB FROM SCRATCH BUT RATHER LET TO THE NODE TO ALIGN BY READING THE QUEUE
continue;
}
final OHazelcastDistributedDatabase distrDatabase = messageService.registerDatabase(databaseName);
// READ ALL THE MESSAGES DISCARDING EVERYTHING UNTIL DEPLOY
distrDatabase.setWaitForTaskType(ODeployDatabaseTask.class);
distrDatabase.configureDatabase(null, false, false);
final Map<String, OBuffer> results = (Map<String, OBuffer>) sendRequest(databaseName, null, new ODeployDatabaseTask(),
EXECUTION_MODE.RESPONSE);
// EXTRACT THE REAL RESULT
OBuffer result = null;
for (Entry<String, OBuffer> r : results.entrySet())
if (r.getValue().getBuffer() != null && r.getValue().getBuffer().length > 0) {
result = r.getValue();
ODistributedServerLog.warn(this, getLocalNodeName(), r.getKey(), DIRECTION.IN, "installing database %s in %s...",
databaseName, dbPath);
break;
}
if (result == null)
throw new ODistributedException("No response received from remote nodes for auto-deploy of database");
new File(dbPath).mkdirs();
final ODatabaseDocumentTx db = new ODatabaseDocumentTx("local:" + dbPath);
final ByteArrayInputStream in = new ByteArrayInputStream(result.getBuffer());
try {
db.restore(in, null, null);
in.close();
db.close();
Orient.instance().unregisterStorageByName(db.getName());
ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.NONE,
"installed database %s in %s, setting it online...", databaseName, dbPath);
distrDatabase.setOnline();
ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.NONE, "database %s is online", databaseName);
} catch (IOException e) {
ODistributedServerLog.warn(this, getLocalNodeName(), null, DIRECTION.IN,
"error on copying database '%s' on local server", e, databaseName);
}
}
}
}
}
}
@Override
protected ODocument loadDatabaseConfiguration(final String iDatabaseName, final File file) {
// FIRST LOOK IN THE CLUSTER
if (hazelcastInstance != null) {
final ODocument cfg = (ODocument) getConfigurationMap().get(CONFIG_DATABASE_PREFIX + iDatabaseName);
if (cfg != null) {
ODistributedServerLog.info(this, getLocalNodeName(), null, DIRECTION.NONE,
"loaded database configuration from active cluster");
updateCachedDatabaseConfiguration(iDatabaseName, cfg);
return cfg;
}
}
// NO NODE IN CLUSTER, LOAD FROM FILE
return super.loadDatabaseConfiguration(iDatabaseName, file);
}
}
| 1no label
|
distributed_src_main_java_com_orientechnologies_orient_server_hazelcast_OHazelcastPlugin.java
|
99 |
public interface Page extends Serializable {
public Long getId();
public void setId(Long id);
public String getFullUrl();
public void setFullUrl(String fullUrl);
public String getDescription();
public void setDescription(String description);
public PageTemplate getPageTemplate();
public void setPageTemplate(PageTemplate pageTemplate);
public Map<String, PageField> getPageFields();
public void setPageFields(Map<String, PageField> pageFields);
public Boolean getDeletedFlag();
public void setDeletedFlag(Boolean deletedFlag);
public Boolean getArchivedFlag();
public void setArchivedFlag(Boolean archivedFlag);
public SandBox getSandbox();
public void setSandbox(SandBox sandbox);
public Boolean getLockedFlag();
public void setLockedFlag(Boolean lockedFlag);
public Long getOriginalPageId();
public void setOriginalPageId(Long originalPageId);
public SandBox getOriginalSandBox();
public void setOriginalSandBox(SandBox originalSandBox);
public AdminAuditable getAuditable();
public void setAuditable(AdminAuditable auditable);
/**
* Returns the offlineFlag. True indicates that the page should no longer appear on the site.
* The item will still appear within the content administration program but no longer
* be returned as part of the client facing APIs.
*
* @return true if this item is offline
*/
@Nullable
public Boolean getOfflineFlag();
/**
* Sets the offline flag.
*
* @param offlineFlag
*/
public void setOfflineFlag(@Nullable Boolean offlineFlag);
/**
* Gets the integer priority of this content item. Items with a lower priority should
* be displayed before items with a higher priority.
*
* @return the priority as a numeric value
*/
@Nullable
public Integer getPriority();
/**
* Sets the display priority of this item. Lower priorities should be displayed first.
*
* @param priority
*/
public void setPriority(@Nullable Integer priority);
/**
* Returns a map of the targeting rules associated with this page.
*
* Targeting rules are defined in the content mangagement system and used to
* enforce which page is returned to the client.
*
* @return
*/
@Nullable
public Map<String, PageRule> getPageMatchRules();
/**
* Sets the targeting rules for this content item.
*
* @param pageRules
*/
public void setPageMatchRules(@Nullable Map<String, PageRule> pageRules);
/**
* Returns the item (or cart) based rules associated with this content item.
*
* @return
*/
@Nullable
public Set<PageItemCriteria> getQualifyingItemCriteria();
/**
* Sets the item (e.g. cart) based rules associated with this content item.
*
* @param qualifyingItemCriteria
*/
public void setQualifyingItemCriteria(@Nullable Set<PageItemCriteria> qualifyingItemCriteria);
public Page cloneEntity();
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_Page.java
|
137 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_SC")
@EntityListeners(value = { AdminAuditableListener.class })
@AdminPresentationOverrides(
{
@AdminPresentationOverride(name = "auditable.createdBy.id", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.updatedBy.id", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.createdBy.name", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.updatedBy.name", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.dateCreated", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "auditable.dateUpdated", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "structuredContentType.name", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL)),
@AdminPresentationOverride(name = "structuredContentType.structuredContentFieldTemplate.name", value = @AdminPresentation(readOnly = true, visibility = VisibilityEnum.HIDDEN_ALL))
}
)
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE, friendlyName = "StructuredContentImpl_baseStructuredContent")
public class StructuredContentImpl implements StructuredContent {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "StructuredContentId")
@GenericGenerator(
name="StructuredContentId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="StructuredContentImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.structure.domain.StructuredContentImpl")
}
)
@Column(name = "SC_ID")
protected Long id;
@Embedded
@AdminPresentation(excluded = true)
protected AdminAuditable auditable = new AdminAuditable();
@AdminPresentation(friendlyName = "StructuredContentImpl_Content_Name", order = 1,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description,
prominent = true, gridOrder = 1)
@Column(name = "CONTENT_NAME", nullable = false)
@Index(name="CONTENT_NAME_INDEX", columnNames={"CONTENT_NAME", "ARCHIVED_FLAG", "SC_TYPE_ID"})
protected String contentName;
@ManyToOne(targetEntity = LocaleImpl.class, optional = false)
@JoinColumn(name = "LOCALE_CODE")
@AdminPresentation(friendlyName = "StructuredContentImpl_Locale", order = 2,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description,
prominent = true, gridOrder = 2)
@AdminPresentationToOneLookup(lookupDisplayProperty = "friendlyName", lookupType = LookupType.DROPDOWN)
protected Locale locale;
@Column(name = "PRIORITY", nullable = false)
@AdminPresentation(friendlyName = "StructuredContentImpl_Priority", order = 3,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description)
@Index(name="CONTENT_PRIORITY_INDEX", columnNames={"PRIORITY"})
protected Integer priority;
@ManyToMany(targetEntity = StructuredContentRuleImpl.class, cascade = {CascadeType.ALL})
@JoinTable(name = "BLC_SC_RULE_MAP", inverseJoinColumns = @JoinColumn(name = "SC_RULE_ID", referencedColumnName = "SC_RULE_ID"))
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@MapKeyColumn(name = "MAP_KEY", nullable = false)
@AdminPresentationMapFields(
mapDisplayFields = {
@AdminPresentationMapField(
fieldName = RuleIdentifier.CUSTOMER_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 1,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.CUSTOMER, friendlyName = "Generic_Customer_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.TIME_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 2,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.TIME, friendlyName = "Generic_Time_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.REQUEST_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 3,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.REQUEST, friendlyName = "Generic_Request_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.PRODUCT_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 4,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.PRODUCT, friendlyName = "Generic_Product_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.ORDER_FIELD_KEY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 5,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.ORDER, friendlyName = "Generic_Order_Rule")
),
@AdminPresentationMapField(
fieldName = RuleIdentifier.CATEGORY,
fieldPresentation = @AdminPresentation(fieldType = SupportedFieldType.RULE_SIMPLE, order = 6,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
ruleIdentifier = RuleIdentifier.CATEGORY, friendlyName = "Generic_Category_Rule")
)
}
)
Map<String, StructuredContentRule> structuredContentMatchRules = new HashMap<String, StructuredContentRule>();
@OneToMany(fetch = FetchType.LAZY, targetEntity = StructuredContentItemCriteriaImpl.class, cascade={CascadeType.ALL})
@JoinTable(name = "BLC_QUAL_CRIT_SC_XREF", joinColumns = @JoinColumn(name = "SC_ID"), inverseJoinColumns = @JoinColumn(name = "SC_ITEM_CRITERIA_ID"))
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@AdminPresentation(friendlyName = "Generic_Item_Rule", order = 5,
tab = Presentation.Tab.Name.Rules, tabOrder = Presentation.Tab.Order.Rules,
group = Presentation.Group.Name.Rules, groupOrder = Presentation.Group.Order.Rules,
fieldType = SupportedFieldType.RULE_WITH_QUANTITY,
ruleIdentifier = RuleIdentifier.ORDERITEM)
protected Set<StructuredContentItemCriteria> qualifyingItemCriteria = new HashSet<StructuredContentItemCriteria>();
@Column(name = "ORIG_ITEM_ID")
@Index(name="SC_ORIG_ITEM_ID_INDEX", columnNames={"ORIG_ITEM_ID"})
@AdminPresentation(friendlyName = "StructuredContentImpl_Original_Item_Id", order = 1,
group = Presentation.Group.Name.Internal, groupOrder = Presentation.Group.Order.Internal,
visibility = VisibilityEnum.HIDDEN_ALL)
protected Long originalItemId;
@ManyToOne (targetEntity = SandBoxImpl.class)
@JoinColumn(name="SANDBOX_ID")
@AdminPresentation(friendlyName = "StructuredContentImpl_Content_SandBox", order = 1,
group = Presentation.Group.Name.Internal, groupOrder = Presentation.Group.Order.Internal,
excluded = true)
protected SandBox sandbox;
@ManyToOne(targetEntity = SandBoxImpl.class)
@JoinColumn(name = "ORIG_SANDBOX_ID")
@AdminPresentation(excluded = true)
protected SandBox originalSandBox;
@ManyToOne(targetEntity = StructuredContentTypeImpl.class)
@JoinColumn(name="SC_TYPE_ID")
@AdminPresentation(friendlyName = "StructuredContentImpl_Content_Type", order = 2, prominent = true,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description,
requiredOverride = RequiredOverride.REQUIRED)
@AdminPresentationToOneLookup(lookupDisplayProperty = "name", forcePopulateChildProperties = true)
protected StructuredContentType structuredContentType;
@ManyToMany(targetEntity = StructuredContentFieldImpl.class, cascade = CascadeType.ALL)
@JoinTable(name = "BLC_SC_FLD_MAP", joinColumns = @JoinColumn(name = "SC_ID", referencedColumnName = "SC_ID"), inverseJoinColumns = @JoinColumn(name = "SC_FLD_ID", referencedColumnName = "SC_FLD_ID"))
@MapKeyColumn(name = "MAP_KEY")
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@BatchSize(size = 20)
protected Map<String,StructuredContentField> structuredContentFields = new HashMap<String,StructuredContentField>();
@Column(name = "DELETED_FLAG")
@Index(name="SC_DLTD_FLG_INDX", columnNames={"DELETED_FLAG"})
@AdminPresentation(friendlyName = "StructuredContentImpl_Deleted", order = 2,
group = Presentation.Group.Name.Internal, groupOrder = Presentation.Group.Order.Internal,
visibility = VisibilityEnum.HIDDEN_ALL)
protected Boolean deletedFlag = false;
@Column(name = "ARCHIVED_FLAG")
@Index(name="SC_ARCHVD_FLG_INDX", columnNames={"ARCHIVED_FLAG"})
@AdminPresentation(friendlyName = "StructuredContentImpl_Archived", order = 3,
group = Presentation.Group.Name.Internal, groupOrder = Presentation.Group.Order.Internal,
visibility = VisibilityEnum.HIDDEN_ALL)
protected Boolean archivedFlag = false;
@AdminPresentation(friendlyName = "StructuredContentImpl_Offline", order = 4,
group = Presentation.Group.Name.Description, groupOrder = Presentation.Group.Order.Description)
@Column(name = "OFFLINE_FLAG")
@Index(name="SC_OFFLN_FLG_INDX", columnNames={"OFFLINE_FLAG"})
protected Boolean offlineFlag = false;
@Column (name = "LOCKED_FLAG")
@AdminPresentation(friendlyName = "StructuredContentImpl_Is_Locked",
visibility = VisibilityEnum.HIDDEN_ALL)
@Index(name="SC_LCKD_FLG_INDX", columnNames={"LOCKED_FLAG"})
protected Boolean lockedFlag = false;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getContentName() {
return contentName;
}
@Override
public void setContentName(String contentName) {
this.contentName = contentName;
}
@Override
public Locale getLocale() {
return locale;
}
@Override
public void setLocale(Locale locale) {
this.locale = locale;
}
@Override
public SandBox getSandbox() {
return sandbox;
}
@Override
public void setSandbox(SandBox sandbox) {
this.sandbox = sandbox;
}
@Override
public StructuredContentType getStructuredContentType() {
return structuredContentType;
}
@Override
public void setStructuredContentType(StructuredContentType structuredContentType) {
this.structuredContentType = structuredContentType;
}
@Override
public Map<String, StructuredContentField> getStructuredContentFields() {
return structuredContentFields;
}
@Override
public void setStructuredContentFields(Map<String, StructuredContentField> structuredContentFields) {
this.structuredContentFields = structuredContentFields;
}
@Override
public Boolean getDeletedFlag() {
if (deletedFlag == null) {
return Boolean.FALSE;
} else {
return deletedFlag;
}
}
@Override
public void setDeletedFlag(Boolean deletedFlag) {
this.deletedFlag = deletedFlag;
}
@Override
public Boolean getOfflineFlag() {
if (offlineFlag == null) {
return Boolean.FALSE;
} else {
return offlineFlag;
}
}
@Override
public void setOfflineFlag(Boolean offlineFlag) {
this.offlineFlag = offlineFlag;
}
@Override
public Integer getPriority() {
return priority;
}
@Override
public void setPriority(Integer priority) {
this.priority = priority;
}
@Override
public Long getOriginalItemId() {
return originalItemId;
}
@Override
public void setOriginalItemId(Long originalItemId) {
this.originalItemId = originalItemId;
}
@Override
public Boolean getArchivedFlag() {
if (archivedFlag == null) {
return Boolean.FALSE;
} else {
return archivedFlag;
}
}
@Override
public void setArchivedFlag(Boolean archivedFlag) {
this.archivedFlag = archivedFlag;
}
@Override
public AdminAuditable getAuditable() {
return auditable;
}
@Override
public void setAuditable(AdminAuditable auditable) {
this.auditable = auditable;
}
@Override
public Boolean getLockedFlag() {
if (lockedFlag == null) {
return Boolean.FALSE;
} else {
return lockedFlag;
}
}
@Override
public void setLockedFlag(Boolean lockedFlag) {
this.lockedFlag = lockedFlag;
}
@Override
public SandBox getOriginalSandBox() {
return originalSandBox;
}
@Override
public void setOriginalSandBox(SandBox originalSandBox) {
this.originalSandBox = originalSandBox;
}
@Override
public Map<String, StructuredContentRule> getStructuredContentMatchRules() {
return structuredContentMatchRules;
}
@Override
public void setStructuredContentMatchRules(Map<String, StructuredContentRule> structuredContentMatchRules) {
this.structuredContentMatchRules = structuredContentMatchRules;
}
@Override
public Set<StructuredContentItemCriteria> getQualifyingItemCriteria() {
return qualifyingItemCriteria;
}
@Override
public void setQualifyingItemCriteria(Set<StructuredContentItemCriteria> qualifyingItemCriteria) {
this.qualifyingItemCriteria = qualifyingItemCriteria;
}
public String getMainEntityName() {
return getContentName();
}
@Override
public StructuredContent cloneEntity() {
StructuredContentImpl newContent = new StructuredContentImpl();
newContent.archivedFlag = archivedFlag;
newContent.contentName = contentName;
newContent.deletedFlag = deletedFlag;
newContent.locale = locale;
newContent.offlineFlag = offlineFlag;
newContent.originalItemId = originalItemId;
newContent.priority = priority;
newContent.structuredContentType = structuredContentType;
Map<String, StructuredContentRule> ruleMap = newContent.getStructuredContentMatchRules();
for (String key : structuredContentMatchRules.keySet()) {
StructuredContentRule newField = structuredContentMatchRules.get(key).cloneEntity();
ruleMap.put(key, newField);
}
Set<StructuredContentItemCriteria> criteriaList = newContent.getQualifyingItemCriteria();
for (StructuredContentItemCriteria structuredContentItemCriteria : qualifyingItemCriteria) {
StructuredContentItemCriteria newField = structuredContentItemCriteria.cloneEntity();
criteriaList.add(newField);
}
Map<String, StructuredContentField> fieldMap = newContent.getStructuredContentFields();
for (StructuredContentField field : structuredContentFields.values()) {
StructuredContentField newField = field.cloneEntity();
fieldMap.put(newField.getFieldKey(), newField);
}
return newContent;
}
public static class Presentation {
public static class Tab {
public static class Name {
public static final String Rules = "StructuredContentImpl_Rules_Tab";
}
public static class Order {
public static final int Rules = 1000;
}
}
public static class Group {
public static class Name {
public static final String Description = "StructuredContentImpl_Description";
public static final String Internal = "StructuredContentImpl_Internal";
public static final String Rules = "StructuredContentImpl_Rules";
}
public static class Order {
public static final int Description = 1000;
public static final int Internal = 2000;
public static final int Rules = 1000;
}
}
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentImpl.java
|
1,695 |
public class OHttpResponse {
public static final String JSON_FORMAT = "type,indent:-1,rid,version,attribSameRow,class,keepTypes,alwaysFetchEmbeddedDocuments";
public static final char[] URL_SEPARATOR = { '/' };
private final OutputStream out;
public final String httpVersion;
public String headers;
public String[] additionalHeaders;
public String characterSet;
public String contentType;
public String serverInfo;
public String sessionId;
public String callbackFunction;
public String contentEncoding;
public boolean sendStarted = false;
public OHttpResponse(final OutputStream iOutStream, final String iHttpVersion, final String[] iAdditionalHeaders,
final String iResponseCharSet, final String iServerInfo, final String iSessionId, final String iCallbackFunction) {
out = iOutStream;
httpVersion = iHttpVersion;
additionalHeaders = iAdditionalHeaders;
characterSet = iResponseCharSet;
serverInfo = iServerInfo;
sessionId = iSessionId;
callbackFunction = iCallbackFunction;
}
public void send(final int iCode, final String iReason, final String iContentType, final Object iContent, final String iHeaders)
throws IOException {
send(iCode, iReason, iContentType, iContent, iHeaders, true);
}
public void send(final int iCode, final String iReason, final String iContentType, final Object iContent, final String iHeaders,
final boolean iKeepAlive) throws IOException {
if (sendStarted)
// AVOID TO SEND RESPONSE TWICE
return;
sendStarted = true;
final String content;
final String contentType;
if (callbackFunction != null) {
content = callbackFunction + "(" + iContent + ")";
contentType = "text/javascript";
} else {
content = iContent != null ? iContent.toString() : null;
contentType = iContentType;
}
final boolean empty = content == null || content.length() == 0;
writeStatus(empty && iCode == 200 ? 204 : iCode, iReason);
writeHeaders(contentType, iKeepAlive);
if (iHeaders != null)
writeLine(iHeaders);
final String sessId = sessionId != null ? sessionId : "-";
writeLine("Set-Cookie: " + OHttpUtils.OSESSIONID + "=" + sessId + "; Path=/; HttpOnly");
byte[] binaryContent = null;
if (!empty) {
if (contentEncoding != null && contentEncoding.equals(OHttpUtils.CONTENT_ACCEPT_GZIP_ENCODED))
binaryContent = compress(content);
else
binaryContent = OBinaryProtocol.string2bytes(content);
}
writeLine(OHttpUtils.HEADER_CONTENT_LENGTH + (empty ? 0 : binaryContent.length));
writeLine(null);
if (binaryContent != null)
out.write(binaryContent);
out.flush();
}
public void writeStatus(final int iStatus, final String iReason) throws IOException {
writeLine(httpVersion + " " + iStatus + " " + iReason);
}
public void writeHeaders(final String iContentType) throws IOException {
writeHeaders(iContentType, true);
}
public void writeHeaders(final String iContentType, final boolean iKeepAlive) throws IOException {
if (headers != null)
writeLine(headers);
writeLine("Date: " + new Date());
writeLine("Content-Type: " + iContentType + "; charset=" + characterSet);
writeLine("Server: " + serverInfo);
writeLine("Connection: " + (iKeepAlive ? "Keep-Alive" : "close"));
// SET CONTENT ENCDOING
if (contentEncoding != null && contentEncoding.length() > 0) {
writeLine("Content-Encoding: " + contentEncoding);
}
// INCLUDE COMMON CUSTOM HEADERS
if (additionalHeaders != null)
for (String h : additionalHeaders)
writeLine(h);
}
public void writeLine(final String iContent) throws IOException {
writeContent(iContent);
out.write(OHttpUtils.EOL);
}
public void writeContent(final String iContent) throws IOException {
if (iContent != null)
out.write(OBinaryProtocol.string2bytes(iContent));
}
public void writeResult(Object iResult) throws InterruptedException, IOException {
writeResult(iResult, null);
}
@SuppressWarnings("unchecked")
public void writeResult(Object iResult, final String iFormat) throws InterruptedException, IOException {
if (iResult == null)
send(OHttpUtils.STATUS_OK_NOCONTENT_CODE, "", OHttpUtils.CONTENT_TEXT_PLAIN, null, null, true);
else {
if (iResult instanceof Map<?, ?>) {
iResult = ((Map<?, ?>) iResult).entrySet().iterator();
} else if (OMultiValue.isMultiValue(iResult)
&& (OMultiValue.getSize(iResult) > 0 && !(OMultiValue.getFirstValue(iResult) instanceof OIdentifiable))) {
final List<OIdentifiable> resultSet = new ArrayList<OIdentifiable>();
resultSet.add(new ODocument().field("value", iResult));
iResult = resultSet.iterator();
} else if (iResult instanceof OIdentifiable) {
// CONVERT SIGLE VALUE IN A COLLECTION
final List<OIdentifiable> resultSet = new ArrayList<OIdentifiable>();
resultSet.add((OIdentifiable) iResult);
iResult = resultSet.iterator();
} else if (iResult instanceof Iterable<?>)
iResult = ((Iterable<OIdentifiable>) iResult).iterator();
else if (OMultiValue.isMultiValue(iResult))
iResult = OMultiValue.getMultiValueIterator(iResult);
else {
final List<OIdentifiable> resultSet = new ArrayList<OIdentifiable>();
resultSet.add(new ODocument().field("value", iResult));
iResult = resultSet.iterator();
}
if (iResult == null)
send(OHttpUtils.STATUS_OK_NOCONTENT_CODE, "", OHttpUtils.CONTENT_TEXT_PLAIN, null, null, true);
else if (iResult instanceof Iterator<?>)
writeRecords((Iterator<OIdentifiable>) iResult, null, iFormat);
}
}
public void writeRecords(final Iterable<OIdentifiable> iRecords) throws IOException {
if (iRecords == null)
return;
writeRecords(iRecords.iterator(), null, null);
}
public void writeRecords(final Iterable<OIdentifiable> iRecords, final String iFetchPlan) throws IOException {
if (iRecords == null)
return;
writeRecords(iRecords.iterator(), iFetchPlan, null);
}
public void writeRecords(final Iterator<OIdentifiable> iRecords) throws IOException {
writeRecords(iRecords, null, null);
}
public void writeRecords(final Iterator<OIdentifiable> iRecords, final String iFetchPlan, String iFormat) throws IOException {
if (iRecords == null)
return;
if (iFormat == null)
iFormat = JSON_FORMAT;
else
iFormat = JSON_FORMAT + "," + iFormat;
final StringWriter buffer = new StringWriter();
final OJSONWriter json = new OJSONWriter(buffer, iFormat);
json.beginObject();
final String format = iFetchPlan != null ? iFormat + ",fetchPlan:" + iFetchPlan : iFormat;
// WRITE RECORDS
json.beginCollection(-1, true, "result");
formatMultiValue(iRecords, buffer, format);
json.endCollection(-1, true);
json.endObject();
send(OHttpUtils.STATUS_OK_CODE, "OK", OHttpUtils.CONTENT_JSON, buffer.toString(), null);
}
public void formatMultiValue(final Iterator<?> iIterator, final StringWriter buffer, final String format) throws IOException {
if (iIterator != null) {
int counter = 0;
String objectJson;
while (iIterator.hasNext()) {
final Object entry = iIterator.next();
if (entry != null) {
if (counter++ > 0)
buffer.append(", ");
if (entry instanceof OIdentifiable) {
ORecord<?> rec = ((OIdentifiable) entry).getRecord();
try {
objectJson = rec.getRecord().toJSON(format);
buffer.append(objectJson);
} catch (Exception e) {
OLogManager.instance().error(this, "Error transforming record " + rec.getIdentity() + " to JSON", e);
}
} else if (OMultiValue.isMultiValue(entry))
formatMultiValue(OMultiValue.getMultiValueIterator(entry), buffer, format);
else
buffer.append(OJSONWriter.writeValue(entry, format));
}
}
}
}
public void writeRecord(final ORecord<?> iRecord) throws IOException {
writeRecord(iRecord, null, null);
}
public void writeRecord(final ORecord<?> iRecord, final String iFetchPlan, String iFormat) throws IOException {
if (iFormat == null)
iFormat = JSON_FORMAT;
final String format = iFetchPlan != null ? iFormat + ",fetchPlan:" + iFetchPlan : iFormat;
if (iRecord != null)
send(OHttpUtils.STATUS_OK_CODE, "OK", OHttpUtils.CONTENT_JSON, iRecord.toJSON(format), null);
}
public void sendStream(final int iCode, final String iReason, final String iContentType, InputStream iContent, long iSize)
throws IOException {
sendStream(iCode, iReason, iContentType, iContent, iSize, null);
}
public void sendStream(final int iCode, final String iReason, final String iContentType, InputStream iContent, long iSize,
final String iFileName) throws IOException {
writeStatus(iCode, iReason);
writeHeaders(iContentType);
writeLine("Content-Transfer-Encoding: binary");
if (iFileName != null)
writeLine("Content-Disposition: attachment; filename=\"" + iFileName + "\"");
if (iSize < 0) {
// SIZE UNKNOWN: USE A MEMORY BUFFER
final ByteArrayOutputStream o = new ByteArrayOutputStream();
if (iContent != null) {
int b;
while ((b = iContent.read()) > -1)
o.write(b);
}
byte[] content = o.toByteArray();
iContent = new ByteArrayInputStream(content);
iSize = content.length;
}
writeLine(OHttpUtils.HEADER_CONTENT_LENGTH + (iSize));
writeLine(null);
if (iContent != null) {
int b;
while ((b = iContent.read()) > -1)
out.write(b);
}
out.flush();
}
// Compress content string
public byte[] compress(String jsonStr) {
if (jsonStr == null || jsonStr.length() == 0)
return null;
GZIPOutputStream gout = null;
ByteArrayOutputStream baos = null;
try {
byte[] incoming = jsonStr.getBytes("UTF-8");
baos = new ByteArrayOutputStream();
gout = new GZIPOutputStream(baos, 16384); // 16KB
gout.write(incoming);
gout.finish();
return baos.toByteArray();
} catch (Exception ex) {
ex.printStackTrace();
} finally {
try {
if (gout != null)
gout.close();
if (baos != null)
baos.close();
} catch (Exception ex) {
ex.printStackTrace();
}
}
return null;
}
/**
* Stores additional headers to send
*
* @param iHeader
*/
public void setHeader(final String iHeader) {
headers = iHeader;
}
public OutputStream getOutputStream() {
return out;
}
public void flush() throws IOException {
out.flush();
}
public String getContentType() {
return contentType;
}
public void setContentType(String contentType) {
this.contentType = contentType;
}
public String getContentEncoding() {
return contentEncoding;
}
public void setContentEncoding(String contentEncoding) {
this.contentEncoding = contentEncoding;
}
}
| 1no label
|
server_src_main_java_com_orientechnologies_orient_server_network_protocol_http_OHttpResponse.java
|
15 |
{
OutgoingMessageHolder temporaryOutgoing = new OutgoingMessageHolder();
@Override
public void run()
{
lock.writeLock().lock();
try
{
// Lock timeouts while we are processing the message
synchronized ( timeouts )
{
StateMachine stateMachine = stateMachines.get( message.getMessageType().getClass() );
if ( stateMachine == null )
{
return; // No StateMachine registered for this MessageType type - Ignore this
}
stateMachine.handle( message, temporaryOutgoing );
Message<? extends MessageType> tempMessage;
while ((tempMessage = temporaryOutgoing.nextOutgoingMessage()) != null)
{
outgoing.offer( tempMessage );
}
// Process and send messages
// Allow state machines to send messages to each other as well in this loop
Message<? extends MessageType> outgoingMessage;
List<Message<? extends MessageType>> toSend = new LinkedList<Message<? extends MessageType>>();
try
{
while ( ( outgoingMessage = outgoing.nextOutgoingMessage() ) != null )
{
message.copyHeadersTo( outgoingMessage, CONVERSATION_ID, CREATED_BY );
for ( MessageProcessor outgoingProcessor : outgoingProcessors )
{
try
{
if ( !outgoingProcessor.process( outgoingMessage ) )
{
break;
}
}
catch ( Throwable e )
{
logger.warn( "Outgoing message processor threw exception", e );
}
}
if ( outgoingMessage.hasHeader( Message.TO ) )
{
outgoingMessage.setHeader( Message.INSTANCE_ID, instanceIdHeaderValue );
toSend.add( outgoingMessage );
}
else
{
// Deliver internally if possible
StateMachine internalStatemachine = stateMachines.get( outgoingMessage.getMessageType()
.getClass() );
if ( internalStatemachine != null )
{
internalStatemachine.handle( (Message) outgoingMessage, temporaryOutgoing );
while ((tempMessage = temporaryOutgoing.nextOutgoingMessage()) != null)
{
outgoing.offer( tempMessage );
}
}
}
}
if ( !toSend.isEmpty() ) // the check is necessary, sender may not have started yet
{
sender.process( toSend );
}
}
catch ( Exception e )
{
logger.warn( "Error processing message " + message, e );
}
}
}
finally
{
lock.writeLock().unlock();
}
// Before returning, process delayed executions so that they are done before returning
// This will effectively trigger all notifications created by contexts
executor.drain();
}
} );
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_StateMachines.java
|
236 |
highlighter = new XPostingsHighlighter() {
Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue, thirdValue).iterator();
Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1, firstValue.length() + secondValue.length() + 2).iterator();
@Override
protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
return new String[][]{new String[]{valuesIterator.next()}};
}
@Override
protected int getOffsetForCurrentValue(String field, int docId) {
return offsetsIterator.next();
}
@Override
protected BreakIterator getBreakIterator(String field) {
return new WholeBreakIterator();
}
@Override
protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) {
return new Passage[0];
}
};
| 0true
|
src_test_java_org_apache_lucene_search_postingshighlight_XPostingsHighlighterTests.java
|
108 |
private class Worker extends OtherThreadExecutor<State>
{
public Worker()
{
super( "other thread", new State( getGraphDb() ) );
}
void beginTx() throws Exception
{
execute( new WorkerCommand<State, Void>()
{
@Override
public Void doWork( State state )
{
state.tx = state.graphDb.beginTx();
return null;
}
} );
}
void finishTx() throws Exception
{
execute( new WorkerCommand<State, Void>()
{
@Override
public Void doWork( State state )
{
state.tx.success();
state.tx.finish();
return null;
}
} );
}
void setProperty( final Node node, final String key, final Object value ) throws Exception
{
execute( new WorkerCommand<State, Object>()
{
@Override
public Object doWork( State state )
{
node.setProperty( key, value );
return null;
}
}, 200, MILLISECONDS );
}
}
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestManualAcquireLock.java
|
272 |
public class ElasticsearchTimeoutException extends ElasticsearchException {
public ElasticsearchTimeoutException(String message) {
super(message);
}
public ElasticsearchTimeoutException(String message, Throwable cause) {
super(message, cause);
}
}
| 0true
|
src_main_java_org_elasticsearch_ElasticsearchTimeoutException.java
|
107 |
public static class Tab {
public static class Name {
public static final String Rules = "PageImpl_Rules_Tab";
}
public static class Order {
public static final int Rules = 1000;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_PageImpl.java
|
6,371 |
public class ConnectTransportException extends ActionTransportException {
private final DiscoveryNode node;
public ConnectTransportException(DiscoveryNode node, String msg) {
this(node, msg, null, null);
}
public ConnectTransportException(DiscoveryNode node, String msg, String action) {
this(node, msg, action, null);
}
public ConnectTransportException(DiscoveryNode node, String msg, Throwable cause) {
this(node, msg, null, cause);
}
public ConnectTransportException(DiscoveryNode node, String msg, String action, Throwable cause) {
super(node.name(), node.address(), action, msg, cause);
this.node = node;
}
public DiscoveryNode node() {
return node;
}
}
| 1no label
|
src_main_java_org_elasticsearch_transport_ConnectTransportException.java
|
1,176 |
public class OQueryOperatorContainsText extends OQueryTargetOperator {
private boolean ignoreCase = true;
public OQueryOperatorContainsText(final boolean iIgnoreCase) {
super("CONTAINSTEXT", 5, false);
ignoreCase = iIgnoreCase;
}
public OQueryOperatorContainsText() {
super("CONTAINSTEXT", 5, false);
}
@Override
public String getSyntax() {
return "<left> CONTAINSTEXT[( noignorecase ] )] <right>";
}
/**
* This is executed on non-indexed fields.
*/
@Override
public Object evaluateRecord(final OIdentifiable iRecord, ODocument iCurrentResult, final OSQLFilterCondition iCondition,
final Object iLeft, final Object iRight, OCommandContext iContext) {
if (iLeft == null || iRight == null)
return false;
return iLeft.toString().indexOf(iRight.toString()) > -1;
}
@SuppressWarnings({ "unchecked", "deprecation" })
@Override
public Collection<OIdentifiable> filterRecords(final ODatabaseComplex<?> iDatabase, final List<String> iTargetClasses,
final OSQLFilterCondition iCondition, final Object iLeft, final Object iRight) {
final String fieldName;
if (iCondition.getLeft() instanceof OSQLFilterItemField)
fieldName = iCondition.getLeft().toString();
else
fieldName = iCondition.getRight().toString();
final String fieldValue;
if (iCondition.getLeft() instanceof OSQLFilterItemField)
fieldValue = iCondition.getRight().toString();
else
fieldValue = iCondition.getLeft().toString();
final String className = iTargetClasses.get(0);
final OProperty prop = iDatabase.getMetadata().getSchema().getClass(className).getProperty(fieldName);
if (prop == null)
// NO PROPERTY DEFINED
return null;
OIndex<?> fullTextIndex = null;
for (final OIndex<?> indexDefinition : prop.getIndexes()) {
if (indexDefinition instanceof OIndexFullText) {
fullTextIndex = indexDefinition;
break;
}
}
if (fullTextIndex == null) {
return null;
}
return (Collection<OIdentifiable>) fullTextIndex.get(fieldValue);
}
public boolean isIgnoreCase() {
return ignoreCase;
}
@Override
public OIndexReuseType getIndexReuseType(final Object iLeft, final Object iRight) {
return OIndexReuseType.INDEX_METHOD;
}
@Override
public Object executeIndexQuery(OCommandContext iContext, OIndex<?> index, INDEX_OPERATION_TYPE iOperationType,
List<Object> keyParams, IndexResultListener resultListener, int fetchLimit) {
final OIndexDefinition indexDefinition = index.getDefinition();
if (indexDefinition.getParamCount() > 1)
return null;
final OIndex<?> internalIndex = index.getInternal();
final Object result;
if (internalIndex instanceof OIndexFullText) {
final Object indexResult = index.get(indexDefinition.createValue(keyParams));
if (indexResult instanceof Collection)
result = indexResult;
else if (indexResult == null)
result = Collections.emptyList();
else
result = Collections.singletonList((OIdentifiable) indexResult);
} else
return null;
updateProfiler(iContext, internalIndex, keyParams, indexDefinition);
if (iOperationType == INDEX_OPERATION_TYPE.COUNT)
return ((Collection<?>) result).size();
return result;
}
@Override
public ORID getBeginRidRange(Object iLeft, Object iRight) {
return null;
}
@Override
public ORID getEndRidRange(Object iLeft, Object iRight) {
return null;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_operator_OQueryOperatorContainsText.java
|
35 |
public class TitanEdgeTestSuite extends EdgeTestSuite {
public TitanEdgeTestSuite(final GraphTest graphTest) {
super(graphTest);
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_blueprints_TitanEdgeTestSuite.java
|
388 |
public class ClusterUpdateSettingsResponse extends AcknowledgedResponse {
Settings transientSettings;
Settings persistentSettings;
ClusterUpdateSettingsResponse() {
this.persistentSettings = ImmutableSettings.EMPTY;
this.transientSettings = ImmutableSettings.EMPTY;
}
ClusterUpdateSettingsResponse(boolean acknowledged, Settings transientSettings, Settings persistentSettings) {
super(acknowledged);
this.persistentSettings = persistentSettings;
this.transientSettings = transientSettings;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
transientSettings = ImmutableSettings.readSettingsFromStream(in);
persistentSettings = ImmutableSettings.readSettingsFromStream(in);
readAcknowledged(in);
}
public Settings getTransientSettings() {
return transientSettings;
}
public Settings getPersistentSettings() {
return persistentSettings;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
ImmutableSettings.writeSettingsToStream(transientSettings, out);
ImmutableSettings.writeSettingsToStream(persistentSettings, out);
writeAcknowledged(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_settings_ClusterUpdateSettingsResponse.java
|
6,117 |
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
@Override
public void run() {
try {
shardSnapshotService.snapshot(entry.getKey(), shardEntry.getValue());
updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(entry.getKey(), shardEntry.getKey(), new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.SUCCESS)));
} catch (Throwable t) {
logger.warn("[{}] [{}] failed to create snapshot", t, shardEntry.getKey(), entry.getKey());
updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(entry.getKey(), shardEntry.getKey(), new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.FAILED, ExceptionsHelper.detailedMessage(t))));
}
}
});
| 1no label
|
src_main_java_org_elasticsearch_snapshots_SnapshotsService.java
|
5,386 |
public class InternalSum extends MetricsAggregation.SingleValue implements Sum {
public final static Type TYPE = new Type("sum");
public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public InternalSum readResult(StreamInput in) throws IOException {
InternalSum result = new InternalSum();
result.readFrom(in);
return result;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
private double sum;
InternalSum() {} // for serialization
InternalSum(String name, double sum) {
super(name);
this.sum = sum;
}
@Override
public double value() {
return sum;
}
public double getValue() {
return sum;
}
@Override
public Type type() {
return TYPE;
}
@Override
public InternalSum reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
return (InternalSum) aggregations.get(0);
}
InternalSum reduced = null;
for (InternalAggregation aggregation : aggregations) {
if (reduced == null) {
reduced = (InternalSum) aggregation;
} else {
reduced.sum += ((InternalSum) aggregation).sum;
}
}
if (reduced != null) {
return reduced;
}
return (InternalSum) aggregations.get(0);
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
valueFormatter = ValueFormatterStreams.readOptional(in);
sum = in.readDouble();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
ValueFormatterStreams.writeOptional(valueFormatter, out);
out.writeDouble(sum);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
builder.field(CommonFields.VALUE, sum);
if (valueFormatter != null) {
builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(sum));
}
builder.endObject();
return builder;
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_metrics_sum_InternalSum.java
|
261 |
@Entity
@Table(name = "BLC_EMAIL_TRACKING_OPENS")
public class EmailTrackingOpensImpl implements EmailTrackingOpens {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "OpenId")
@GenericGenerator(
name="OpenId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="EmailTrackingOpensImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.common.email.domain.EmailTrackingOpensImpl")
}
)
@Column(name = "OPEN_ID")
protected Long id;
@Column(name = "DATE_OPENED")
protected Date dateOpened;
@Column(name = "USER_AGENT")
protected String userAgent;
@ManyToOne(targetEntity = EmailTrackingImpl.class)
@JoinColumn(name = "EMAIL_TRACKING_ID")
@Index(name="TRACKINGOPEN_TRACKING", columnNames={"EMAIL_TRACKING_ID"})
protected EmailTracking emailTracking;
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingOpens#getId()
*/
@Override
public Long getId() {
return id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingOpens#setId(java.lang.Long)
*/
@Override
public void setId(Long id) {
this.id = id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingOpens#getDateOpened()
*/
@Override
public Date getDateOpened() {
return dateOpened;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingOpens#setDateOpened(java.util.Date)
*/
@Override
public void setDateOpened(Date dateOpened) {
this.dateOpened = dateOpened;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingOpens#getUserAgent()
*/
@Override
public String getUserAgent() {
return userAgent;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingOpens#setUserAgent(java.lang.String)
*/
@Override
public void setUserAgent(String userAgent) {
if (userAgent.length() > 255) {
userAgent = userAgent.substring(0,254);
}
this.userAgent = userAgent;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingOpens#getEmailTracking()
*/
@Override
public EmailTracking getEmailTracking() {
return emailTracking;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.common.email.domain.EmailTrackingOpens#setEmailTracking(org.broadleafcommerce.common.email.domain.EmailTrackingImpl)
*/
@Override
public void setEmailTracking(EmailTracking emailTracking) {
this.emailTracking = emailTracking;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((dateOpened == null) ? 0 : dateOpened.hashCode());
result = prime * result + ((emailTracking == null) ? 0 : emailTracking.hashCode());
result = prime * result + ((userAgent == null) ? 0 : userAgent.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EmailTrackingOpensImpl other = (EmailTrackingOpensImpl) obj;
if (id == null && other.id != null) {
return id.equals(other.id);
}
if (dateOpened == null) {
if (other.dateOpened != null)
return false;
} else if (!dateOpened.equals(other.dateOpened))
return false;
if (emailTracking == null) {
if (other.emailTracking != null)
return false;
} else if (!emailTracking.equals(other.emailTracking))
return false;
if (userAgent == null) {
if (other.userAgent != null)
return false;
} else if (!userAgent.equals(other.userAgent))
return false;
return true;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_email_domain_EmailTrackingOpensImpl.java
|
1,576 |
public abstract class ODistributedAbstractPlugin extends OServerPluginAbstract implements ODistributedServerManager,
ODatabaseLifecycleListener {
public static final String REPLICATOR_USER = "replicator";
protected static final String MASTER_AUTO = "$auto";
protected static final String PAR_DEF_DISTRIB_DB_CONFIG = "configuration.db.default";
protected static final String FILE_DISTRIBUTED_DB_CONFIG = "distributed-config.json";
protected OServer serverInstance;
protected Map<String, ODocument> cachedDatabaseConfiguration = new HashMap<String, ODocument>();
protected boolean enabled = true;
protected String nodeName = null;
protected Class<? extends OReplicationConflictResolver> confictResolverClass;
protected File defaultDatabaseConfigFile;
protected Map<String, ODistributedPartitioningStrategy> strategies = new HashMap<String, ODistributedPartitioningStrategy>();
@SuppressWarnings("unchecked")
@Override
public void config(OServer oServer, OServerParameterConfiguration[] iParams) {
serverInstance = oServer;
oServer.setVariable("ODistributedAbstractPlugin", this);
for (OServerParameterConfiguration param : iParams) {
if (param.name.equalsIgnoreCase("enabled")) {
if (!Boolean.parseBoolean(OSystemVariableResolver.resolveSystemVariables(param.value))) {
// DISABLE IT
enabled = false;
return;
}
} else if (param.name.equalsIgnoreCase("nodeName"))
nodeName = param.value;
else if (param.name.startsWith(PAR_DEF_DISTRIB_DB_CONFIG)) {
defaultDatabaseConfigFile = new File(OSystemVariableResolver.resolveSystemVariables(param.value));
if (!defaultDatabaseConfigFile.exists())
throw new OConfigurationException("Cannot find distributed database config file: " + defaultDatabaseConfigFile);
} else if (param.name.equalsIgnoreCase("conflict.resolver.impl"))
try {
confictResolverClass = (Class<? extends OReplicationConflictResolver>) Class.forName(param.value);
} catch (ClassNotFoundException e) {
OLogManager.instance().error(this, "Cannot find the conflict resolver implementation '%s'", e, param.value);
}
else if (param.name.startsWith("sharding.strategy.")) {
try {
strategies.put(param.name.substring("sharding.strategy.".length()),
(ODistributedPartitioningStrategy) Class.forName(param.value).newInstance());
} catch (Exception e) {
OLogManager.instance().error(this, "Cannot create sharding strategy instance '%s'", e, param.value);
e.printStackTrace();
}
}
}
if (serverInstance.getUser(REPLICATOR_USER) == null)
// CREATE THE REPLICATOR USER
try {
serverInstance.addUser(REPLICATOR_USER, null, "database.passthrough");
serverInstance.saveConfiguration();
} catch (IOException e) {
throw new OConfigurationException("Error on creating 'replicator' user", e);
}
}
@Override
public void startup() {
if (!enabled)
return;
Orient.instance().addDbLifecycleListener(this);
}
@Override
public void shutdown() {
if (!enabled)
return;
Orient.instance().removeDbLifecycleListener(this);
}
/**
* Auto register myself as hook.
*/
@Override
public void onOpen(final ODatabase iDatabase) {
final String dbDirectory = serverInstance.getDatabaseDirectory();
if (!iDatabase.getURL().substring(iDatabase.getURL().indexOf(":") + 1).startsWith(dbDirectory))
// NOT OWN DB, SKIPT IT
return;
synchronized (cachedDatabaseConfiguration) {
final ODistributedConfiguration cfg = getDatabaseConfiguration(iDatabase.getName());
if (cfg == null)
return;
if (cfg.isReplicationActive(null)) {
if (iDatabase instanceof ODatabaseComplex<?> && !(iDatabase.getStorage() instanceof ODistributedStorage))
((ODatabaseComplex<?>) iDatabase).replaceStorage(new ODistributedStorage(serverInstance,
(OStorageEmbedded) ((ODatabaseComplex<?>) iDatabase).getStorage()));
}
}
}
/**
* Remove myself as hook.
*/
@Override
public void onClose(final ODatabase iDatabase) {
}
@Override
public void sendShutdown() {
super.sendShutdown();
}
@Override
public String getName() {
return "cluster";
}
public String getLocalNodeId() {
return nodeName;
}
public ODistributedPartitioningStrategy getReplicationStrategy(String iStrategy) {
if (iStrategy.startsWith("$"))
iStrategy = iStrategy.substring(1);
final ODistributedPartitioningStrategy strategy = strategies.get(iStrategy);
if (strategy == null)
throw new ODistributedException("Configured strategy '" + iStrategy + "' is not configured");
return strategy;
}
public ODistributedPartitioningStrategy getPartitioningStrategy(final String iStrategyName) {
return strategies.get(iStrategyName);
}
protected ODocument loadDatabaseConfiguration(final String iDatabaseName, final File file) {
if (!file.exists() || file.length() == 0)
return null;
ODistributedServerLog.info(this, getLocalNodeName(), null, DIRECTION.NONE, "loaded database configuration from disk: %s", file);
FileInputStream f = null;
try {
f = new FileInputStream(file);
final byte[] buffer = new byte[(int) file.length()];
f.read(buffer);
final ODocument doc = (ODocument) new ODocument().fromJSON(new String(buffer), "noMap");
updateCachedDatabaseConfiguration(iDatabaseName, doc);
return doc;
} catch (Exception e) {
} finally {
if (f != null)
try {
f.close();
} catch (IOException e) {
}
}
return null;
}
public void updateCachedDatabaseConfiguration(final String iDatabaseName, final ODocument cfg) {
synchronized (cachedDatabaseConfiguration) {
cachedDatabaseConfiguration.put(iDatabaseName, cfg);
OLogManager.instance().info(this, "updated distributed configuration for database: %s:\n----------\n%s\n----------",
iDatabaseName, cfg.toJSON("prettyPrint"));
}
}
public ODistributedConfiguration getDatabaseConfiguration(final String iDatabaseName) {
synchronized (cachedDatabaseConfiguration) {
ODocument cfg = cachedDatabaseConfiguration.get(iDatabaseName);
if (cfg == null) {
cfg = cachedDatabaseConfiguration.get("*");
if (cfg == null) {
cfg = loadDatabaseConfiguration(iDatabaseName, defaultDatabaseConfigFile);
if (cfg == null)
throw new OConfigurationException("Cannot load default distributed database config file: " + defaultDatabaseConfigFile);
}
}
return new ODistributedConfiguration(cfg);
}
}
protected void saveDatabaseConfiguration(final String iDatabaseName, final ODocument cfg) {
synchronized (cachedDatabaseConfiguration) {
final ODocument oldCfg = cachedDatabaseConfiguration.get(iDatabaseName);
if (oldCfg != null && Arrays.equals(oldCfg.toStream(), cfg.toStream()))
// NO CHANGE, SKIP IT
return;
}
// INCREMENT VERSION
Integer oldVersion = cfg.field("version");
if (oldVersion == null)
oldVersion = 0;
cfg.field("version", oldVersion.intValue() + 1);
updateCachedDatabaseConfiguration(iDatabaseName, cfg);
FileOutputStream f = null;
try {
File file = getDistributedConfigFile(iDatabaseName);
OLogManager.instance().config(this, "Saving distributed configuration file for database '%s' in: %s", iDatabaseName, file);
f = new FileOutputStream(file);
f.write(cfg.toJSON().getBytes());
} catch (Exception e) {
OLogManager.instance().error(this, "Error on saving distributed configuration file", e);
} finally {
if (f != null)
try {
f.close();
} catch (IOException e) {
}
}
}
public File getDistributedConfigFile(final String iDatabaseName) {
return new File(serverInstance.getDatabaseDirectory() + iDatabaseName + "/" + FILE_DISTRIBUTED_DB_CONFIG);
}
public OServer getServerInstance() {
return serverInstance;
}
}
| 1no label
|
server_src_main_java_com_orientechnologies_orient_server_distributed_ODistributedAbstractPlugin.java
|
1,502 |
@SuppressWarnings("SynchronizationOnStaticField")
@PrivateApi
public final class HazelcastInstanceFactory {
private static final ConcurrentMap<String, InstanceFuture> INSTANCE_MAP
= new ConcurrentHashMap<String, InstanceFuture>(5);
private static final AtomicInteger FACTORY_ID_GEN = new AtomicInteger();
private HazelcastInstanceFactory() {
}
public static Set<HazelcastInstance> getAllHazelcastInstances() {
Set<HazelcastInstance> result = new HashSet<HazelcastInstance>();
for (InstanceFuture f : INSTANCE_MAP.values()) {
result.add(f.get());
}
return result;
}
public static HazelcastInstance getHazelcastInstance(String instanceName) {
InstanceFuture instanceFuture = INSTANCE_MAP.get(instanceName);
if (instanceFuture == null) {
return null;
}
try {
return instanceFuture.get();
} catch (IllegalStateException t) {
return null;
}
}
public static HazelcastInstance getOrCreateHazelcastInstance(Config config) {
if (config == null) {
throw new NullPointerException("config can't be null");
}
String name = config.getInstanceName();
hasText(name, "instanceName");
InstanceFuture future = INSTANCE_MAP.get(name);
if (future != null) {
return future.get();
}
future = new InstanceFuture();
InstanceFuture found = INSTANCE_MAP.putIfAbsent(name, future);
if (found != null) {
return found.get();
}
try {
HazelcastInstanceProxy hz = constructHazelcastInstance(config, name, new DefaultNodeContext());
future.set(hz);
return hz;
} catch (Throwable t) {
INSTANCE_MAP.remove(name, future);
future.setFailure(t);
throw ExceptionUtil.rethrow(t);
}
}
public static HazelcastInstance newHazelcastInstance(Config config) {
if (config == null) {
config = new XmlConfigBuilder().build();
}
return newHazelcastInstance(config, config.getInstanceName(), new DefaultNodeContext());
}
private static String createInstanceName(Config config) {
return "_hzInstance_" + FACTORY_ID_GEN.incrementAndGet() + "_" + config.getGroupConfig().getName();
}
public static HazelcastInstance newHazelcastInstance(Config config, String instanceName,
NodeContext nodeContext) {
if (config == null) {
config = new XmlConfigBuilder().build();
}
String name = instanceName;
if (name == null || name.trim().length() == 0) {
name = createInstanceName(config);
}
InstanceFuture future = new InstanceFuture();
if (INSTANCE_MAP.putIfAbsent(name, future) != null) {
throw new DuplicateInstanceNameException("HazelcastInstance with name '" + name + "' already exists!");
}
try {
HazelcastInstanceProxy hz = constructHazelcastInstance(config, name, nodeContext);
future.set(hz);
return hz;
} catch (Throwable t) {
INSTANCE_MAP.remove(name, future);
future.setFailure(t);
throw ExceptionUtil.rethrow(t);
}
}
private static HazelcastInstanceProxy constructHazelcastInstance(Config config, String instanceName,
NodeContext nodeContext) {
final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
HazelcastInstanceProxy proxy;
try {
if (classLoader == null) {
Thread.currentThread().setContextClassLoader(HazelcastInstanceFactory.class.getClassLoader());
}
HazelcastInstanceImpl hazelcastInstance = new HazelcastInstanceImpl(instanceName, config, nodeContext);
OutOfMemoryErrorDispatcher.register(hazelcastInstance);
proxy = new HazelcastInstanceProxy(hazelcastInstance);
final Node node = hazelcastInstance.node;
final boolean firstMember = isFirstMember(node);
final int initialWaitSeconds = node.groupProperties.INITIAL_WAIT_SECONDS.getInteger();
if (initialWaitSeconds > 0) {
hazelcastInstance.logger.info("Waiting "
+ initialWaitSeconds + " seconds before completing HazelcastInstance startup...");
try {
Thread.sleep(TimeUnit.SECONDS.toMillis(initialWaitSeconds));
if (firstMember) {
node.partitionService.firstArrangement();
} else {
Thread.sleep(TimeUnit.SECONDS.toMillis(4));
}
} catch (InterruptedException ignored) {
}
}
awaitMinimalClusterSize(hazelcastInstance, node, firstMember);
hazelcastInstance.lifecycleService.fireLifecycleEvent(STARTED);
} catch (Throwable t) {
throw ExceptionUtil.rethrow(t);
} finally {
Thread.currentThread().setContextClassLoader(classLoader);
}
return proxy;
}
private static boolean isFirstMember(Node node) {
final Iterator<Member> iter = node.getClusterService().getMembers().iterator();
return (iter.hasNext() && iter.next().localMember());
}
private static void awaitMinimalClusterSize(HazelcastInstanceImpl hazelcastInstance, Node node, boolean firstMember)
throws InterruptedException {
final int initialMinClusterSize = node.groupProperties.INITIAL_MIN_CLUSTER_SIZE.getInteger();
while (node.getClusterService().getSize() < initialMinClusterSize) {
try {
hazelcastInstance.logger.info("HazelcastInstance waiting for cluster size of " + initialMinClusterSize);
//noinspection BusyWait
Thread.sleep(TimeUnit.SECONDS.toMillis(1));
} catch (InterruptedException ignored) {
}
}
if (initialMinClusterSize > 1) {
if (firstMember) {
node.partitionService.firstArrangement();
} else {
Thread.sleep(TimeUnit.SECONDS.toMillis(3));
}
hazelcastInstance.logger.info("HazelcastInstance starting after waiting for cluster size of "
+ initialMinClusterSize);
}
}
public static void shutdownAll() {
final List<HazelcastInstanceProxy> instances = new LinkedList<HazelcastInstanceProxy>();
for (InstanceFuture f : INSTANCE_MAP.values()) {
try {
HazelcastInstanceProxy instanceProxy = f.get();
instances.add(instanceProxy);
} catch (RuntimeException ignore) {
}
}
INSTANCE_MAP.clear();
OutOfMemoryErrorDispatcher.clear();
ManagementService.shutdownAll();
Collections.sort(instances, new Comparator<HazelcastInstanceProxy>() {
public int compare(HazelcastInstanceProxy o1, HazelcastInstanceProxy o2) {
return o1.getName().compareTo(o2.getName());
}
});
for (HazelcastInstanceProxy proxy : instances) {
proxy.getLifecycleService().shutdown();
proxy.original = null;
}
}
static Map<MemberImpl, HazelcastInstanceImpl> getInstanceImplMap() {
final Map<MemberImpl, HazelcastInstanceImpl> map = new HashMap<MemberImpl, HazelcastInstanceImpl>();
for (InstanceFuture f : INSTANCE_MAP.values()) {
try {
HazelcastInstanceProxy instanceProxy = f.get();
final HazelcastInstanceImpl impl = instanceProxy.original;
if (impl != null) {
map.put(impl.node.getLocalMember(), impl);
}
} catch (RuntimeException ignore) {
}
}
return map;
}
static void remove(HazelcastInstanceImpl instance) {
OutOfMemoryErrorDispatcher.deregister(instance);
InstanceFuture future = INSTANCE_MAP.remove(instance.getName());
if (future != null) {
future.get().original = null;
}
if (INSTANCE_MAP.size() == 0) {
ManagementService.shutdownAll();
}
}
private static class InstanceFuture {
private volatile HazelcastInstanceProxy hz;
private volatile Throwable throwable;
HazelcastInstanceProxy get() {
if (hz != null) {
return hz;
}
boolean restoreInterrupt = false;
synchronized (this) {
while (hz == null && throwable == null) {
try {
wait();
} catch (InterruptedException ignore) {
restoreInterrupt = true;
}
}
}
if (restoreInterrupt) {
Thread.currentThread().interrupt();
}
if (hz != null) {
return hz;
}
throw new IllegalStateException(throwable);
}
void set(HazelcastInstanceProxy proxy) {
synchronized (this) {
this.hz = proxy;
notifyAll();
}
}
public void setFailure(Throwable throwable) {
synchronized (this) {
this.throwable = throwable;
notifyAll();
}
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_instance_HazelcastInstanceFactory.java
|
3,362 |
final class BasicInvocationFuture<E> implements InternalCompletableFuture<E> {
private BasicInvocation basicInvocation;
private volatile ExecutionCallbackNode<E> callbackHead;
private volatile Object response;
volatile boolean interrupted;
BasicInvocationFuture(BasicInvocation basicInvocation, final Callback<E> callback) {
this.basicInvocation = basicInvocation;
if (callback != null) {
ExecutorCallbackAdapter<E> adapter = new ExecutorCallbackAdapter<E>(callback);
callbackHead = new ExecutionCallbackNode<E>(adapter, basicInvocation.getAsyncExecutor(), null);
}
}
static long decrementTimeout(long timeout, long diff) {
if (timeout != Long.MAX_VALUE) {
timeout -= diff;
}
return timeout;
}
@Override
public void andThen(ExecutionCallback<E> callback, Executor executor) {
isNotNull(callback, "callback");
isNotNull(executor, "executor");
synchronized (this) {
if (response != null) {
runAsynchronous(callback, executor);
return;
}
this.callbackHead = new ExecutionCallbackNode<E>(callback, executor, callbackHead);
}
}
@Override
public void andThen(ExecutionCallback<E> callback) {
andThen(callback, basicInvocation.getAsyncExecutor());
}
private void runAsynchronous(final ExecutionCallback<E> callback, Executor executor) {
try {
executor.execute(new Runnable() {
@Override
public void run() {
try {
Object resp = resolveResponse(response);
if (resp == null || !(resp instanceof Throwable)) {
callback.onResponse((E) resp);
} else {
callback.onFailure((Throwable) resp);
}
} catch (Throwable t) {
//todo: improved error message
basicInvocation.logger.severe("Failed to async for " + basicInvocation, t);
}
}
});
} catch (RejectedExecutionException ignore) {
basicInvocation.logger.finest(ignore);
}
}
/**
* Can be called multiple times, but only the first answer will lead to the future getting triggered. All subsequent
* 'set' calls are ignored.
*
* @param response
*/
public void set(Object response) {
if (response == null) {
throw new IllegalArgumentException("response can't be null");
}
if (response instanceof NormalResponse) {
response = ((NormalResponse) response).getValue();
}
if (response == null) {
response = BasicInvocation.NULL_RESPONSE;
}
ExecutionCallbackNode<E> callbackChain;
synchronized (this) {
if (this.response != null && !(this.response instanceof BasicInvocation.InternalResponse)) {
//it can be that this invocation future already received an answer, e.g. when a an invocation
//already received a response, but before it cleans up itself, it receives a
//HazelcastInstanceNotActiveException.
basicInvocation.logger.info("The InvocationFuture.set method of " + basicInvocation + " can only be called once");
return;
}
this.response = response;
if (response == BasicInvocation.WAIT_RESPONSE) {
return;
}
callbackChain = callbackHead;
callbackHead = null;
this.notifyAll();
}
BasicOperationService operationService = (BasicOperationService) basicInvocation.nodeEngine.operationService;
operationService.deregisterInvocation(basicInvocation.op.getCallId());
while (callbackChain != null) {
runAsynchronous(callbackChain.callback, callbackChain.executor);
callbackChain = callbackChain.next;
}
}
@Override
public E get() throws InterruptedException, ExecutionException {
try {
return get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
basicInvocation.logger.severe("Unexpected timeout while processing " + this, e);
return null;
}
}
@Override
public E getSafely() {
try {
//todo:
//this method is quite inefficient when there is unchecked exception, because it will be wrapped
//in a ExecutionException, and then it is unwrapped again.
return get();
} catch (Throwable throwable) {
throw ExceptionUtil.rethrow(throwable);
}
}
@Override
public E get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
Object unresolvedResponse = waitForResponse(timeout, unit);
return (E) resolveResponseOrThrowException(unresolvedResponse);
}
private Object waitForResponse(long time, TimeUnit unit) {
if (response != null && response != BasicInvocation.WAIT_RESPONSE) {
return response;
}
long timeoutMs = unit.toMillis(time);
if (timeoutMs < 0) timeoutMs = 0;
final long maxCallTimeout = basicInvocation.callTimeout * 2 > 0 ? basicInvocation.callTimeout * 2 : Long.MAX_VALUE;
final boolean longPolling = timeoutMs > maxCallTimeout;
int pollCount = 0;
while (timeoutMs >= 0) {
final long pollTimeoutMs = Math.min(maxCallTimeout, timeoutMs);
final long startMs = Clock.currentTimeMillis();
long lastPollTime = 0;
pollCount++;
try {
//we should only wait if there is any timeout. We can't call wait with 0, because it is interpreted as infinite.
if (pollTimeoutMs > 0) {
synchronized (this) {
if (response == null || response == BasicInvocation.WAIT_RESPONSE) {
this.wait(pollTimeoutMs);
}
}
}
lastPollTime = Clock.currentTimeMillis() - startMs;
timeoutMs = decrementTimeout(timeoutMs, lastPollTime);
if (response != null) {
if (response == BasicInvocation.WAIT_RESPONSE) {
continue;
}
//if the thread is interrupted, but the response was not an interrupted-response,
//we need to restore the interrupt flag.
if (response != BasicInvocation.INTERRUPTED_RESPONSE && interrupted) {
Thread.currentThread().interrupt();
}
return response;
}
} catch (InterruptedException e) {
interrupted = true;
}
if (!interrupted && /* response == null && */ longPolling) {
// no response!
final Address target = basicInvocation.getTarget();
if (basicInvocation.nodeEngine.getThisAddress().equals(target)) {
// target may change during invocation because of migration!
continue;
}
// TODO: @mm - improve logging (see SystemLogService)
basicInvocation.logger.warning("No response for " + lastPollTime + " ms. " + toString());
boolean executing = isOperationExecuting(target);
if (!executing) {
if (response != null) {
continue;
}
return newOperationTimeoutException(pollCount, pollTimeoutMs);
}
}
}
return BasicInvocation.TIMEOUT_RESPONSE;
}
private Object newOperationTimeoutException(int pollCount, long pollTimeoutMs) {
boolean hasResponse = basicInvocation.potentialResponse == null;
int backupsExpected = basicInvocation.backupsExpected;
int backupsCompleted = basicInvocation.backupsCompleted;
if (hasResponse) {
return new OperationTimeoutException("No response for " + (pollTimeoutMs * pollCount) + " ms."
+ " Aborting invocation! " + toString()
+ " Not all backups have completed "
+ " backups-expected:" + backupsExpected
+ " backups-completed: " + backupsCompleted);
} else {
return new OperationTimeoutException("No response for " + (pollTimeoutMs * pollCount) + " ms."
+ " Aborting invocation! " + toString()
+ " No response has been send "
+ " backups-expected:" + backupsExpected
+ " backups-completed: " + backupsCompleted);
}
}
private Object resolveResponseOrThrowException(Object unresolvedResponse)
throws ExecutionException, InterruptedException, TimeoutException {
Object response = resolveResponse(unresolvedResponse);
if (response == null || !(response instanceof Throwable)) {
return response;
}
if (response instanceof ExecutionException) {
throw (ExecutionException) response;
}
if (response instanceof TimeoutException) {
throw (TimeoutException) response;
}
if (response instanceof InterruptedException) {
throw (InterruptedException) response;
}
if (response instanceof Error) {
throw (Error) response;
}
// To obey Future contract, we should wrap unchecked exceptions with ExecutionExceptions.
throw new ExecutionException((Throwable) response);
}
private Object resolveResponse(Object unresolvedResponse) {
if (unresolvedResponse == BasicInvocation.NULL_RESPONSE) {
return null;
}
if (unresolvedResponse == BasicInvocation.TIMEOUT_RESPONSE) {
return new TimeoutException("Call " + basicInvocation + " encountered a timeout");
}
if (unresolvedResponse == BasicInvocation.INTERRUPTED_RESPONSE) {
return new InterruptedException("Call " + basicInvocation + " was interrupted");
}
Object response = unresolvedResponse;
if (basicInvocation.resultDeserialized && response instanceof Data) {
response = basicInvocation.nodeEngine.toObject(response);
if (response == null) {
return null;
}
}
if (response instanceof NormalResponse) {
NormalResponse responseObj = (NormalResponse) response;
response = responseObj.getValue();
if (response == null) {
return null;
}
//it could be that the value of the response is Data.
if (basicInvocation.resultDeserialized && response instanceof Data) {
response = basicInvocation.nodeEngine.toObject(response);
if (response == null) {
return null;
}
}
}
if (response instanceof Throwable) {
Throwable throwable = ((Throwable) response);
if (basicInvocation.remote) {
fixRemoteStackTrace((Throwable) response, Thread.currentThread().getStackTrace());
}
return throwable;
}
return response;
}
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return false;
}
@Override
public boolean isCancelled() {
return false;
}
@Override
public boolean isDone() {
return response != null;
}
private boolean isOperationExecuting(Address target) {
// ask if op is still being executed?
Boolean executing = Boolean.FALSE;
try {
Operation isStillExecuting = createCheckOperation();
BasicInvocation inv = new BasicTargetInvocation(
basicInvocation.nodeEngine, basicInvocation.serviceName, isStillExecuting,
target, 0, 0, 5000, null, null, true);
Future f = inv.invoke();
// TODO: @mm - improve logging (see SystemLogService)
basicInvocation.logger.warning("Asking if operation execution has been started: " + toString());
executing = (Boolean) basicInvocation.nodeEngine.toObject(f.get(5000, TimeUnit.MILLISECONDS));
} catch (Exception e) {
basicInvocation.logger.warning("While asking 'is-executing': " + toString(), e);
}
// TODO: @mm - improve logging (see SystemLogService)
basicInvocation.logger.warning("'is-executing': " + executing + " -> " + toString());
return executing;
}
private Operation createCheckOperation() {
if (basicInvocation.op instanceof TraceableOperation) {
TraceableOperation traceable = (TraceableOperation) basicInvocation.op;
return new TraceableIsStillExecutingOperation(basicInvocation.serviceName, traceable.getTraceIdentifier());
} else {
return new IsStillExecutingOperation(basicInvocation.op.getCallId());
}
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("BasicInvocationFuture{");
sb.append("invocation=").append(basicInvocation.toString());
sb.append(", done=").append(isDone());
sb.append('}');
return sb.toString();
}
private static class ExecutionCallbackNode<E> {
private final ExecutionCallback<E> callback;
private final Executor executor;
private final ExecutionCallbackNode<E> next;
private ExecutionCallbackNode(ExecutionCallback<E> callback, Executor executor, ExecutionCallbackNode<E> next) {
this.callback = callback;
this.executor = executor;
this.next = next;
}
}
private static class ExecutorCallbackAdapter<E> implements ExecutionCallback<E> {
private final Callback callback;
private ExecutorCallbackAdapter(Callback callback) {
this.callback = callback;
}
@Override
public void onResponse(E response) {
callback.notify(response);
}
@Override
public void onFailure(Throwable t) {
callback.notify(t);
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_BasicInvocationFuture.java
|
64 |
static final class KeyIterator<K,V> extends BaseIterator<K,V>
implements Iterator<K>, Enumeration<K> {
KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
ConcurrentHashMapV8<K,V> map) {
super(tab, index, size, limit, map);
}
public final K next() {
Node<K,V> p;
if ((p = next) == null)
throw new NoSuchElementException();
K k = p.key;
lastReturned = p;
advance();
return k;
}
public final K nextElement() { return next(); }
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
2,560 |
public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery {
private static final LocalDiscovery[] NO_MEMBERS = new LocalDiscovery[0];
private final TransportService transportService;
private final ClusterService clusterService;
private final DiscoveryNodeService discoveryNodeService;
private AllocationService allocationService;
private final ClusterName clusterName;
private final Version version;
private final TimeValue publishTimeout;
private DiscoveryNode localNode;
private volatile boolean master = false;
private final AtomicBoolean initialStateSent = new AtomicBoolean();
private final CopyOnWriteArrayList<InitialStateDiscoveryListener> initialStateListeners = new CopyOnWriteArrayList<InitialStateDiscoveryListener>();
private static final ConcurrentMap<ClusterName, ClusterGroup> clusterGroups = ConcurrentCollections.newConcurrentMap();
@Inject
public LocalDiscovery(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService,
DiscoveryNodeService discoveryNodeService, Version version) {
super(settings);
this.clusterName = clusterName;
this.clusterService = clusterService;
this.transportService = transportService;
this.discoveryNodeService = discoveryNodeService;
this.version = version;
this.publishTimeout = settings.getAsTime("discovery.zen.publish_timeout", DEFAULT_PUBLISH_TIMEOUT);
}
@Override
public void setNodeService(@Nullable NodeService nodeService) {
// nothing to do here
}
@Override
public void setAllocationService(AllocationService allocationService) {
this.allocationService = allocationService;
}
@Override
protected void doStart() throws ElasticsearchException {
synchronized (clusterGroups) {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {
clusterGroup = new ClusterGroup();
clusterGroups.put(clusterName, clusterGroup);
}
logger.debug("Connected to cluster [{}]", clusterName);
this.localNode = new DiscoveryNode(settings.get("name"), DiscoveryService.generateNodeId(settings), transportService.boundAddress().publishAddress(),
discoveryNodeService.buildAttributes(), version);
clusterGroup.members().add(this);
LocalDiscovery firstMaster = null;
for (LocalDiscovery localDiscovery : clusterGroup.members()) {
if (localDiscovery.localNode().masterNode()) {
firstMaster = localDiscovery;
break;
}
}
if (firstMaster != null && firstMaster.equals(this)) {
// we are the first master (and the master)
master = true;
final LocalDiscovery master = firstMaster;
clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode);
}
nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
// remove the NO_MASTER block in this case
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(Discovery.NO_MASTER_BLOCK);
return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
sendInitialStateEventIfNeeded();
}
});
} else if (firstMaster != null) {
// update as fast as we can the local node state with the new metadata (so we create indices for example)
final ClusterState masterState = firstMaster.clusterService.state();
clusterService.submitStateUpdateTask("local-disco(detected_master)", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
// make sure we have the local node id set, we might need it as a result of the new metadata
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()).put(localNode).localNodeId(localNode.id());
return ClusterState.builder(currentState).metaData(masterState.metaData()).nodes(nodesBuilder).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
});
// tell the master to send the fact that we are here
final LocalDiscovery master = firstMaster;
firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode + "])", new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode);
}
nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
return ClusterState.builder(currentState).nodes(nodesBuilder).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
sendInitialStateEventIfNeeded();
}
});
}
} // else, no master node, the next node that will start will fill things in...
}
@Override
protected void doStop() throws ElasticsearchException {
synchronized (clusterGroups) {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {
logger.warn("Illegal state, should not have an empty cluster group when stopping, I should be there at teh very least...");
return;
}
clusterGroup.members().remove(this);
if (clusterGroup.members().isEmpty()) {
// no more members, remove and return
clusterGroups.remove(clusterName);
return;
}
LocalDiscovery firstMaster = null;
for (LocalDiscovery localDiscovery : clusterGroup.members()) {
if (localDiscovery.localNode().masterNode()) {
firstMaster = localDiscovery;
break;
}
}
if (firstMaster != null) {
// if the removed node is the master, make the next one as the master
if (master) {
firstMaster.master = true;
}
final Set<String> newMembers = newHashSet();
for (LocalDiscovery discovery : clusterGroup.members()) {
newMembers.add(discovery.localNode.id());
}
final LocalDiscovery master = firstMaster;
master.clusterService.submitStateUpdateTask("local-disco-update", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode.id());
DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes());
if (delta.added()) {
logger.warn("No new nodes should be created when a new discovery view is accepted");
}
// reroute here, so we eagerly remove dead nodes from the routing
ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build();
RoutingAllocation.Result routingResult = master.allocationService.reroute(ClusterState.builder(updatedState).build());
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
});
}
}
}
@Override
protected void doClose() throws ElasticsearchException {
}
@Override
public DiscoveryNode localNode() {
return localNode;
}
@Override
public void addListener(InitialStateDiscoveryListener listener) {
this.initialStateListeners.add(listener);
}
@Override
public void removeListener(InitialStateDiscoveryListener listener) {
this.initialStateListeners.remove(listener);
}
@Override
public String nodeDescription() {
return clusterName.value() + "/" + localNode.id();
}
public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) {
if (!master) {
throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master");
}
LocalDiscovery[] members = members();
if (members.length > 0) {
publish(members, clusterState, new AckClusterStatePublishResponseHandler(members.length - 1, ackListener));
}
}
private LocalDiscovery[] members() {
ClusterGroup clusterGroup = clusterGroups.get(clusterName);
if (clusterGroup == null) {
return NO_MEMBERS;
}
Queue<LocalDiscovery> members = clusterGroup.members();
return members.toArray(new LocalDiscovery[members.size()]);
}
private void publish(LocalDiscovery[] members, ClusterState clusterState, final ClusterStatePublishResponseHandler publishResponseHandler) {
try {
// we do the marshaling intentionally, to check it works well...
final byte[] clusterStateBytes = Builder.toBytes(clusterState);
for (final LocalDiscovery discovery : members) {
if (discovery.master) {
continue;
}
final ClusterState nodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode);
// ignore cluster state messages that do not include "me", not in the game yet...
if (nodeSpecificClusterState.nodes().localNode() != null) {
discovery.clusterService.submitStateUpdateTask("local-disco-receive(from master)", new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
ClusterState.Builder builder = ClusterState.builder(nodeSpecificClusterState);
// if the routing table did not change, use the original one
if (nodeSpecificClusterState.routingTable().version() == currentState.routingTable().version()) {
builder.routingTable(currentState.routingTable());
}
if (nodeSpecificClusterState.metaData().version() == currentState.metaData().version()) {
builder.metaData(currentState.metaData());
}
return builder.build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
publishResponseHandler.onFailure(discovery.localNode, t);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
sendInitialStateEventIfNeeded();
publishResponseHandler.onResponse(discovery.localNode);
}
});
} else {
publishResponseHandler.onResponse(discovery.localNode);
}
}
if (publishTimeout.millis() > 0) {
try {
boolean awaited = publishResponseHandler.awaitAllNodes(publishTimeout);
if (!awaited) {
logger.debug("awaiting all nodes to process published state {} timed out, timeout {}", clusterState.version(), publishTimeout);
}
} catch (InterruptedException e) {
// ignore & restore interrupt
Thread.currentThread().interrupt();
}
}
} catch (Exception e) {
// failure to marshal or un-marshal
throw new ElasticsearchIllegalStateException("Cluster state failed to serialize", e);
}
}
private void sendInitialStateEventIfNeeded() {
if (initialStateSent.compareAndSet(false, true)) {
for (InitialStateDiscoveryListener listener : initialStateListeners) {
listener.initialStateProcessed();
}
}
}
private class ClusterGroup {
private Queue<LocalDiscovery> members = ConcurrentCollections.newQueue();
Queue<LocalDiscovery> members() {
return members;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_discovery_local_LocalDiscovery.java
|
4,522 |
private class ExpiredDocsCollector extends Collector {
private final MapperService mapperService;
private AtomicReaderContext context;
private List<DocToPurge> docsToPurge = new ArrayList<DocToPurge>();
public ExpiredDocsCollector(String index) {
mapperService = indicesService.indexService(index).mapperService();
}
public void setScorer(Scorer scorer) {
}
public boolean acceptsDocsOutOfOrder() {
return true;
}
public void collect(int doc) {
try {
UidAndRoutingFieldsVisitor fieldsVisitor = new UidAndRoutingFieldsVisitor();
context.reader().document(doc, fieldsVisitor);
Uid uid = fieldsVisitor.uid();
final long version = Versions.loadVersion(context.reader(), new Term(UidFieldMapper.NAME, uid.toBytesRef()));
docsToPurge.add(new DocToPurge(uid.type(), uid.id(), version, fieldsVisitor.routing()));
} catch (Exception e) {
logger.trace("failed to collect doc", e);
}
}
public void setNextReader(AtomicReaderContext context) throws IOException {
this.context = context;
}
public List<DocToPurge> getDocsToPurge() {
return this.docsToPurge;
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_ttl_IndicesTTLService.java
|
3,239 |
public final class BytesRefOrdValComparator extends NestedWrappableComparator<BytesRef> {
final IndexFieldData.WithOrdinals<?> indexFieldData;
final BytesRef missingValue;
/* Ords for each slot, times 4.
@lucene.internal */
final long[] ords;
final SortMode sortMode;
/* Values for each slot.
@lucene.internal */
final BytesRef[] values;
/* Which reader last copied a value into the slot. When
we compare two slots, we just compare-by-ord if the
readerGen is the same; else we must compare the
values (slower).
@lucene.internal */
final int[] readerGen;
/* Gen of current reader we are on.
@lucene.internal */
int currentReaderGen = -1;
/* Current reader's doc ord/values.
@lucene.internal */
BytesValues.WithOrdinals termsIndex;
long missingOrd;
/* Bottom slot, or -1 if queue isn't full yet
@lucene.internal */
int bottomSlot = -1;
/* Bottom ord (same as ords[bottomSlot] once bottomSlot
is set). Cached for faster compares.
@lucene.internal */
long bottomOrd;
final BytesRef tempBR = new BytesRef();
public BytesRefOrdValComparator(IndexFieldData.WithOrdinals<?> indexFieldData, int numHits, SortMode sortMode, BytesRef missingValue) {
this.indexFieldData = indexFieldData;
this.sortMode = sortMode;
this.missingValue = missingValue;
ords = new long[numHits];
values = new BytesRef[numHits];
readerGen = new int[numHits];
}
@Override
public int compare(int slot1, int slot2) {
if (readerGen[slot1] == readerGen[slot2]) {
return LongValuesComparator.compare(ords[slot1], ords[slot2]);
}
final BytesRef val1 = values[slot1];
final BytesRef val2 = values[slot2];
if (val1 == null) {
if (val2 == null) {
return 0;
}
return -1;
} else if (val2 == null) {
return 1;
}
return val1.compareTo(val2);
}
@Override
public int compareBottom(int doc) {
throw new UnsupportedOperationException();
}
@Override
public int compareBottomMissing() {
throw new UnsupportedOperationException();
}
@Override
public void copy(int slot, int doc) {
throw new UnsupportedOperationException();
}
@Override
public void missing(int slot) {
throw new UnsupportedOperationException();
}
@Override
public int compareDocToValue(int doc, BytesRef value) {
throw new UnsupportedOperationException();
}
class PerSegmentComparator extends NestedWrappableComparator<BytesRef> {
final Ordinals.Docs readerOrds;
final BytesValues.WithOrdinals termsIndex;
public PerSegmentComparator(BytesValues.WithOrdinals termsIndex) {
this.readerOrds = termsIndex.ordinals();
this.termsIndex = termsIndex;
if (readerOrds.getNumOrds() > Long.MAX_VALUE / 4) {
throw new IllegalStateException("Current terms index pretends it has more than " + (Long.MAX_VALUE / 4) + " ordinals, which is unsupported by this impl");
}
}
@Override
public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
return BytesRefOrdValComparator.this.setNextReader(context);
}
@Override
public int compare(int slot1, int slot2) {
return BytesRefOrdValComparator.this.compare(slot1, slot2);
}
@Override
public void setBottom(final int bottom) {
BytesRefOrdValComparator.this.setBottom(bottom);
}
@Override
public BytesRef value(int slot) {
return BytesRefOrdValComparator.this.value(slot);
}
@Override
public int compareValues(BytesRef val1, BytesRef val2) {
if (val1 == null) {
if (val2 == null) {
return 0;
}
return -1;
} else if (val2 == null) {
return 1;
}
return val1.compareTo(val2);
}
@Override
public int compareDocToValue(int doc, BytesRef value) {
final long ord = getOrd(doc);
final BytesRef docValue = ord == Ordinals.MISSING_ORDINAL ? missingValue : termsIndex.getValueByOrd(ord);
return compareValues(docValue, value);
}
protected long getOrd(int doc) {
return readerOrds.getOrd(doc);
}
@Override
public int compareBottom(int doc) {
assert bottomSlot != -1;
final long docOrd = getOrd(doc);
final long comparableOrd = docOrd == Ordinals.MISSING_ORDINAL ? missingOrd : docOrd << 2;
return LongValuesComparator.compare(bottomOrd, comparableOrd);
}
@Override
public int compareBottomMissing() {
assert bottomSlot != -1;
return LongValuesComparator.compare(bottomOrd, missingOrd);
}
@Override
public void copy(int slot, int doc) {
final long ord = getOrd(doc);
if (ord == Ordinals.MISSING_ORDINAL) {
ords[slot] = missingOrd;
values[slot] = missingValue;
} else {
assert ord > 0;
ords[slot] = ord << 2;
if (values[slot] == null || values[slot] == missingValue) {
values[slot] = new BytesRef();
}
values[slot].copyBytes(termsIndex.getValueByOrd(ord));
}
readerGen[slot] = currentReaderGen;
}
@Override
public void missing(int slot) {
ords[slot] = missingOrd;
values[slot] = missingValue;
}
}
// for assertions
private boolean consistentInsertedOrd(BytesValues.WithOrdinals termsIndex, long ord, BytesRef value) {
assert ord >= 0 : ord;
assert (ord == 0) == (value == null) : "ord=" + ord + ", value=" + value;
final long previousOrd = ord >>> 2;
final long nextOrd = previousOrd + 1;
final BytesRef previous = previousOrd == 0 ? null : termsIndex.getValueByOrd(previousOrd);
if ((ord & 3) == 0) { // there was an existing ord with the inserted value
assert compareValues(previous, value) == 0;
} else {
assert compareValues(previous, value) < 0;
}
if (nextOrd < termsIndex.ordinals().getMaxOrd()) {
final BytesRef next = termsIndex.getValueByOrd(nextOrd);
assert compareValues(value, next) < 0;
}
return true;
}
// find where to insert an ord in the current terms index
private long ordInCurrentReader(BytesValues.WithOrdinals termsIndex, BytesRef value) {
final long docOrd = binarySearch(termsIndex, value);
assert docOrd != -1; // would mean smaller than null
final long ord;
if (docOrd >= 0) {
// value exists in the current segment
ord = docOrd << 2;
} else {
// value doesn't exist, use the ord between the previous and the next term
ord = ((-2 - docOrd) << 2) + 2;
}
assert (ord & 1) == 0;
return ord;
}
@Override
public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
termsIndex = indexFieldData.load(context).getBytesValues(false);
assert termsIndex.ordinals() != null && termsIndex.ordinals().ordinals() != null;
if (missingValue == null) {
missingOrd = Ordinals.MISSING_ORDINAL;
} else {
missingOrd = ordInCurrentReader(termsIndex, missingValue);
assert consistentInsertedOrd(termsIndex, missingOrd, missingValue);
}
FieldComparator<BytesRef> perSegComp = null;
assert termsIndex.ordinals() != null && termsIndex.ordinals().ordinals() != null;
if (termsIndex.isMultiValued()) {
perSegComp = new PerSegmentComparator(termsIndex) {
@Override
protected long getOrd(int doc) {
return getRelevantOrd(readerOrds, doc, sortMode);
}
};
} else {
perSegComp = new PerSegmentComparator(termsIndex);
}
currentReaderGen++;
if (bottomSlot != -1) {
perSegComp.setBottom(bottomSlot);
}
return perSegComp;
}
@Override
public void setBottom(final int bottom) {
bottomSlot = bottom;
final BytesRef bottomValue = values[bottomSlot];
if (bottomValue == null) {
bottomOrd = Ordinals.MISSING_ORDINAL;
} else if (currentReaderGen == readerGen[bottomSlot]) {
bottomOrd = ords[bottomSlot];
} else {
// insert an ord
bottomOrd = ordInCurrentReader(termsIndex, bottomValue);
if (bottomOrd == missingOrd) {
// bottomValue and missingValue and in-between the same field data values -> tie-break
// this is why we multiply ords by 4
assert missingValue != null;
final int cmp = bottomValue.compareTo(missingValue);
if (cmp < 0) {
--bottomOrd;
} else if (cmp > 0) {
++bottomOrd;
}
}
assert consistentInsertedOrd(termsIndex, bottomOrd, bottomValue);
}
readerGen[bottomSlot] = currentReaderGen;
}
@Override
public BytesRef value(int slot) {
return values[slot];
}
final protected static long binarySearch(BytesValues.WithOrdinals a, BytesRef key) {
return binarySearch(a, key, 1, a.ordinals().getNumOrds());
}
final protected static long binarySearch(BytesValues.WithOrdinals a, BytesRef key, long low, long high) {
assert low != Ordinals.MISSING_ORDINAL;
assert high == Ordinals.MISSING_ORDINAL || (a.getValueByOrd(high) == null | a.getValueByOrd(high) != null); // make sure we actually can get these values
assert low == high + 1 || a.getValueByOrd(low) == null | a.getValueByOrd(low) != null;
while (low <= high) {
long mid = (low + high) >>> 1;
BytesRef midVal = a.getValueByOrd(mid);
int cmp;
if (midVal != null) {
cmp = midVal.compareTo(key);
} else {
cmp = -1;
}
if (cmp < 0)
low = mid + 1;
else if (cmp > 0)
high = mid - 1;
else
return mid;
}
return -(low + 1);
}
static long getRelevantOrd(Ordinals.Docs readerOrds, int docId, SortMode sortMode) {
int length = readerOrds.setDocument(docId);
long relevantVal = sortMode.startLong();
long result = 0;
assert sortMode == SortMode.MAX || sortMode == SortMode.MIN;
for (int i = 0; i < length; i++) {
result = relevantVal = sortMode.apply(readerOrds.nextOrd(), relevantVal);
}
assert result >= 0;
assert result <= readerOrds.getMaxOrd();
return result;
// Enable this when the api can tell us that the ords per doc are ordered
/*if (reversed) {
IntArrayRef ref = readerOrds.getOrds(docId);
if (ref.isEmpty()) {
return 0;
} else {
return ref.values[ref.end - 1]; // last element is the highest value.
}
} else {
return readerOrds.getOrd(docId); // returns the lowest value
}*/
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_BytesRefOrdValComparator.java
|
9 |
display.timerExec(100, new Runnable() {
@Override
public void run() {
fCompleted= true;
}
});
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_browser_BrowserInformationControl.java
|
4,234 |
public final class RamDirectoryService extends AbstractIndexShardComponent implements DirectoryService {
@Inject
public RamDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings) {
super(shardId, indexSettings);
}
@Override
public long throttleTimeInNanos() {
return 0;
}
@Override
public Directory[] build() {
return new Directory[]{new CustomRAMDirectory()};
}
@Override
public void renameFile(Directory dir, String from, String to) throws IOException {
CustomRAMDirectory leaf = DirectoryUtils.getLeaf(dir, CustomRAMDirectory.class);
assert leaf != null;
leaf.renameTo(from, to);
}
@Override
public void fullDelete(Directory dir) {
}
static class CustomRAMDirectory extends RAMDirectory {
public synchronized void renameTo(String from, String to) throws IOException {
RAMFile fromFile = fileMap.get(from);
if (fromFile == null)
throw new FileNotFoundException(from);
RAMFile toFile = fileMap.get(to);
if (toFile != null) {
sizeInBytes.addAndGet(-fileLength(from));
fileMap.remove(from);
}
fileMap.put(to, fromFile);
}
@Override
public String toString() {
return "ram";
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_store_ram_RamDirectoryService.java
|
255 |
service.submitToMembers(callable, selector, new MultiExecutionCallback() {
public void onResponse(Member member, Object value) {
if (value.equals(msg + AppendCallable.APPENDAGE)) {
responseLatch.countDown();
}
}
public void onComplete(Map<Member, Object> values) {
completeLatch.countDown();
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
1,268 |
public class OSingleFileSegment {
protected OStorageLocalAbstract storage;
protected OFile file;
protected OStorageFileConfiguration config;
private boolean wasSoftlyClosedAtPreviousTime = true;
public OSingleFileSegment(final String iPath, final String iType) throws IOException {
file = OFileFactory.instance().create(iType, OSystemVariableResolver.resolveSystemVariables(iPath), "rw");
}
public OSingleFileSegment(final OStorageLocalAbstract iStorage, final OStorageFileConfiguration iConfig) throws IOException {
this(iStorage, iConfig, iConfig.type);
}
public OSingleFileSegment(final OStorageLocalAbstract iStorage, final OStorageFileConfiguration iConfig, final String iType)
throws IOException {
config = iConfig;
storage = iStorage;
file = OFileFactory.instance().create(iType, iStorage.getVariableParser().resolveVariables(iConfig.path), iStorage.getMode());
file.setMaxSize((int) OFileUtils.getSizeAsNumber(iConfig.maxSize));
file.setIncrementSize((int) OFileUtils.getSizeAsNumber(iConfig.incrementSize));
}
public boolean open() throws IOException {
boolean softClosed = file.open();
if (!softClosed) {
// LAST TIME THE FILE WAS NOT CLOSED IN SOFT WAY
OLogManager.instance().warn(this, "segment file '%s' was not closed correctly last time", OFileUtils.getPath(file.getName()));
wasSoftlyClosedAtPreviousTime = false;
}
return softClosed;
}
public void create(final int iStartSize) throws IOException {
file.create(iStartSize);
}
public void close() throws IOException {
if (file != null)
file.close();
}
public void delete() throws IOException {
if (file != null)
file.delete();
}
public void truncate() throws IOException {
// SHRINK TO 0
file.shrink(0);
}
public boolean exists() {
return file.exists();
}
public long getSize() {
return file.getFileSize();
}
public long getFilledUpTo() {
return file.getFilledUpTo();
}
public OStorageFileConfiguration getConfig() {
return config;
}
public OFile getFile() {
return file;
}
public void synch() throws IOException {
file.synch();
}
public void setSoftlyClosed(boolean softlyClosed) throws IOException {
file.setSoftlyClosed(softlyClosed);
}
public boolean wasSoftlyClosedAtPreviousTime() {
return wasSoftlyClosedAtPreviousTime;
}
public void rename(String iOldName, String iNewName) {
final String osFileName = file.getName();
if (osFileName.startsWith(iOldName)) {
final File newFile = new File(storage.getStoragePath() + "/" + iNewName
+ osFileName.substring(osFileName.lastIndexOf(iOldName) + iOldName.length()));
boolean renamed = file.renameTo(newFile);
while (!renamed) {
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
renamed = file.renameTo(newFile);
}
}
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OSingleFileSegment.java
|
1 |
public abstract class AbstractTextCommand implements TextCommand {
protected final TextCommandType type;
private SocketTextReader socketTextReader;
private SocketTextWriter socketTextWriter;
private long requestId = -1;
protected AbstractTextCommand(TextCommandType type) {
this.type = type;
}
@Override
public TextCommandType getType() {
return type;
}
@Override
public SocketTextReader getSocketTextReader() {
return socketTextReader;
}
@Override
public SocketTextWriter getSocketTextWriter() {
return socketTextWriter;
}
@Override
public long getRequestId() {
return requestId;
}
@Override
public void init(SocketTextReader socketTextReader, long requestId) {
this.socketTextReader = socketTextReader;
this.requestId = requestId;
this.socketTextWriter = socketTextReader.getSocketTextWriter();
}
@Override
public boolean isUrgent() {
return false;
}
@Override
public boolean shouldReply() {
return true;
}
@Override
public String toString() {
return "AbstractTextCommand[" + type + "]{"
+ "requestId="
+ requestId
+ '}';
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_AbstractTextCommand.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.