Unnamed: 0 (int64, values 0-6.45k) | func (string, lengths 29-253k) | target (class label, 2 classes) | project (string, lengths 36-167)
---|---|---|---
684 | public class TransportPutWarmerAction extends TransportMasterNodeOperationAction<PutWarmerRequest, PutWarmerResponse> {
private final TransportSearchAction searchAction;
@Inject
public TransportPutWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
TransportSearchAction searchAction) {
super(settings, transportService, clusterService, threadPool);
this.searchAction = searchAction;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected String transportAction() {
return PutWarmerAction.NAME;
}
@Override
protected PutWarmerRequest newRequest() {
return new PutWarmerRequest();
}
@Override
protected PutWarmerResponse newResponse() {
return new PutWarmerResponse();
}
@Override
protected ClusterBlockException checkBlock(PutWarmerRequest request, ClusterState state) {
String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.searchRequest().indices(), request.searchRequest().indicesOptions());
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
}
@Override
protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener<PutWarmerResponse> listener) throws ElasticsearchException {
// first execute the search request, see that it's ok...
searchAction.execute(request.searchRequest(), new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse searchResponse) {
if (searchResponse.getFailedShards() > 0) {
listener.onFailure(new ElasticsearchException("search failed with failed shards: " + Arrays.toString(searchResponse.getShardFailures())));
return;
}
clusterService.submitStateUpdateTask("put_warmer [" + request.name() + "]", new AckedClusterStateUpdateTask() {
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
listener.onResponse(new PutWarmerResponse(true));
}
@Override
public void onAckTimeout() {
listener.onResponse(new PutWarmerResponse(false));
}
@Override
public TimeValue ackTimeout() {
return request.timeout();
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void onFailure(String source, Throwable t) {
logger.debug("failed to put warmer [{}] on indices [{}]", t, request.name(), request.searchRequest().indices());
listener.onFailure(t);
}
@Override
public ClusterState execute(ClusterState currentState) {
MetaData metaData = currentState.metaData();
String[] concreteIndices = metaData.concreteIndices(request.searchRequest().indices(), request.searchRequest().indicesOptions());
BytesReference source = null;
if (request.searchRequest().source() != null && request.searchRequest().source().length() > 0) {
source = request.searchRequest().source();
} else if (request.searchRequest().extraSource() != null && request.searchRequest().extraSource().length() > 0) {
source = request.searchRequest().extraSource();
}
// now replace it on the metadata
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
for (String index : concreteIndices) {
IndexMetaData indexMetaData = metaData.index(index);
if (indexMetaData == null) {
throw new IndexMissingException(new Index(index));
}
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (warmers == null) {
logger.info("[{}] putting warmer [{}]", index, request.name());
warmers = new IndexWarmersMetaData(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), source));
} else {
boolean found = false;
List<IndexWarmersMetaData.Entry> entries = new ArrayList<IndexWarmersMetaData.Entry>(warmers.entries().size() + 1);
for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
if (entry.name().equals(request.name())) {
found = true;
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), source));
} else {
entries.add(entry);
}
}
if (!found) {
logger.info("[{}] put warmer [{}]", index, request.name());
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), source));
} else {
logger.info("[{}] update warmer [{}]", index, request.name());
}
warmers = new IndexWarmersMetaData(entries.toArray(new IndexWarmersMetaData.Entry[entries.size()]));
}
IndexMetaData.Builder indexBuilder = IndexMetaData.builder(indexMetaData).putCustom(IndexWarmersMetaData.TYPE, warmers);
mdBuilder.put(indexBuilder);
}
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
}
} | 1 (no label)
| src_main_java_org_elasticsearch_action_admin_indices_warmer_put_TransportPutWarmerAction.java |
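The row above follows a validate-then-commit flow: the warmer's trial search runs first, the cluster-state update is submitted only if it succeeds, and an ack timeout yields `PutWarmerResponse(false)` rather than an error. A minimal sketch of that control flow in plain Java; every name below is illustrative, not Elasticsearch API:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

// Hypothetical sketch: run the validation step first, commit only on success,
// and map an ack timeout to acknowledged=false instead of a failure.
class ValidateThenCommitSketch {
    static void put(Runnable validate, Runnable commit, CountDownLatch allNodesAcked,
                    long ackTimeoutMillis, Consumer<Boolean> listener) throws InterruptedException {
        validate.run();   // e.g. trial-run the warmer's search request
        commit.run();     // e.g. submit the cluster-state update task
        // acknowledged=true on full ack, false on timeout, mirroring the response flag above
        listener.accept(allNodesAcked.await(ackTimeoutMillis, TimeUnit.MILLISECONDS));
    }
}
```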
5,067 | public class SearchService extends AbstractLifecycleComponent<SearchService> {
public static final String NORMS_LOADING_KEY = "index.norms.loading";
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final IndicesService indicesService;
private final IndicesWarmer indicesWarmer;
private final ScriptService scriptService;
private final CacheRecycler cacheRecycler;
private final PageCacheRecycler pageCacheRecycler;
private final DfsPhase dfsPhase;
private final QueryPhase queryPhase;
private final FetchPhase fetchPhase;
private final long defaultKeepAlive;
private final ScheduledFuture<?> keepAliveReaper;
private final AtomicLong idGenerator = new AtomicLong();
private final ConcurrentMapLong<SearchContext> activeContexts = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
private final ImmutableMap<String, SearchParseElement> elementParsers;
@Inject
public SearchService(Settings settings, ClusterService clusterService, IndicesService indicesService, IndicesLifecycle indicesLifecycle, IndicesWarmer indicesWarmer, ThreadPool threadPool,
ScriptService scriptService, CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase) {
super(settings);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.indicesService = indicesService;
this.indicesWarmer = indicesWarmer;
this.scriptService = scriptService;
this.cacheRecycler = cacheRecycler;
this.pageCacheRecycler = pageCacheRecycler;
this.dfsPhase = dfsPhase;
this.queryPhase = queryPhase;
this.fetchPhase = fetchPhase;
TimeValue keepAliveInterval = componentSettings.getAsTime("keep_alive_interval", timeValueMinutes(1));
// we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
this.defaultKeepAlive = componentSettings.getAsTime("default_keep_alive", timeValueMinutes(5)).millis();
Map<String, SearchParseElement> elementParsers = new HashMap<String, SearchParseElement>();
elementParsers.putAll(dfsPhase.parseElements());
elementParsers.putAll(queryPhase.parseElements());
elementParsers.putAll(fetchPhase.parseElements());
elementParsers.put("stats", new StatsGroupsParseElement());
this.elementParsers = ImmutableMap.copyOf(elementParsers);
this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval);
this.indicesWarmer.addListener(new NormsWarmer());
this.indicesWarmer.addListener(new FieldDataWarmer());
this.indicesWarmer.addListener(new SearchWarmer());
}
@Override
protected void doStart() throws ElasticsearchException {
}
@Override
protected void doStop() throws ElasticsearchException {
for (SearchContext context : activeContexts.values()) {
freeContext(context);
}
activeContexts.clear();
}
@Override
protected void doClose() throws ElasticsearchException {
keepAliveReaper.cancel(false);
}
public DfsSearchResult executeDfsPhase(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createAndPutContext(request);
try {
contextProcessing(context);
dfsPhase.execute(context);
contextProcessedSuccessfully(context);
return context.dfsResult();
} catch (Throwable e) {
logger.trace("Dfs phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QuerySearchResult executeScan(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createAndPutContext(request);
assert context.searchType() == SearchType.SCAN;
context.searchType(SearchType.COUNT); // move to COUNT, and then, when scrolling, move to SCAN
assert context.searchType() == SearchType.COUNT;
try {
if (context.scroll() == null) {
throw new ElasticsearchException("Scroll must be provided when scanning...");
}
contextProcessing(context);
queryPhase.execute(context);
contextProcessedSuccessfully(context);
return context.queryResult();
} catch (Throwable e) {
logger.trace("Scan phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
processScroll(request, context);
if (context.searchType() == SearchType.COUNT) {
// first scanning, reset the from to 0
context.searchType(SearchType.SCAN);
context.from(0);
}
queryPhase.execute(context);
shortcutDocIdsToLoadForScanning(context);
fetchPhase.execute(context);
if (context.scroll() == null || context.fetchResult().hits().hits().length < context.size()) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
}
return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), context.shardTarget());
} catch (Throwable e) {
logger.trace("Scan phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QuerySearchResult executeQueryPhase(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createAndPutContext(request);
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
contextProcessing(context);
queryPhase.execute(context);
if (context.searchType() == SearchType.COUNT) {
freeContext(context.id());
} else {
contextProcessedSuccessfully(context);
}
context.indexShard().searchService().onQueryPhase(context, System.nanoTime() - time);
return context.queryResult();
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
logger.trace("Query phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
contextProcessing(context);
processScroll(request, context);
queryPhase.execute(context);
contextProcessedSuccessfully(context);
context.indexShard().searchService().onQueryPhase(context, System.nanoTime() - time);
return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
logger.trace("Query phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QuerySearchResult executeQueryPhase(QuerySearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity()));
} catch (Throwable e) {
freeContext(context);
cleanContext(context);
throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
}
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
queryPhase.execute(context);
contextProcessedSuccessfully(context);
context.indexShard().searchService().onQueryPhase(context, System.nanoTime() - time);
return context.queryResult();
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
logger.trace("Query phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createAndPutContext(request);
contextProcessing(context);
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
try {
queryPhase.execute(context);
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
long time2 = System.nanoTime();
context.indexShard().searchService().onQueryPhase(context, time2 - time);
context.indexShard().searchService().onPreFetchPhase(context);
try {
shortcutDocIdsToLoad(context);
fetchPhase.execute(context);
if (context.scroll() == null) {
freeContext(context.id());
} else {
contextProcessedSuccessfully(context);
}
} catch (Throwable e) {
context.indexShard().searchService().onFailedFetchPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time2);
return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
} catch (Throwable e) {
logger.trace("Fetch phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity()));
} catch (Throwable e) {
freeContext(context);
cleanContext(context);
throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
}
try {
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
try {
queryPhase.execute(context);
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
long time2 = System.nanoTime();
context.indexShard().searchService().onQueryPhase(context, time2 - time);
context.indexShard().searchService().onPreFetchPhase(context);
try {
shortcutDocIdsToLoad(context);
fetchPhase.execute(context);
if (context.scroll() == null) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
}
} catch (Throwable e) {
context.indexShard().searchService().onFailedFetchPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time2);
return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
} catch (Throwable e) {
logger.trace("Fetch phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
processScroll(request, context);
context.indexShard().searchService().onPreQueryPhase(context);
long time = System.nanoTime();
try {
queryPhase.execute(context);
} catch (Throwable e) {
context.indexShard().searchService().onFailedQueryPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
long time2 = System.nanoTime();
context.indexShard().searchService().onQueryPhase(context, time2 - time);
context.indexShard().searchService().onPreFetchPhase(context);
try {
shortcutDocIdsToLoad(context);
fetchPhase.execute(context);
if (context.scroll() == null) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
}
} catch (Throwable e) {
context.indexShard().searchService().onFailedFetchPhase(context);
throw ExceptionsHelper.convertToRuntime(e);
}
context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time2);
return new ScrollQueryFetchSearchResult(new QueryFetchSearchResult(context.queryResult(), context.fetchResult()), context.shardTarget());
} catch (Throwable e) {
logger.trace("Fetch phase failed", e);
freeContext(context);
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
public FetchSearchResult executeFetchPhase(FetchSearchRequest request) throws ElasticsearchException {
SearchContext context = findContext(request.id());
contextProcessing(context);
try {
context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
context.indexShard().searchService().onPreFetchPhase(context);
long time = System.nanoTime();
fetchPhase.execute(context);
if (context.scroll() == null) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
}
context.indexShard().searchService().onFetchPhase(context, System.nanoTime() - time);
return context.fetchResult();
} catch (Throwable e) {
context.indexShard().searchService().onFailedFetchPhase(context);
logger.trace("Fetch phase failed", e);
freeContext(context); // we just try to make sure this is freed - rethrow orig exception.
throw ExceptionsHelper.convertToRuntime(e);
} finally {
cleanContext(context);
}
}
private SearchContext findContext(long id) throws SearchContextMissingException {
SearchContext context = activeContexts.get(id);
if (context == null) {
throw new SearchContextMissingException(id);
}
SearchContext.setCurrent(context);
return context;
}
SearchContext createAndPutContext(ShardSearchRequest request) throws ElasticsearchException {
SearchContext context = createContext(request);
activeContexts.put(context.id(), context);
context.indexShard().searchService().onNewContext(context);
return context;
}
SearchContext createContext(ShardSearchRequest request) throws ElasticsearchException {
return createContext(request, null);
}
SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws ElasticsearchException {
IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.shardSafe(request.shardId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, cacheRecycler, pageCacheRecycler);
SearchContext.setCurrent(context);
try {
context.scroll(request.scroll());
parseSource(context, request.source());
parseSource(context, request.extraSource());
// if the from and size are still not set, default them
if (context.from() == -1) {
context.from(0);
}
if (context.size() == -1) {
context.size(10);
}
// pre process
dfsPhase.preProcess(context);
queryPhase.preProcess(context);
fetchPhase.preProcess(context);
// compute the context keep alive
long keepAlive = defaultKeepAlive;
if (request.scroll() != null && request.scroll().keepAlive() != null) {
keepAlive = request.scroll().keepAlive().millis();
}
context.keepAlive(keepAlive);
} catch (Throwable e) {
context.release();
throw ExceptionsHelper.convertToRuntime(e);
}
return context;
}
public void freeContext(long id) {
SearchContext context = activeContexts.remove(id);
if (context == null) {
return;
}
context.indexShard().searchService().onFreeContext(context);
context.release();
}
private void freeContext(SearchContext context) {
SearchContext removed = activeContexts.remove(context.id());
if (removed != null) {
removed.indexShard().searchService().onFreeContext(removed);
}
context.release();
}
public void freeAllScrollContexts() {
for (SearchContext searchContext : activeContexts.values()) {
if (searchContext.scroll() != null) {
freeContext(searchContext);
}
}
}
private void contextProcessing(SearchContext context) {
// disable timeout while executing a search
context.accessed(-1);
}
private void contextProcessedSuccessfully(SearchContext context) {
context.accessed(threadPool.estimatedTimeInMillis());
}
private void cleanContext(SearchContext context) {
SearchContext.removeCurrent();
}
private void parseSource(SearchContext context, BytesReference source) throws SearchParseException {
// nothing to parse...
if (source == null || source.length() == 0) {
return;
}
XContentParser parser = null;
try {
parser = XContentFactory.xContent(source).createParser(source);
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
String fieldName = parser.currentName();
parser.nextToken();
SearchParseElement element = elementParsers.get(fieldName);
if (element == null) {
throw new SearchParseException(context, "No parser for element [" + fieldName + "]");
}
element.parse(parser, context);
} else if (token == null) {
break;
}
}
} catch (Throwable e) {
String sSource = "_na_";
try {
sSource = XContentHelper.convertToJson(source, false);
} catch (Throwable e1) {
// ignore
}
throw new SearchParseException(context, "Failed to parse source [" + sSource + "]", e);
} finally {
if (parser != null) {
parser.close();
}
}
}
private static final int[] EMPTY_DOC_IDS = new int[0];
/**
* Shortcut for the doc ids to load: we load only the docs from "from" up to "size". The phase
* controller handles this as well, since the result is always size * shards for Q_A_F.
*/
private void shortcutDocIdsToLoad(SearchContext context) {
TopDocs topDocs = context.queryResult().topDocs();
if (topDocs.scoreDocs.length < context.from()) {
// no more docs...
context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
return;
}
int totalSize = context.from() + context.size();
int[] docIdsToLoad = new int[Math.min(topDocs.scoreDocs.length - context.from(), context.size())];
int counter = 0;
for (int i = context.from(); i < totalSize; i++) {
if (i < topDocs.scoreDocs.length) {
docIdsToLoad[counter] = topDocs.scoreDocs[i].doc;
} else {
break;
}
counter++;
}
context.docIdsToLoad(docIdsToLoad, 0, counter);
}
private void shortcutDocIdsToLoadForScanning(SearchContext context) {
TopDocs topDocs = context.queryResult().topDocs();
if (topDocs.scoreDocs.length == 0) {
// no more docs...
context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
return;
}
int[] docIdsToLoad = new int[topDocs.scoreDocs.length];
for (int i = 0; i < docIdsToLoad.length; i++) {
docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
}
context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
}
private void processScroll(InternalScrollSearchRequest request, SearchContext context) {
// process scroll
context.from(context.from() + context.size());
context.scroll(request.scroll());
// update the context keep alive based on the new scroll value
if (request.scroll() != null && request.scroll().keepAlive() != null) {
context.keepAlive(request.scroll().keepAlive().millis());
}
}
static class NormsWarmer extends IndicesWarmer.Listener {
@Override
public TerminationHandle warm(final IndexShard indexShard, IndexMetaData indexMetaData, final WarmerContext context, ThreadPool threadPool) {
final Loading defaultLoading = Loading.parse(indexMetaData.settings().get(NORMS_LOADING_KEY), Loading.LAZY);
final MapperService mapperService = indexShard.mapperService();
final ObjectSet<String> warmUp = new ObjectOpenHashSet<String>();
for (DocumentMapper docMapper : mapperService) {
for (FieldMapper<?> fieldMapper : docMapper.mappers().mappers()) {
final String indexName = fieldMapper.names().indexName();
if (fieldMapper.fieldType().indexed() && !fieldMapper.fieldType().omitNorms() && fieldMapper.normsLoading(defaultLoading) == Loading.EAGER) {
warmUp.add(indexName);
}
}
}
final CountDownLatch latch = new CountDownLatch(1);
// Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single task
threadPool.executor(executor()).execute(new Runnable() {
@Override
public void run() {
try {
for (Iterator<ObjectCursor<String>> it = warmUp.iterator(); it.hasNext(); ) {
final String indexName = it.next().value;
final long start = System.nanoTime();
for (final AtomicReaderContext ctx : context.newSearcher().reader().leaves()) {
final NumericDocValues values = ctx.reader().getNormValues(indexName);
if (values != null) {
values.get(0);
}
}
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed norms for [{}], took [{}]", indexName, TimeValue.timeValueNanos(System.nanoTime() - start));
}
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up norms", t);
} finally {
latch.countDown();
}
}
});
return new TerminationHandle() {
@Override
public void awaitTermination() throws InterruptedException {
latch.await();
}
};
}
}
static class FieldDataWarmer extends IndicesWarmer.Listener {
@Override
public TerminationHandle warm(final IndexShard indexShard, IndexMetaData indexMetaData, final WarmerContext context, ThreadPool threadPool) {
final MapperService mapperService = indexShard.mapperService();
final Map<String, FieldMapper<?>> warmUp = new HashMap<String, FieldMapper<?>>();
boolean parentChild = false;
for (DocumentMapper docMapper : mapperService) {
for (FieldMapper<?> fieldMapper : docMapper.mappers().mappers()) {
if (fieldMapper instanceof ParentFieldMapper) {
ParentFieldMapper parentFieldMapper = (ParentFieldMapper) fieldMapper;
if (parentFieldMapper.active()) {
parentChild = true;
}
}
final FieldDataType fieldDataType = fieldMapper.fieldDataType();
if (fieldDataType == null) {
continue;
}
if (fieldDataType.getLoading() != Loading.EAGER) {
continue;
}
final String indexName = fieldMapper.names().indexName();
if (warmUp.containsKey(indexName)) {
continue;
}
warmUp.put(indexName, fieldMapper);
}
}
final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
final Executor executor = threadPool.executor(executor());
final CountDownLatch latch = new CountDownLatch(context.newSearcher().reader().leaves().size() * warmUp.size() + (parentChild ? 1 : 0));
for (final AtomicReaderContext ctx : context.newSearcher().reader().leaves()) {
for (final FieldMapper<?> fieldMapper : warmUp.values()) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
final long start = System.nanoTime();
indexFieldDataService.getForField(fieldMapper).load(ctx);
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed fielddata for [{}], took [{}]", fieldMapper.names().name(), TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up fielddata for [{}]", t, fieldMapper.names().name());
} finally {
latch.countDown();
}
}
});
}
}
if (parentChild) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
final long start = System.nanoTime();
indexShard.indexService().cache().idCache().refresh(context.newSearcher().reader().leaves());
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed id_cache, took [{}]", TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up id cache", t);
} finally {
latch.countDown();
}
}
});
}
return new TerminationHandle() {
@Override
public void awaitTermination() throws InterruptedException {
latch.await();
}
};
}
}
class SearchWarmer extends IndicesWarmer.Listener {
@Override
public TerminationHandle warm(final IndexShard indexShard, final IndexMetaData indexMetaData, final IndicesWarmer.WarmerContext warmerContext, ThreadPool threadPool) {
IndexWarmersMetaData custom = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (custom == null) {
return TerminationHandle.NO_WAIT;
}
final Executor executor = threadPool.executor(executor());
final CountDownLatch latch = new CountDownLatch(custom.entries().size());
for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
executor.execute(new Runnable() {
@Override
public void run() {
SearchContext context = null;
try {
long now = System.nanoTime();
ShardSearchRequest request = new ShardSearchRequest(indexShard.shardId().index().name(), indexShard.shardId().id(), indexMetaData.numberOfShards(),
SearchType.QUERY_THEN_FETCH /* we don't use COUNT so sorting will also kick in whatever warming logic*/)
.source(entry.source())
.types(entry.types());
context = createContext(request, warmerContext.newSearcher());
queryPhase.execute(context);
long took = System.nanoTime() - now;
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
} finally {
try {
if (context != null) {
freeContext(context);
cleanContext(context);
}
} finally {
latch.countDown();
}
}
}
});
}
return new TerminationHandle() {
@Override
public void awaitTermination() throws InterruptedException {
latch.await();
}
};
}
}
class Reaper implements Runnable {
@Override
public void run() {
long time = threadPool.estimatedTimeInMillis();
for (SearchContext context : activeContexts.values()) {
if (context.lastAccessTime() == -1) { // its being processed or timeout is disabled
continue;
}
if ((time - context.lastAccessTime() > context.keepAlive())) {
freeContext(context);
}
}
}
}
} | 1 (no label)
| src_main_java_org_elasticsearch_search_SearchService.java |
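Much of `SearchService` is lifecycle bookkeeping: contexts live in a concurrent map keyed by id, `contextProcessing` sets the access time to -1 to disable the timeout while a phase runs, and the scheduled `Reaper` frees contexts idle past their keep-alive. A standalone sketch of that reaper pattern, with illustrative names:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: periodically free idle contexts, skipping any that are
// currently being processed (lastAccess == -1 disables the timeout).
class KeepAliveReaper {
    static final class Ctx {
        volatile long lastAccessMillis;  // -1 while a request is in flight
        final long keepAliveMillis;
        Ctx(long keepAliveMillis) { this.keepAliveMillis = keepAliveMillis; }
    }

    final Map<Long, Ctx> active = new ConcurrentHashMap<Long, Ctx>();
    final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    void start(long intervalMillis) {
        scheduler.scheduleWithFixedDelay(new Runnable() {
            public void run() {
                long now = System.currentTimeMillis();
                for (Map.Entry<Long, Ctx> e : active.entrySet()) {
                    Ctx ctx = e.getValue();
                    if (ctx.lastAccessMillis == -1) {
                        continue; // being processed, timeout disabled
                    }
                    if (now - ctx.lastAccessMillis > ctx.keepAliveMillis) {
                        active.remove(e.getKey()); // free the idle context
                    }
                }
            }
        }, intervalMillis, intervalMillis, TimeUnit.MILLISECONDS);
    }
}
```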
177 | static final class AdaptedRunnableAction extends ForkJoinTask<Void>
implements RunnableFuture<Void> {
final Runnable runnable;
AdaptedRunnableAction(Runnable runnable) {
if (runnable == null) throw new NullPointerException();
this.runnable = runnable;
}
public final Void getRawResult() { return null; }
public final void setRawResult(Void v) { }
public final boolean exec() { runnable.run(); return true; }
public final void run() { invoke(); }
private static final long serialVersionUID = 5232453952276885070L;
} | 0 (true)
| src_main_java_jsr166y_ForkJoinTask.java |
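`AdaptedRunnableAction` is the jsr166y backport of what the JDK now exposes as `ForkJoinTask.adapt(Runnable)`: wrap a `Runnable` so it can be submitted and joined like any other fork-join task. A short usage sketch against the standard `java.util.concurrent` API:

```java
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;

class AdaptRunnableDemo {
    public static void main(String[] args) {
        // adapt(Runnable) wraps the Runnable just like AdaptedRunnableAction:
        // getRawResult() is null and exec() simply runs it
        ForkJoinTask<?> task = ForkJoinTask.adapt(new Runnable() {
            public void run() {
                System.out.println("running inside the pool");
            }
        });
        ForkJoinPool.commonPool().submit(task);
        task.join(); // blocks until exec() has run the Runnable
    }
}
```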
2,407 | Arrays.sort(this.entries, new Comparator<Entry>() {
@Override
public int compare(Entry o1, Entry o2) {
return o2.expectedInsertions - o1.expectedInsertions;
}
}); | 0 (true)
| src_main_java_org_elasticsearch_common_util_BloomFilter.java |
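The comparator above sorts entries by `expectedInsertions` in descending order by comparing `o2` against `o1`. Note that subtraction-based comparators can overflow when the operands are far apart; the key-extractor form below avoids that and reads more directly. A hypothetical standalone sketch:

```java
import java.util.Arrays;
import java.util.Comparator;

class DescendingSortDemo {
    static final class Entry {
        final int expectedInsertions;
        Entry(int expectedInsertions) { this.expectedInsertions = expectedInsertions; }
    }

    public static void main(String[] args) {
        Entry[] entries = { new Entry(100), new Entry(10000), new Entry(1000) };
        // descending by expectedInsertions, overflow-safe and self-documenting
        Arrays.sort(entries, Comparator.comparingInt((Entry e) -> e.expectedInsertions).reversed());
        for (Entry e : entries) {
            System.out.println(e.expectedInsertions); // 10000, 1000, 100
        }
    }
}
```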
371 | public class GetRepositoriesResponse extends ActionResponse implements Iterable<RepositoryMetaData> {
private ImmutableList<RepositoryMetaData> repositories = ImmutableList.of();
GetRepositoriesResponse() {
}
GetRepositoriesResponse(ImmutableList<RepositoryMetaData> repositories) {
this.repositories = repositories;
}
/**
* List of repositories to return
*
* @return list of repositories
*/
public ImmutableList<RepositoryMetaData> repositories() {
return repositories;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
ImmutableList.Builder<RepositoryMetaData> repositoryListBuilder = ImmutableList.builder();
for (int j = 0; j < size; j++) {
repositoryListBuilder.add(new RepositoryMetaData(
in.readString(),
in.readString(),
ImmutableSettings.readSettingsFromStream(in))
);
}
repositories = repositoryListBuilder.build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(repositories.size());
for (RepositoryMetaData repository : repositories) {
out.writeString(repository.name());
out.writeString(repository.type());
ImmutableSettings.writeSettingsToStream(repository.settings(), out);
}
}
/**
* Iterator over the repositories data
*
* @return iterator over the repositories data
*/
@Override
public Iterator<RepositoryMetaData> iterator() {
return repositories.iterator();
}
} | 0 (true)
| src_main_java_org_elasticsearch_action_admin_cluster_repositories_get_GetRepositoriesResponse.java |
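`GetRepositoriesResponse` serializes its list as a count prefix followed by the fields of each element, and `readFrom` rebuilds the list in the same order. A standalone sketch of that length-prefixed wire pattern using plain `DataOutputStream`/`DataInputStream` (illustrative only, not the Elasticsearch `StreamOutput` API):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class LengthPrefixedListDemo {
    static void write(DataOutputStream out, List<String[]> repos) throws IOException {
        out.writeInt(repos.size()); // count prefix, like writeVInt(size) above
        for (String[] r : repos) {
            out.writeUTF(r[0]);     // name
            out.writeUTF(r[1]);     // type
        }
    }

    static List<String[]> read(DataInputStream in) throws IOException {
        int size = in.readInt();
        List<String[]> repos = new ArrayList<String[]>(size);
        for (int i = 0; i < size; i++) {
            repos.add(new String[] { in.readUTF(), in.readUTF() });
        }
        return repos;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        write(new DataOutputStream(buf), Arrays.asList(
                new String[] { "backup", "fs" }, new String[] { "offsite", "s3" }));
        List<String[]> back = read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(back.size() + " repositories round-tripped"); // 2
    }
}
```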
1,981 | return new Scoping() {
public <V> V acceptVisitor(BindingScopingVisitor<V> visitor) {
return visitor.visitScope(scope);
}
@Override
public Scope getScopeInstance() {
return scope;
}
@Override
public String toString() {
return scope.toString();
}
public void applyTo(ScopedBindingBuilder scopedBindingBuilder) {
scopedBindingBuilder.in(scope);
}
}; | 0 (true)
| src_main_java_org_elasticsearch_common_inject_internal_Scoping.java |
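The anonymous `Scoping` instance dispatches through `acceptVisitor`, letting callers branch on the kind of scoping without `instanceof` checks. A self-contained sketch of that double-dispatch with hypothetical names (not the Guice API):

```java
// Hypothetical sketch of the double-dispatch used by Scoping above: callers
// branch on the concrete kind via a visitor instead of instanceof checks.
interface ShapeVisitor<V> {
    V visitCircle(double radius);
    V visitSquare(double side);
}

abstract class Shape {
    abstract <V> V accept(ShapeVisitor<V> visitor);

    static Shape circle(final double radius) {
        return new Shape() {
            <V> V accept(ShapeVisitor<V> visitor) { return visitor.visitCircle(radius); }
            @Override public String toString() { return "circle(" + radius + ")"; }
        };
    }
}

class VisitorDemo {
    public static void main(String[] args) {
        Shape s = Shape.circle(2.0);
        // each visitor computes a different view of the same object
        Double area = s.accept(new ShapeVisitor<Double>() {
            public Double visitCircle(double r) { return Math.PI * r * r; }
            public Double visitSquare(double side) { return side * side; }
        });
        System.out.println(area); // 12.566...
    }
}
```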
425 | future.andThen(new ExecutionCallback() {
@Override
public void onResponse(Object response) {
try {
if (collator != null) {
response = collator.collate(((Map) response).entrySet());
}
} finally {
completableFuture.setResult(response);
trackableJobs.remove(jobId);
}
}
@Override
public void onFailure(Throwable t) {
try {
if (t instanceof ExecutionException
&& t.getCause() instanceof CancellationException) {
t = t.getCause();
}
completableFuture.setResult(t);
} finally {
trackableJobs.remove(jobId);
}
}
}); | 0 (true)
| hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientMapReduceProxy.java |
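The failure callback above unwraps an `ExecutionException` whose cause is a `CancellationException`, so callers observe the cancellation itself rather than the wrapper. The same unwrapping in a standalone sketch (illustrative only):

```java
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;

class UnwrapCancellationDemo {
    // Mirror of the onFailure logic above: surface the CancellationException
    // rather than the ExecutionException that wraps it.
    static Throwable unwrap(Throwable t) {
        if (t instanceof ExecutionException && t.getCause() instanceof CancellationException) {
            return t.getCause();
        }
        return t;
    }

    public static void main(String[] args) {
        Throwable wrapped = new ExecutionException(new CancellationException("job cancelled"));
        System.out.println(unwrap(wrapped).getClass().getSimpleName()); // CancellationException
    }
}
```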
2,741 | static class Request extends NodesOperationRequest<Request> {
private ShardId shardId;
public Request() {
}
public Request(ShardId shardId, Set<String> nodesIds) {
super(nodesIds.toArray(new String[nodesIds.size()]));
this.shardId = shardId;
}
public Request(ShardId shardId, String... nodesIds) {
super(nodesIds);
this.shardId = shardId;
}
public ShardId shardId() {
return this.shardId;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = ShardId.readShardId(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
}
} | 0 (true)
| src_main_java_org_elasticsearch_gateway_local_state_shards_TransportNodesListGatewayStartedShards.java |
886 | public class PromotableCandidateOrderOfferImpl implements PromotableCandidateOrderOffer {
private static final long serialVersionUID = 1L;
protected HashMap<OfferItemCriteria, List<PromotableOrderItem>> candidateQualifiersMap = new HashMap<OfferItemCriteria, List<PromotableOrderItem>>();
protected Offer offer;
protected PromotableOrder promotableOrder;
protected Money potentialSavings;
public PromotableCandidateOrderOfferImpl(PromotableOrder promotableOrder, Offer offer) {
assert(offer != null);
assert(promotableOrder != null);
this.promotableOrder = promotableOrder;
this.offer = offer;
calculatePotentialSavings();
}
/**
* Instead of calculating the potential savings, you can specify an override of this value.
* This is currently coded only to work if the promotableOrder's isIncludeOrderAndItemAdjustments flag
* is true.
*
* @param promotableOrder
* @param offer
* @param potentialSavings
*/
public PromotableCandidateOrderOfferImpl(PromotableOrder promotableOrder, Offer offer, Money potentialSavings) {
this(promotableOrder, offer);
if (promotableOrder.isIncludeOrderAndItemAdjustments()) {
this.potentialSavings = potentialSavings;
}
}
@Override
public HashMap<OfferItemCriteria, List<PromotableOrderItem>> getCandidateQualifiersMap() {
return candidateQualifiersMap;
}
protected void calculatePotentialSavings() {
Money amountBeforeAdjustments = promotableOrder.calculateSubtotalWithoutAdjustments();
potentialSavings = BroadleafCurrencyUtils.getMoney(BigDecimal.ZERO, getCurrency());
if (getOffer().getDiscountType().equals(OfferDiscountType.AMOUNT_OFF)) {
potentialSavings = BroadleafCurrencyUtils.getMoney(getOffer().getValue(), getCurrency());
} else if (getOffer().getDiscountType().equals(OfferDiscountType.FIX_PRICE)) {
potentialSavings = amountBeforeAdjustments.subtract(BroadleafCurrencyUtils.getMoney(getOffer().getValue(), getCurrency()));
} else if (getOffer().getDiscountType().equals(OfferDiscountType.PERCENT_OFF)) {
potentialSavings = amountBeforeAdjustments.multiply(getOffer().getValue().divide(new BigDecimal("100")));
}
if (potentialSavings.greaterThan(amountBeforeAdjustments)) {
potentialSavings = amountBeforeAdjustments;
}
}
@Override
public Offer getOffer() {
return this.offer;
}
@Override
public PromotableOrder getPromotableOrder() {
return this.promotableOrder;
}
public BroadleafCurrency getCurrency() {
return promotableOrder.getOrderCurrency();
}
@Override
public Money getPotentialSavings() {
return potentialSavings;
}
@Override
public boolean isCombinable() {
Boolean combinable = offer.isCombinableWithOtherOffers();
return (combinable != null && combinable);
}
@Override
public boolean isTotalitarian() {
Boolean totalitarian = offer.isTotalitarianOffer();
return (totalitarian != null && totalitarian.booleanValue());
}
@Override
public int getPriority() {
return offer.getPriority();
}
} | 0 (true)
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_discount_domain_PromotableCandidateOrderOfferImpl.java |
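`calculatePotentialSavings` branches on the discount type (AMOUNT_OFF, FIX_PRICE, PERCENT_OFF) and then caps the savings at the pre-adjustment subtotal so a promotion can never exceed the order value. A minimal numeric sketch of the same arithmetic with `BigDecimal`; names are illustrative, not the Broadleaf API:

```java
import java.math.BigDecimal;
import java.math.RoundingMode;

class SavingsSketch {
    enum DiscountType { AMOUNT_OFF, FIX_PRICE, PERCENT_OFF }

    static BigDecimal potentialSavings(DiscountType type, BigDecimal value, BigDecimal subtotal) {
        BigDecimal savings;
        switch (type) {
            case AMOUNT_OFF:  savings = value; break;
            case FIX_PRICE:   savings = subtotal.subtract(value); break;
            case PERCENT_OFF: savings = subtotal.multiply(
                    value.divide(new BigDecimal("100"), 10, RoundingMode.HALF_UP)); break;
            default:          savings = BigDecimal.ZERO;
        }
        // never promise more savings than the order is worth
        return savings.min(subtotal);
    }

    public static void main(String[] args) {
        System.out.println(potentialSavings(
                DiscountType.PERCENT_OFF, new BigDecimal("15"), new BigDecimal("200.00"))); // 30.00...
    }
}
```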
2,048 | public class EntryBackupOperation extends KeyBasedMapOperation implements BackupOperation {
private EntryBackupProcessor entryProcessor;
public EntryBackupOperation(String name, Data dataKey, EntryBackupProcessor entryProcessor) {
super(name, dataKey);
this.entryProcessor = entryProcessor;
}
public EntryBackupOperation() {
}
public void innerBeforeRun() {
if (entryProcessor instanceof HazelcastInstanceAware) {
((HazelcastInstanceAware) entryProcessor).setHazelcastInstance(getNodeEngine().getHazelcastInstance());
}
}
public void run() {
Map.Entry<Data, Object> mapEntry = recordStore.getMapEntryForBackup(dataKey);
if (mapEntry.getValue() != null) {
Map.Entry<Object, Object> entry = new AbstractMap.SimpleEntry<Object, Object>(mapService.toObject(dataKey), mapService.toObject(mapEntry.getValue()));
entryProcessor.processBackup(entry);
if (entry.getValue() == null){
recordStore.removeBackup(dataKey);
} else {
recordStore.putBackup(dataKey, entry.getValue());
}
}
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
entryProcessor = in.readObject();
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeObject(entryProcessor);
}
@Override
public Object getResponse() {
return true;
}
@Override
public String toString() {
return "EntryBackupOperation{}";
}
} | 1 (no label)
| hazelcast_src_main_java_com_hazelcast_map_operation_EntryBackupOperation.java |
2,435 | public class ConcurrentHashMapLong<T> implements ConcurrentMapLong<T> {
private final ConcurrentMap<Long, T> map;
public ConcurrentHashMapLong(ConcurrentMap<Long, T> map) {
this.map = map;
}
@Override
public T get(long key) {
return map.get(key);
}
@Override
public T remove(long key) {
return map.remove(key);
}
@Override
public T put(long key, T value) {
return map.put(key, value);
}
@Override
public T putIfAbsent(long key, T value) {
return map.putIfAbsent(key, value);
}
// MAP DELEGATION
@Override
public boolean isEmpty() {
return map.isEmpty();
}
@Override
public int size() {
return map.size();
}
@Override
public T get(Object key) {
return map.get(key);
}
@Override
public boolean containsKey(Object key) {
return map.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return map.containsValue(value);
}
public T put(Long key, T value) {
return map.put(key, value);
}
public T putIfAbsent(Long key, T value) {
return map.putIfAbsent(key, value);
}
public void putAll(Map<? extends Long, ? extends T> m) {
map.putAll(m);
}
@Override
public T remove(Object key) {
return map.remove(key);
}
@Override
public boolean remove(Object key, Object value) {
return map.remove(key, value);
}
public boolean replace(Long key, T oldValue, T newValue) {
return map.replace(key, oldValue, newValue);
}
public T replace(Long key, T value) {
return map.replace(key, value);
}
@Override
public void clear() {
map.clear();
}
@Override
public Set<Long> keySet() {
return map.keySet();
}
@Override
public Collection<T> values() {
return map.values();
}
@Override
public Set<Entry<Long, T>> entrySet() {
return map.entrySet();
}
@Override
public boolean equals(Object o) {
return map.equals(o);
}
@Override
public int hashCode() {
return map.hashCode();
}
@Override
public String toString() {
return map.toString();
}
} | 0 (true)
| src_main_java_org_elasticsearch_common_util_concurrent_ConcurrentHashMapLong.java |
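`ConcurrentHashMapLong` is a pure delegation wrapper: it adds primitive-`long` overloads while forwarding every `Map` operation to the wrapped `ConcurrentMap`. A condensed, hypothetical version of the pattern:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical condensed sketch: primitive-long convenience methods over a
// delegated ConcurrentMap, avoiding manual boxing at every call site.
class LongKeyedMap<T> {
    private final ConcurrentMap<Long, T> map = new ConcurrentHashMap<Long, T>();

    public T get(long key)          { return map.get(key); }
    public T put(long key, T value) { return map.put(key, value); }
    public T remove(long key)       { return map.remove(key); }
    public int size()               { return map.size(); }

    public static void main(String[] args) {
        LongKeyedMap<String> m = new LongKeyedMap<String>();
        m.put(42L, "context");
        System.out.println(m.get(42L)); // context
    }
}
```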
364 | public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder> {
/**
* Constructs unregister repository request builder
*
* @param clusterAdminClient cluster admin client
*/
public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) {
super((InternalClusterAdminClient) clusterAdminClient, new DeleteRepositoryRequest());
}
/**
* Constructs unregister repository request builder with specified repository name
*
* @param clusterAdminClient cluster admin client
*/
public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) {
super((InternalClusterAdminClient) clusterAdminClient, new DeleteRepositoryRequest(name));
}
/**
* Sets the repository name
*
* @param name the repository name
*/
public DeleteRepositoryRequestBuilder setName(String name) {
request.name(name);
return this;
}
@Override
protected void doExecute(ActionListener<DeleteRepositoryResponse> listener) {
((ClusterAdminClient) client).deleteRepository(request, listener);
}
} | 0 (true)
| src_main_java_org_elasticsearch_action_admin_cluster_repositories_delete_DeleteRepositoryRequestBuilder.java |
1,134 | public class OSQLMethodAsDateTime extends OAbstractSQLMethod {
public static final String NAME = "asdatetime";
public OSQLMethodAsDateTime() {
super(NAME);
}
@Override
public Object execute(OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) throws ParseException {
if (ioResult != null) {
if (ioResult instanceof Number) {
ioResult = new Date(((Number) ioResult).longValue());
} else if (!(ioResult instanceof Date)) {
ioResult = ODatabaseRecordThreadLocal.INSTANCE.get().getStorage().getConfiguration().getDateTimeFormatInstance()
.parse(ioResult.toString());
}
}
return ioResult;
}
} | 0 (true)
| core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodAsDateTime.java |
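`asdatetime` accepts either an epoch-millis `Number` or a formatted string and normalizes both to a `java.util.Date`. A standalone sketch of that coercion using `SimpleDateFormat` in place of the storage-configured format (the pattern below is an assumption for illustration; OrientDB reads it from the storage configuration):

```java
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

class AsDateTimeSketch {
    // Assumed pattern for illustration only.
    private static final String PATTERN = "yyyy-MM-dd HH:mm:ss";

    static Date asDateTime(Object value) throws ParseException {
        if (value == null) return null;
        if (value instanceof Date) return (Date) value;                       // already a date
        if (value instanceof Number) return new Date(((Number) value).longValue()); // epoch millis
        return new SimpleDateFormat(PATTERN).parse(value.toString());         // formatted string
    }

    public static void main(String[] args) throws ParseException {
        System.out.println(asDateTime(0L));                    // epoch
        System.out.println(asDateTime("2014-01-31 12:00:00")); // parsed string
    }
}
```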
1,433 | public class MetaDataUpdateSettingsService extends AbstractComponent implements ClusterStateListener {
private final ClusterService clusterService;
private final AllocationService allocationService;
private final DynamicSettings dynamicSettings;
@Inject
public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, @IndexDynamicSettings DynamicSettings dynamicSettings) {
super(settings);
this.clusterService = clusterService;
this.clusterService.add(this);
this.allocationService = allocationService;
this.dynamicSettings = dynamicSettings;
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
// update an index with number of replicas based on data nodes if possible
if (!event.state().nodes().localNodeMaster()) {
return;
}
Map<Integer, List<String>> nrReplicasChanged = new HashMap<Integer, List<String>>();
// we need to do this each time in case it was changed by update settings
for (final IndexMetaData indexMetaData : event.state().metaData()) {
String autoExpandReplicas = indexMetaData.settings().get(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
if (autoExpandReplicas != null && Booleans.parseBoolean(autoExpandReplicas, true)) { // Booleans only work for false values, just as we want it here
try {
int min;
int max;
try {
min = Integer.parseInt(autoExpandReplicas.substring(0, autoExpandReplicas.indexOf('-')));
String sMax = autoExpandReplicas.substring(autoExpandReplicas.indexOf('-') + 1);
if (sMax.equals("all")) {
max = event.state().nodes().dataNodes().size() - 1;
} else {
max = Integer.parseInt(sMax);
}
} catch (Exception e) {
logger.warn("failed to set [{}], wrong format [{}]", e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, autoExpandReplicas);
continue;
}
int numberOfReplicas = event.state().nodes().dataNodes().size() - 1;
if (numberOfReplicas < min) {
numberOfReplicas = min;
} else if (numberOfReplicas > max) {
numberOfReplicas = max;
}
// same value, nothing to do there
if (numberOfReplicas == indexMetaData.numberOfReplicas()) {
continue;
}
if (numberOfReplicas >= min && numberOfReplicas <= max) {
if (!nrReplicasChanged.containsKey(numberOfReplicas)) {
nrReplicasChanged.put(numberOfReplicas, new ArrayList<String>());
}
nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.index());
}
} catch (Exception e) {
logger.warn("[{}] failed to parse auto expand replicas", e, indexMetaData.index());
}
}
}
if (nrReplicasChanged.size() > 0) {
for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) {
Settings settings = ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build();
final List<String> indices = nrReplicasChanged.get(fNumberOfReplicas);
UpdateSettingsClusterStateUpdateRequest updateRequest = new UpdateSettingsClusterStateUpdateRequest()
.indices(indices.toArray(new String[indices.size()])).settings(settings)
.ackTimeout(TimeValue.timeValueMillis(0)) //no need to wait for ack here
.masterNodeTimeout(TimeValue.timeValueMinutes(10));
updateSettings(updateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
for (String index : indices) {
logger.info("[{}] auto expanded replicas to [{}]", index, fNumberOfReplicas);
}
}
@Override
public void onFailure(Throwable t) {
for (String index : indices) {
logger.warn("[{}] fail to auto expand replicas to [{}]", index, fNumberOfReplicas);
}
}
});
}
}
}
public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) {
ImmutableSettings.Builder updatedSettingsBuilder = ImmutableSettings.settingsBuilder();
for (Map.Entry<String, String> entry : request.settings().getAsMap().entrySet()) {
if (entry.getKey().equals("index")) {
continue;
}
if (!entry.getKey().startsWith("index.")) {
updatedSettingsBuilder.put("index." + entry.getKey(), entry.getValue());
} else {
updatedSettingsBuilder.put(entry.getKey(), entry.getValue());
}
}
// never allow to change the number of shards
for (String key : updatedSettingsBuilder.internalMap().keySet()) {
if (key.equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) {
listener.onFailure(new ElasticsearchIllegalArgumentException("can't change the number of shards for an index"));
return;
}
}
final Settings closeSettings = updatedSettingsBuilder.build();
final Set<String> removedSettings = Sets.newHashSet();
final Set<String> errors = Sets.newHashSet();
for (Map.Entry<String, String> setting : updatedSettingsBuilder.internalMap().entrySet()) {
if (!dynamicSettings.hasDynamicSetting(setting.getKey())) {
removedSettings.add(setting.getKey());
} else {
String error = dynamicSettings.validateDynamicSetting(setting.getKey(), setting.getValue());
if (error != null) {
errors.add("[" + setting.getKey() + "] - " + error);
}
}
}
if (!errors.isEmpty()) {
listener.onFailure(new ElasticsearchIllegalArgumentException("can't process the settings: " + errors.toString()));
return;
}
if (!removedSettings.isEmpty()) {
for (String removedSetting : removedSettings) {
updatedSettingsBuilder.remove(removedSetting);
}
}
final Settings openSettings = updatedSettingsBuilder.build();
clusterService.submitStateUpdateTask("update-settings", Priority.URGENT, new AckedClusterStateUpdateTask() {
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
listener.onResponse(new ClusterStateUpdateResponse(true));
}
@Override
public void onAckTimeout() {
listener.onResponse(new ClusterStateUpdateResponse(false));
}
@Override
public TimeValue ackTimeout() {
return request.ackTimeout();
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public ClusterState execute(ClusterState currentState) {
String[] actualIndices = currentState.metaData().concreteIndices(request.indices());
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
// allow to change any settings to a close index, and only allow dynamic settings to be changed
// on an open index
Set<String> openIndices = Sets.newHashSet();
Set<String> closeIndices = Sets.newHashSet();
for (String index : actualIndices) {
if (currentState.metaData().index(index).state() == IndexMetaData.State.OPEN) {
openIndices.add(index);
} else {
closeIndices.add(index);
}
}
if (!removedSettings.isEmpty() && !openIndices.isEmpty()) {
throw new ElasticsearchIllegalArgumentException(String.format(Locale.ROOT,
"Can't update non dynamic settings[%s] for open indices[%s]",
removedSettings,
openIndices
));
}
int updatedNumberOfReplicas = openSettings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, -1);
if (updatedNumberOfReplicas != -1) {
routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices);
metaDataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices);
logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices);
}
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
Boolean updatedReadOnly = openSettings.getAsBoolean(IndexMetaData.SETTING_READ_ONLY, null);
if (updatedReadOnly != null) {
for (String index : actualIndices) {
if (updatedReadOnly) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK);
}
}
}
Boolean updateMetaDataBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, null);
if (updateMetaDataBlock != null) {
for (String index : actualIndices) {
if (updateMetaDataBlock) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_METADATA_BLOCK);
}
}
}
Boolean updateWriteBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, null);
if (updateWriteBlock != null) {
for (String index : actualIndices) {
if (updateWriteBlock) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_WRITE_BLOCK);
}
}
}
Boolean updateReadBlock = openSettings.getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, null);
if (updateReadBlock != null) {
for (String index : actualIndices) {
if (updateReadBlock) {
blocks.addIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
} else {
blocks.removeIndexBlock(index, IndexMetaData.INDEX_READ_BLOCK);
}
}
}
if (!openIndices.isEmpty()) {
String[] indices = openIndices.toArray(new String[openIndices.size()]);
metaDataBuilder.updateSettings(openSettings, indices);
}
if (!closeIndices.isEmpty()) {
String[] indices = closeIndices.toArray(new String[closeIndices.size()]);
metaDataBuilder.updateSettings(closeSettings, indices);
}
ClusterState updatedState = ClusterState.builder(currentState).metaData(metaDataBuilder).routingTable(routingTableBuilder).blocks(blocks).build();
// now, reroute in case things change that require it (like number of replicas)
RoutingAllocation.Result routingResult = allocationService.reroute(updatedState);
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
return updatedState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
}
} | 0 (true)
| src_main_java_org_elasticsearch_cluster_metadata_MetaDataUpdateSettingsService.java |
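The auto-expand logic parses a `"min-max"` setting (where `max` may be the literal `all`), derives the replica count from the number of data nodes minus one, and clamps it into the range. A standalone sketch of that parse-and-clamp step, with illustrative names:

```java
class AutoExpandReplicasSketch {
    // e.g. "0-1" or "0-all": returns the replica count for the given data-node count
    static int replicasFor(String autoExpand, int dataNodes) {
        int dash = autoExpand.indexOf('-');
        int min = Integer.parseInt(autoExpand.substring(0, dash));
        String sMax = autoExpand.substring(dash + 1);
        int max = sMax.equals("all") ? dataNodes - 1 : Integer.parseInt(sMax);
        int replicas = dataNodes - 1;               // one copy per remaining data node
        return Math.max(min, Math.min(max, replicas)); // clamp into [min, max]
    }

    public static void main(String[] args) {
        System.out.println(replicasFor("0-all", 5)); // 4
        System.out.println(replicasFor("0-1", 5));   // 1
    }
}
```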
401 | index.restore(new HashMap<String, Map<String, List<IndexEntry>>>() {{
put(store1, new HashMap<String, List<IndexEntry>>() {{
put("restore-doc1", new ArrayList<IndexEntry>() {{
add(new IndexEntry(NAME, "first-restored"));
add(new IndexEntry(WEIGHT, 7.0d));
add(new IndexEntry(TIME, 4L));
}});
}});
put(store2, new HashMap<String, List<IndexEntry>>() {{
put("restore-doc1", new ArrayList<IndexEntry>() {{
add(new IndexEntry(NAME, "first-in-second-store"));
add(new IndexEntry(WEIGHT, 4.0d));
add(new IndexEntry(TIME, 5L));
}});
}});
}}, indexRetriever, tx); | 0 (true)
| titan-test_src_main_java_com_thinkaurelius_titan_diskstorage_indexing_IndexProviderTest.java |
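The test above builds its nested fixture with double-brace initialization (anonymous subclasses carrying instance initializers). It is compact, but every `{{ ... }}` allocates an extra class and captures the enclosing instance; explicit maps avoid that. A hypothetical equivalent, with values simplified to strings for illustration:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class FixtureWithoutDoubleBrace {
    public static void main(String[] args) {
        // same nesting as the double-brace fixture above, without the extra
        // anonymous classes (values simplified to strings for illustration)
        Map<String, Map<String, List<String>>> documents = new HashMap<String, Map<String, List<String>>>();
        Map<String, List<String>> store1 = new HashMap<String, List<String>>();
        store1.put("restore-doc1", Arrays.asList("name=first-restored", "weight=7.0", "time=4"));
        documents.put("store1", store1);
        System.out.println(documents);
    }
}
```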
1,029 | @SuppressWarnings("unchecked")
public class OCommandExecutorSQLCreateCluster extends OCommandExecutorSQLAbstract implements OCommandDistributedReplicateRequest {
public static final String KEYWORD_CREATE = "CREATE";
public static final String KEYWORD_CLUSTER = "CLUSTER";
public static final String KEYWORD_ID = "ID";
public static final String KEYWORD_DATASEGMENT = "DATASEGMENT";
public static final String KEYWORD_LOCATION = "LOCATION";
public static final String KEYWORD_POSITION = "POSITION";
private String clusterName;
private String clusterType;
private int requestedId = -1;
private String dataSegmentName = "default";
private String location = "default";
private String position = "append";
public OCommandExecutorSQLCreateCluster parse(final OCommandRequest iRequest) {
final ODatabaseRecord database = getDatabase();
init((OCommandRequestText) iRequest);
parserRequiredKeyword(KEYWORD_CREATE);
parserRequiredKeyword(KEYWORD_CLUSTER);
clusterName = parserRequiredWord(false);
if (!clusterName.isEmpty() && Character.isDigit(clusterName.charAt(0)))
throw new IllegalArgumentException("Cluster name cannot begin with a digit");
clusterType = parserRequiredWord(false);
String temp = parseOptionalWord(true);
while (temp != null) {
if (temp.equals(KEYWORD_ID)) {
requestedId = Integer.parseInt(parserRequiredWord(false));
} else if (temp.equals(KEYWORD_DATASEGMENT)) {
dataSegmentName = parserRequiredWord(false);
} else if (temp.equals(KEYWORD_LOCATION)) {
location = parserRequiredWord(false);
} else if (temp.equals(KEYWORD_POSITION)) {
position = parserRequiredWord(false);
}
temp = parseOptionalWord(true);
if (parserIsEnded())
break;
}
final int clusterId = database.getStorage().getClusterIdByName(clusterName);
if (clusterId > -1)
throw new OCommandSQLParsingException("Cluster '" + clusterName + "' already exists");
if (!(database.getStorage() instanceof OLocalPaginatedStorage)) {
final int dataId = database.getStorage().getDataSegmentIdByName(dataSegmentName);
if (dataId == -1)
throw new OCommandSQLParsingException("Data segment '" + dataSegmentName + "' does not exists");
}
if (!Orient.instance().getClusterFactory().isSupported(clusterType))
throw new OCommandSQLParsingException("Cluster type '" + clusterType + "' is not supported");
return this;
}
/**
* Execute the CREATE CLUSTER.
*/
public Object execute(final Map<Object, Object> iArgs) {
if (clusterName == null)
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
final ODatabaseRecord database = getDatabase();
if (requestedId == -1) {
return database.addCluster(clusterType, clusterName, location, dataSegmentName);
} else {
return database.addCluster(clusterType, clusterName, requestedId, location, dataSegmentName);
}
}
@Override
public String getSyntax() {
return "CREATE CLUSTER <name> <type> [DATASEGMENT <data-segment>|default] [LOCATION <path>|default] [POSITION <position>|append]";
}
} | 0 (true)
| core_src_main_java_com_orientechnologies_orient_core_sql_OCommandExecutorSQLCreateCluster.java |
136 | final class ReadLockView implements Lock {
public void lock() { readLock(); }
public void lockInterruptibly() throws InterruptedException {
readLockInterruptibly();
}
public boolean tryLock() { return tryReadLock() != 0L; }
public boolean tryLock(long time, TimeUnit unit)
throws InterruptedException {
return tryReadLock(time, unit) != 0L;
}
public void unlock() { unstampedUnlockRead(); }
public Condition newCondition() {
throw new UnsupportedOperationException();
}
} | 0 (true)
| src_main_java_jsr166e_StampedLock.java |
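`ReadLockView` is how `StampedLock` exposes a plain `Lock` facade over its stamped read lock, with `newCondition()` unsupported. The standard-library equivalent is `StampedLock.asReadLock()`; a short usage sketch:

```java
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.StampedLock;

class ReadLockViewDemo {
    public static void main(String[] args) {
        StampedLock sl = new StampedLock();
        Lock readLock = sl.asReadLock(); // the view class shown above
        readLock.lock();                 // delegates to readLock()/unlockRead()
        try {
            System.out.println("read section; writers are blocked");
        } finally {
            readLock.unlock();
        }
    }
}
```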
1,200 | @Service("blSecurePaymentInfoService")
public class SecurePaymentInfoServiceImpl implements SecurePaymentInfoService {
@Resource(name = "blSecurePaymentInfoDao")
protected SecurePaymentInfoDao securePaymentInfoDao;
public Referenced save(Referenced securePaymentInfo) {
return securePaymentInfoDao.save(securePaymentInfo);
}
public Referenced create(PaymentInfoType paymentInfoType) {
if (paymentInfoType.equals(PaymentInfoType.CREDIT_CARD)) {
CreditCardPaymentInfo ccinfo = securePaymentInfoDao.createCreditCardPaymentInfo();
return ccinfo;
} else if (paymentInfoType.equals(PaymentInfoType.BANK_ACCOUNT)) {
BankAccountPaymentInfo bankinfo = securePaymentInfoDao.createBankAccountPaymentInfo();
return bankinfo;
} else if (paymentInfoType.equals(PaymentInfoType.GIFT_CARD)) {
GiftCardPaymentInfo gcinfo = securePaymentInfoDao.createGiftCardPaymentInfo();
return gcinfo;
}
return null;
}
public Referenced findSecurePaymentInfo(String referenceNumber, PaymentInfoType paymentInfoType) throws WorkflowException {
if (paymentInfoType == PaymentInfoType.CREDIT_CARD) {
CreditCardPaymentInfo ccinfo = findCreditCardInfo(referenceNumber);
if (ccinfo == null) {
throw new WorkflowException("No credit card info associated with credit card payment type with reference number: " + referenceNumber);
}
return ccinfo;
} else if (paymentInfoType == PaymentInfoType.BANK_ACCOUNT) {
BankAccountPaymentInfo bankinfo = findBankAccountInfo(referenceNumber);
if (bankinfo == null) {
throw new WorkflowException("No bank account info associated with bank account payment type with reference number: " + referenceNumber);
}
return bankinfo;
} else if (paymentInfoType == PaymentInfoType.GIFT_CARD) {
GiftCardPaymentInfo gcinfo = findGiftCardInfo(referenceNumber);
if (gcinfo == null) {
throw new WorkflowException("No bank account info associated with gift card payment type with reference number: " + referenceNumber);
}
return gcinfo;
}
return null;
}
public void findAndRemoveSecurePaymentInfo(String referenceNumber, PaymentInfoType paymentInfoType) throws WorkflowException {
Referenced referenced = findSecurePaymentInfo(referenceNumber, paymentInfoType);
if (referenced != null) {
remove(referenced);
}
}
public void remove(Referenced securePaymentInfo) {
securePaymentInfoDao.delete(securePaymentInfo);
}
protected BankAccountPaymentInfo findBankAccountInfo(String referenceNumber) {
return securePaymentInfoDao.findBankAccountInfo(referenceNumber);
}
protected CreditCardPaymentInfo findCreditCardInfo(String referenceNumber) {
return securePaymentInfoDao.findCreditCardInfo(referenceNumber);
}
protected GiftCardPaymentInfo findGiftCardInfo(String referenceNumber) {
return securePaymentInfoDao.findGiftCardInfo(referenceNumber);
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_SecurePaymentInfoServiceImpl.java |
598 | interface ValuesResultListener {
boolean addResult(OIdentifiable identifiable);
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_OIndexEngine.java |
979 | public class OObjectSerializerHelperManager {
private static final OObjectSerializerHelperManager instance = new OObjectSerializerHelperManager();
private OObjectSerializerHelperInterface serializerHelper = new OObjectSerializerHelperDocument();
public static OObjectSerializerHelperManager getInstance() {
return instance;
}
public ODocument toStream(final Object iPojo, final ODocument iRecord, final OEntityManager iEntityManager,
final OClass schemaClass, final OUserObject2RecordHandler iObj2RecHandler, final ODatabaseObject db,
final boolean iSaveOnlyDirty) {
return serializerHelper.toStream(iPojo, iRecord, iEntityManager, schemaClass, iObj2RecHandler, db, iSaveOnlyDirty);
}
public String getDocumentBoundField(final Class<?> iClass) {
return serializerHelper.getDocumentBoundField(iClass);
}
public Object getFieldValue(final Object iPojo, final String iProperty) {
return serializerHelper.getFieldValue(iPojo, iProperty);
}
public void invokeCallback(final Object iPojo, final ODocument iDocument, final Class<?> iAnnotation) {
serializerHelper.invokeCallback(iPojo, iDocument, iAnnotation);
}
public void registerHelper(OObjectSerializerHelperInterface iSerializerHelper) {
serializerHelper = iSerializerHelper;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_serialization_serializer_object_OObjectSerializerHelperManager.java |
335 | public interface ODatabaseComplex<T extends Object> extends ODatabase, OUserObject2RecordHandler {
public enum OPERATION_MODE {
SYNCHRONOUS, ASYNCHRONOUS, ASYNCHRONOUS_NOANSWER
}
/**
* Creates a new entity instance.
*
* @return The new instance.
*/
public <RET extends Object> RET newInstance();
/**
* Returns the Dictionary manual index.
*
* @return ODictionary instance
*/
public ODictionary<T> getDictionary();
/**
* Returns the current user logged into the database.
*
* @see OSecurity
*/
public OUser getUser();
/**
* Set user for current database instance
*/
public void setUser(OUser user);
/**
* Loads the entity and return it.
*
* @param iObject
* The entity to load. If the entity was already loaded it will be reloaded and all the changes will be lost.
* @return
*/
public <RET extends T> RET load(T iObject);
/**
* Loads a record using a fetch plan.
*
* @param iObject
* Record to load
* @param iFetchPlan
* Fetch plan used
* @return The record received
*/
public <RET extends T> RET load(T iObject, String iFetchPlan);
/**
* Loads a record using a fetch plan.
*
* @param iObject
* Record to load
* @param iFetchPlan
* Fetch plan used
* @return The record received
*/
public <RET extends T> RET load(T iObject, String iFetchPlan, boolean iIgnoreCache, boolean loadTombstone);
/**
* Loads a record using a fetch plan.
*
* @param iObject
* Record to load
* @param iFetchPlan
* Fetch plan used
* @param iIgnoreCache
* Ignore cache or use it
* @return The record received
*/
public <RET extends T> RET load(T iObject, String iFetchPlan, boolean iIgnoreCache);
/**
* Force the reloading of the entity.
*
* @param iObject
* The entity to load. If the entity was already loaded it will be reloaded and all the changes will be lost.
* @param iFetchPlan
* Fetch plan used
* @param iIgnoreCache
* Ignore cache or use it
* @return The loaded entity
*/
public <RET extends T> RET reload(final T iObject, String iFetchPlan, boolean iIgnoreCache);
/**
* Loads the entity by the Record ID.
*
* @param iRecordId
* The unique record id of the entity to load.
* @return The loaded entity
*/
public <RET extends T> RET load(ORID iRecordId);
/**
* Loads the entity by the Record ID using a fetch plan.
*
* @param iRecordId
* The unique record id of the entity to load.
* @param iFetchPlan
* Fetch plan used
* @return The loaded entity
*/
public <RET extends T> RET load(ORID iRecordId, String iFetchPlan);
/**
* Loads the entity by the Record ID using a fetch plan and specifying if the cache must be ignored.
*
* @param iRecordId
* The unique record id of the entity to load.
* @param iFetchPlan
* Fetch plan used
* @param iIgnoreCache
* Ignore cache or use it
* @return The loaded entity
*/
public <RET extends T> RET load(ORID iRecordId, String iFetchPlan, boolean iIgnoreCache);
public <RET extends T> RET load(ORID iRecordId, String iFetchPlan, boolean iIgnoreCache, boolean loadTombstone);
/**
* Saves an entity in synchronous mode. If the entity is not dirty, the operation is ignored. Custom entity
* implementations must mark the entity as dirty themselves.
*
* @param iObject
* The entity to save
* @return The saved entity.
*/
public <RET extends T> RET save(T iObject);
/**
* Saves an entity specifying the mode. If the entity is not dirty, the operation is ignored. Custom entity
* implementations must mark the entity as dirty themselves. If the cluster does not exist, an error will be thrown.
*
*
* @param iObject
* The entity to save
* @param iMode
* Mode of save: synchronous (default) or asynchronous
* @param iForceCreate
* Flag that indicates that record should be created. If record with current rid already exists, exception is thrown
* @param iRecordCreatedCallback
* @param iRecordUpdatedCallback
*/
public <RET extends T> RET save(T iObject, OPERATION_MODE iMode, boolean iForceCreate,
ORecordCallback<? extends Number> iRecordCreatedCallback, ORecordCallback<ORecordVersion> iRecordUpdatedCallback);
/**
* Saves an entity in the specified cluster in synchronous mode. If the entity is not dirty, the operation is ignored.
* Custom entity implementations must mark the entity as dirty themselves. If the cluster does not exist, an error will be thrown.
*
* @param iObject
* The entity to save
* @param iClusterName
* Name of the cluster where to save
* @return The saved entity.
*/
public <RET extends T> RET save(T iObject, String iClusterName);
public boolean updatedReplica(T iObject);
/**
* Saves an entity in the specified cluster specifying the mode. If the entity is not dirty, the operation is ignored.
* Custom entity implementations must mark the entity as dirty themselves. If the cluster does not exist, an error will be thrown.
*
*
* @param iObject
* The entity to save
* @param iClusterName
* Name of the cluster where to save
* @param iMode
* Mode of save: synchronous (default) or asynchronous
* @param iForceCreate
* Flag that indicates that record should be created. If record with current rid already exists, exception is thrown
* @param iRecordCreatedCallback
* @param iRecordUpdatedCallback
*/
public <RET extends T> RET save(T iObject, String iClusterName, OPERATION_MODE iMode, boolean iForceCreate,
ORecordCallback<? extends Number> iRecordCreatedCallback, ORecordCallback<ORecordVersion> iRecordUpdatedCallback);
/**
* Deletes an entity from the database in synchronous mode.
*
* @param iObject
* The entity to delete.
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public ODatabaseComplex<T> delete(T iObject);
/**
* Deletes the entity with the received RID from the database.
*
* @param iRID
* The RecordID to delete.
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public ODatabaseComplex<T> delete(ORID iRID);
/**
* Deletes the entity with the received RID from the database.
*
* @param iRID
* The RecordID to delete.
* @param iVersion
* for MVCC
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public ODatabaseComplex<T> delete(ORID iRID, ORecordVersion iVersion);
public ODatabaseComplex<T> cleanOutRecord(ORID rid, ORecordVersion version);
/**
* Returns the active transaction. Cannot be null. If no transaction is active, an OTransactionNoTx instance is returned.
*
* @return OTransaction implementation
*/
public OTransaction getTransaction();
/**
* Begins a new transaction. By default the type is OPTIMISTIC. If a previous transaction was started, it will be rolled back and
* closed before a new one starts. Once begun, a transaction has to be closed by calling {@link #commit()} or
* {@link #rollback()}.
*
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public ODatabaseComplex<T> begin();
/**
* Begins a new transaction specifying the transaction type. If a previous transaction was started, it will be rolled back and
* closed before a new one starts. Once begun, a transaction has to be closed by calling {@link #commit()} or
* {@link #rollback()}.
*
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public ODatabaseComplex<T> begin(TXTYPE iStatus);
/**
* Attaches a transaction as current.
*
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public ODatabaseComplex<T> begin(OTransaction iTx) throws OTransactionException;
/**
* Commits the current transaction. The approach is all or nothing: all changes become permanent according to the storage type. If
* the operation succeeds, all the entities changed inside the transaction context take effect. If the operation fails, all
* the changed entities are restored in the datastore. In-memory instances are not guaranteed to be restored as well.
*
* @return
*/
public ODatabaseComplex<T> commit() throws OTransactionException;
/**
* Aborts the currently running transaction. All pending changed entities are restored in the datastore. In-memory instances
* are not guaranteed to be restored as well.
*
* @return
*/
public ODatabaseComplex<T> rollback() throws OTransactionException;
/**
* Execute a query against the database.
*
* @param iCommand
* Query command
* @param iArgs
* Optional parameters to bind to the query
* @return List of POJOs
*/
public <RET extends List<?>> RET query(final OQuery<?> iCommand, final Object... iArgs);
/**
* Execute a command against the database. A command can be a SQL statement or a Procedure. If the OStorage used is remote
* (OStorageRemote) then the command will be executed remotely and the result returned back to the calling client.
*
* @param iCommand
* Command request to execute.
* @return The same Command request received as parameter.
* @see OStorageRemote
*/
public <RET extends OCommandRequest> RET command(OCommandRequest iCommand);
/**
* Return the OMetadata instance. Cannot be null.
*
* @return The OMetadata instance.
*/
public OMetadata getMetadata();
/**
* Returns the database owner. Used by wrapped instances to access the upper-level ODatabase instance.
*
* @return Returns the database owner.
*/
public ODatabaseComplex<?> getDatabaseOwner();
/**
* Internal. Sets the database owner.
*/
public ODatabaseComplex<?> setDatabaseOwner(ODatabaseComplex<?> iOwner);
/**
* Returns the underlying database. Used by wrapper instances to access the lower-level ODatabase instance.
*
* @return The underlying ODatabase implementation.
*/
public <DB extends ODatabase> DB getUnderlying();
/**
* Internal method. Don't call it directly unless you're building an internal component.
*/
public void setInternal(ATTRIBUTES attribute, Object iValue);
/**
* Registers a hook to listen all events for Records.
*
* @param iHookImpl
* ORecordHook implementation
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public <DB extends ODatabaseComplex<?>> DB registerHook(ORecordHook iHookImpl);
public <DB extends ODatabaseComplex<?>> DB registerHook(final ORecordHook iHookImpl, HOOK_POSITION iPosition);
/**
* Retrieves all the registered hooks.
*
* @return A not-null unmodifiable set of ORecordHook instances. If there are no hooks registered, the Set is empty.
*/
public Set<ORecordHook> getHooks();
/**
* Unregisters a previously registered hook.
*
* @param iHookImpl
* ORecordHook implementation
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public <DB extends ODatabaseComplex<?>> DB unregisterHook(ORecordHook iHookImpl);
/**
* Invokes the callback on all the configured hooks.
*
* @param iObject
* The passed object varies based on the Database implementation: records for {@link ODatabaseRecord} implementations and
* POJOs for {@link ODatabaseObject} implementations.
* @return True if the input record is changed, otherwise false
*/
public RESULT callbackHooks(TYPE iType, OIdentifiable iObject);
/**
* Returns whether Multi-Version Concurrency Control is enabled. If enabled, the version of the record is checked before each
* update and delete against the records.
*
* @return true if enabled, otherwise false
* @see ODatabaseRecord#setMVCC(boolean)
*/
public boolean isMVCC();
/**
* Enables or disables Multi-Version Concurrency Control. If enabled, the version of the record is checked before each update
* and delete against the records.
*
* @param iValue
* @see ODatabaseRecord#isMVCC()
* @return The Database instance itself giving a "fluent interface". Useful to call multiple methods in chain.
*/
public <DB extends ODatabaseComplex<?>> DB setMVCC(boolean iValue);
public String getType();
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_ODatabaseComplex.java |
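The begin/commit/rollback javadoc above describes a fluent, all-or-nothing transaction flow. A minimal sketch against the document-database implementation of this interface, assuming an in-memory URL and a hypothetical class name:

import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.record.impl.ODocument;

public class TxFlowExample {
    public static void main(String[] args) throws Exception {
        ODatabaseDocumentTx db = new ODatabaseDocumentTx("memory:txdemo").create();
        try {
            db.begin();                                   // OPTIMISTIC by default
            ODocument doc = new ODocument("Person").field("name", "Ada");
            db.save(doc);                                 // ignored unless the record is dirty
            db.commit();                                  // all or nothing
        } catch (Exception e) {
            db.rollback();                                // pending changes are restored
            throw e;
        } finally {
            db.drop();
        }
    }
}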
3,967 | public class TermsFilterParser implements FilterParser {
public static final String NAME = "terms";
private IndicesTermsFilterCache termsFilterCache;
@Inject
public TermsFilterParser() {
}
@Override
public String[] names() {
return new String[]{NAME, "in"};
}
@Inject(optional = true)
public void setIndicesTermsFilterCache(IndicesTermsFilterCache termsFilterCache) {
this.termsFilterCache = termsFilterCache;
}
@Override
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
MapperService.SmartNameFieldMappers smartNameFieldMappers;
Boolean cache = null;
String filterName = null;
String currentFieldName = null;
String lookupIndex = parseContext.index().name();
String lookupType = null;
String lookupId = null;
String lookupPath = null;
String lookupRouting = null;
boolean lookupCache = true;
CacheKeyFilter.Key cacheKey = null;
XContentParser.Token token;
String execution = "plain";
List<Object> terms = Lists.newArrayList();
String fieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Object value = parser.objectBytes();
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No value specified for terms filter");
}
terms.add(value);
}
} else if (token == XContentParser.Token.START_OBJECT) {
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("index".equals(currentFieldName)) {
lookupIndex = parser.text();
} else if ("type".equals(currentFieldName)) {
lookupType = parser.text();
} else if ("id".equals(currentFieldName)) {
lookupId = parser.text();
} else if ("path".equals(currentFieldName)) {
lookupPath = parser.text();
} else if ("routing".equals(currentFieldName)) {
lookupRouting = parser.textOrNull();
} else if ("cache".equals(currentFieldName)) {
lookupCache = parser.booleanValue();
} else {
throw new QueryParsingException(parseContext.index(), "[terms] filter does not support [" + currentFieldName + "] within lookup element");
}
}
}
if (lookupType == null) {
throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the type");
}
if (lookupId == null) {
throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the id");
}
if (lookupPath == null) {
throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the path");
}
} else if (token.isValue()) {
if ("execution".equals(currentFieldName)) {
execution = parser.text();
} else if ("_name".equals(currentFieldName)) {
filterName = parser.text();
} else if ("_cache".equals(currentFieldName)) {
cache = parser.booleanValue();
} else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
cacheKey = new CacheKeyFilter.Key(parser.text());
} else {
throw new QueryParsingException(parseContext.index(), "[terms] filter does not support [" + currentFieldName + "]");
}
}
}
if (fieldName == null) {
throw new QueryParsingException(parseContext.index(), "terms filter requires a field name, followed by array of terms");
}
FieldMapper fieldMapper = null;
smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
String[] previousTypes = null;
if (smartNameFieldMappers != null) {
if (smartNameFieldMappers.hasMapper()) {
fieldMapper = smartNameFieldMappers.mapper();
fieldName = fieldMapper.names().indexName();
}
// if the field name carries an explicit type and we have a doc mapper for it, mark it
if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
}
}
if (lookupId != null) {
// if there are no mappings, then nothing has been indexed yet against this shard, so we can return
// no match (but not cached!), since the Terms Lookup relies on the fact that there are mappings...
if (fieldMapper == null) {
return Queries.MATCH_NO_FILTER;
}
// external lookup, use it
TermsLookup termsLookup = new TermsLookup(fieldMapper, lookupIndex, lookupType, lookupId, lookupRouting, lookupPath, parseContext);
Filter filter = termsFilterCache.termsFilter(termsLookup, lookupCache, cacheKey);
if (filter == null) {
return null;
}
// cache the whole filter by default, or if explicitly told to
if (cache == null || cache) {
filter = parseContext.cacheFilter(filter, cacheKey);
}
return filter;
}
if (terms.isEmpty()) {
return Queries.MATCH_NO_FILTER;
}
try {
Filter filter;
if ("plain".equals(execution)) {
if (fieldMapper != null) {
filter = fieldMapper.termsFilter(terms, parseContext);
} else {
BytesRef[] filterValues = new BytesRef[terms.size()];
for (int i = 0; i < filterValues.length; i++) {
filterValues[i] = BytesRefs.toBytesRef(terms.get(i));
}
filter = new TermsFilter(fieldName, filterValues);
}
// cache the whole filter by default, or if explicitly told to
if (cache == null || cache) {
filter = parseContext.cacheFilter(filter, cacheKey);
}
} else if ("fielddata".equals(execution)) {
// if there are no mappings, then nothing has been indexed yet against this shard, so we can return
// no match (but not cached!), since the FieldDataTermsFilter relies on a mapping...
if (fieldMapper == null) {
return Queries.MATCH_NO_FILTER;
}
filter = fieldMapper.termsFilter(parseContext.fieldData(), terms, parseContext);
if (cache != null && cache) {
filter = parseContext.cacheFilter(filter, cacheKey);
}
} else if ("bool".equals(execution)) {
XBooleanFilter boolFiler = new XBooleanFilter();
if (fieldMapper != null) {
for (Object term : terms) {
boolFiler.add(parseContext.cacheFilter(fieldMapper.termFilter(term, parseContext), null), BooleanClause.Occur.SHOULD);
}
} else {
for (Object term : terms) {
boolFiler.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), null), BooleanClause.Occur.SHOULD);
}
}
filter = boolFiler;
// only cache if explicitly told to, since we cache inner filters
if (cache != null && cache) {
filter = parseContext.cacheFilter(filter, cacheKey);
}
} else if ("bool_nocache".equals(execution)) {
XBooleanFilter boolFiler = new XBooleanFilter();
if (fieldMapper != null) {
for (Object term : terms) {
boolFiler.add(fieldMapper.termFilter(term, parseContext), BooleanClause.Occur.SHOULD);
}
} else {
for (Object term : terms) {
boolFiler.add(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), BooleanClause.Occur.SHOULD);
}
}
filter = boolFiler;
// cache the whole filter by default, or if explicitly told to
if (cache == null || cache) {
filter = parseContext.cacheFilter(filter, cacheKey);
}
} else if ("and".equals(execution)) {
List<Filter> filters = Lists.newArrayList();
if (fieldMapper != null) {
for (Object term : terms) {
filters.add(parseContext.cacheFilter(fieldMapper.termFilter(term, parseContext), null));
}
} else {
for (Object term : terms) {
filters.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), null));
}
}
filter = new AndFilter(filters);
// only cache if explicitly told to, since we cache inner filters
if (cache != null && cache) {
filter = parseContext.cacheFilter(filter, cacheKey);
}
} else if ("and_nocache".equals(execution)) {
List<Filter> filters = Lists.newArrayList();
if (fieldMapper != null) {
for (Object term : terms) {
filters.add(fieldMapper.termFilter(term, parseContext));
}
} else {
for (Object term : terms) {
filters.add(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))));
}
}
filter = new AndFilter(filters);
// cache the whole filter by default, or if explicitly told to
if (cache == null || cache) {
filter = parseContext.cacheFilter(filter, cacheKey);
}
} else if ("or".equals(execution)) {
List<Filter> filters = Lists.newArrayList();
if (fieldMapper != null) {
for (Object term : terms) {
filters.add(parseContext.cacheFilter(fieldMapper.termFilter(term, parseContext), null));
}
} else {
for (Object term : terms) {
filters.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), null));
}
}
filter = new OrFilter(filters);
// only cache if explicitly told to, since we cache inner filters
if (cache != null && cache) {
filter = parseContext.cacheFilter(filter, cacheKey);
}
} else if ("or_nocache".equals(execution)) {
List<Filter> filters = Lists.newArrayList();
if (fieldMapper != null) {
for (Object term : terms) {
filters.add(fieldMapper.termFilter(term, parseContext));
}
} else {
for (Object term : terms) {
filters.add(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))));
}
}
filter = new OrFilter(filters);
// cache the whole filter by default, or if explicitly told to
if (cache == null || cache) {
filter = parseContext.cacheFilter(filter, cacheKey);
}
} else {
throw new QueryParsingException(parseContext.index(), "terms filter execution value [" + execution + "] not supported");
}
filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);
if (filterName != null) {
parseContext.addNamedFilter(filterName, filter);
}
return filter;
} finally {
if (smartNameFieldMappers != null && smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
QueryParseContext.setTypes(previousTypes);
}
}
}
} | 1no label
| src_main_java_org_elasticsearch_index_query_TermsFilterParser.java |
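The execution values handled above ("plain", "bool", "and", "or" and their _nocache variants) trade a single composite filter against per-term cached clauses. A hedged sketch of selecting one from the 0.90/1.x-era Java client, assuming TermsFilterBuilder exposed execution() and filterName() in that API generation; field and values are hypothetical:

import org.elasticsearch.index.query.FilterBuilders;
import org.elasticsearch.index.query.TermsFilterBuilder;

public class TermsFilterExample {
    public static TermsFilterBuilder colorFilter() {
        // "bool" makes the parser above build one cached TermFilter clause per
        // value; "plain" (the default) builds a single TermsFilter over all values.
        return FilterBuilders.termsFilter("color", "red", "green", "blue")
                .execution("bool")
                .filterName("colors");
    }
}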
382 | @Service("blLocaleService")
public class LocaleServiceImpl implements LocaleService {
private static final Log LOG = LogFactory.getLog(LocaleServiceImpl.class);
@Resource(name="blLocaleDao")
protected LocaleDao localeDao;
@Override
public Locale findLocaleByCode(String localeCode) {
return localeDao.findLocaleByCode(localeCode);
}
@Override
public Locale findDefaultLocale() {
return localeDao.findDefaultLocale();
}
@Override
public List<Locale> findAllLocales() {
return localeDao.findAllLocales();
}
@Override
@Transactional("blTransactionManager")
public Locale save(Locale locale) {
return localeDao.save(locale);
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_locale_service_LocaleServiceImpl.java |
3,263 | public class ReplicatedMapPermission
extends InstancePermission {
private static final int PUT = 0x4;
private static final int REMOVE = 0x8;
private static final int READ = 0x16;
private static final int LISTEN = 0x32;
private static final int LOCK = 0x64;
private static final int INDEX = 0x128;
private static final int INTERCEPT = 0x256;
private static final int ALL = CREATE | DESTROY | PUT | REMOVE | READ | LISTEN | LOCK | INDEX | INTERCEPT;
public ReplicatedMapPermission(String name, String... actions) {
super(name, actions);
}
@Override
protected int initMask(String[] actions) {
int mask = NONE;
for (String action : actions) {
if (ActionConstants.ACTION_ALL.equals(action)) {
return ALL;
}
if (ActionConstants.ACTION_CREATE.equals(action)) {
mask |= CREATE;
} else if (ActionConstants.ACTION_DESTROY.equals(action)) {
mask |= DESTROY;
} else if (ActionConstants.ACTION_PUT.equals(action)) {
mask |= PUT;
} else if (ActionConstants.ACTION_REMOVE.equals(action)) {
mask |= REMOVE;
} else if (ActionConstants.ACTION_READ.equals(action)) {
mask |= READ;
} else if (ActionConstants.ACTION_LISTEN.equals(action)) {
mask |= LISTEN;
} else if (ActionConstants.ACTION_LOCK.equals(action)) {
mask |= LOCK;
} else if (ActionConstants.ACTION_INDEX.equals(action)) {
mask |= INDEX;
} else if (ActionConstants.ACTION_INTERCEPT.equals(action)) {
mask |= INTERCEPT;
}
}
return mask;
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_security_permission_ReplicatedMapPermission.java |
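Worth noting for readers of the mask constants above: 0x16, 0x32, 0x64, 0x128 and 0x256 are hexadecimal literals (decimal 22, 50, 100, 296 and 598), not single bits, so they overlap — READ (0x16 = 0b10110) shares the 0x4 bit with PUT, for example, and mask tests can match actions that were never granted. A minimal sketch of distinct power-of-two flags, assuming CREATE and DESTROY occupy 0x1 and 0x2 in the superclass:

// Each action gets one distinct bit, so (mask & FLAG) != 0 is unambiguous.
final class ReplicatedMapPermissionBits {
    static final int CREATE    = 0x1;   // assumed from InstancePermission
    static final int DESTROY   = 0x2;   // assumed from InstancePermission
    static final int PUT       = 0x4;
    static final int REMOVE    = 0x8;
    static final int READ      = 0x10;  // not 0x16, which is decimal 22
    static final int LISTEN    = 0x20;
    static final int LOCK      = 0x40;
    static final int INDEX     = 0x80;
    static final int INTERCEPT = 0x100;
    static final int ALL = CREATE | DESTROY | PUT | REMOVE | READ
            | LISTEN | LOCK | INDEX | INTERCEPT;

    private ReplicatedMapPermissionBits() {
    }
}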
1,599 | public class Console {
private static final String HISTORY_FILE = ".gremlin_titan_hadoop_history";
private static final String STANDARD_INPUT_PROMPT = "gremlin> ";
private static final String STANDARD_RESULT_PROMPT = "==>";
/*static {
try {
System.setProperty("log4j.configuration", "./resources" + File.separatorChar + "log4j.properties");
} catch (Exception e) {
}
}*/
public Console(final IO io, final String inputPrompt, final String resultPrompt) {
io.out.println();
io.out.println(" \\,,,/");
io.out.println(" (o o)");
io.out.println("-----oOOo-(_)-oOOo-----");
final Groovysh groovy = new Groovysh();
groovy.setResultHook(new NullResultHookClosure(groovy));
for (final String imps : Imports.getImports()) {
groovy.execute("import " + imps);
}
for (final String evs : Imports.getEvaluates()) {
groovy.execute(evs);
}
groovy.setResultHook(new ResultHookClosure(groovy, io, resultPrompt));
groovy.setHistory(new History());
final InteractiveShellRunner runner = new InteractiveShellRunner(groovy, new PromptClosure(groovy, inputPrompt));
runner.setErrorHandler(new ErrorHookClosure(runner, io));
try {
runner.setHistory(new History(new File(System.getProperty("user.home") + "/" + HISTORY_FILE)));
} catch (IOException e) {
io.err.println("Unable to create history file: " + HISTORY_FILE);
}
Gremlin.load();
HadoopGremlin.load();
try {
runner.run();
} catch (Error e) {
//System.err.println(e.getMessage());
}
System.exit(0);
}
public Console() {
this(new IO(System.in, System.out, System.err), STANDARD_INPUT_PROMPT, STANDARD_RESULT_PROMPT);
}
public static void main(final String[] args) {
new Console();
}
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_tinkerpop_gremlin_Console.java |
1,254 | public abstract class FaunusElement extends LifeCycleElement implements InternalElement, Comparable<FaunusElement> {
protected static final Predicate<FaunusProperty> FILTER_DELETED_PROPERTIES = new Predicate<FaunusProperty>() {
@Override
public boolean apply(@Nullable FaunusProperty p) {
return !p.isRemoved();
}
};
protected static final Predicate<StandardFaunusEdge> FILTER_DELETED_EDGES = new Predicate<StandardFaunusEdge>() {
@Override
public boolean apply(@Nullable StandardFaunusEdge e) {
return !e.isRemoved();
}
};
private static final Logger log =
LoggerFactory.getLogger(FaunusElement.class);
public static final long NO_ID = -1;
static final SetMultimap<FaunusRelationType, FaunusRelation> EMPTY_ADJACENCY = ImmutableSetMultimap.of();
protected long id;
protected SetMultimap<FaunusRelationType, FaunusRelation> outAdjacency = EMPTY_ADJACENCY;
protected SetMultimap<FaunusRelationType, FaunusRelation> inAdjacency = EMPTY_ADJACENCY;
public FaunusElement(final long id) {
this.id = id;
}
public abstract FaunusSchemaManager getTypeManager();
@Override
public InternalElement it() {
return this;
}
@Override
public StandardTitanTx tx() {
throw new UnsupportedOperationException();
}
@Override
public void remove() throws UnsupportedOperationException {
lifecycle = ElementLifeCycle.Removed;
throw new UnsupportedOperationException();
}
@Override
public Object getId() {
return this.id;
}
@Override
public long getLongId() {
return this.id;
}
@Override
public boolean hasId() {
return id>=0;
}
@Override
public void setId(final long id) {
Preconditions.checkArgument(id>=0);
this.id = id;
}
void updateSchema(FaunusSerializer.Schema schema) {
schema.addAll(inAdjacency.keySet());
schema.addAll(outAdjacency.keySet());
}
@Override
public boolean isHidden() {
return false;
}
public boolean isModified() {
if (super.isModified()) return true;
if (!(this instanceof FaunusVertex)) return false;
for (Direction dir : Direction.proper) {
for (FaunusRelation r : getAdjacency(dir).values()) {
if (r.isModified()) return true;
}
}
return false;
}
//##################################
// General Relation Handling
//##################################
protected Multiplicity getAdjustedMultiplicity(FaunusRelationType type) {
if (this instanceof FaunusRelation) {
return Multiplicity.MANY2ONE;
}
return type.getMultiplicity();
}
SetMultimap<FaunusRelationType, FaunusRelation> getAdjacency(Direction dir) {
assert dir==Direction.IN || dir==Direction.OUT;
if (dir==Direction.IN) return inAdjacency;
else return outAdjacency;
}
protected void initializeAdjacency(Direction dir) {
if ((dir==Direction.OUT || dir==Direction.BOTH) && this.outAdjacency == EMPTY_ADJACENCY)
outAdjacency = HashMultimap.create();
if ((dir==Direction.IN || dir==Direction.BOTH) && this.inAdjacency == EMPTY_ADJACENCY)
inAdjacency = HashMultimap.create();
}
protected void setRelation(final FaunusRelation relation) {
int killedRels = 0;
final Iterator<FaunusRelation> rels = outAdjacency.get(relation.getType()).iterator();
while (rels.hasNext()) {
FaunusRelation r = rels.next();
if (r.isNew()) rels.remove();
r.updateLifeCycle(ElementLifeCycle.Event.REMOVED);
updateLifeCycle(ElementLifeCycle.Event.REMOVED_RELATION);
killedRels++;
}
final Multiplicity adjMulti = getAdjustedMultiplicity(relation.getType());
if (adjMulti != Multiplicity.MANY2ONE && 0 < killedRels) {
// Calling setRelation on a multi-valued type will delete any
// existing relations of that type, no matter how many -- log this
// behavior and suggest addRelation to suppress the warning when
// using a multi-valued type
log.info( "setRelation deleted {} relations of type {} with multiplicity {}; " +
"use addRelation instead of setRelation to avoid deletion",
killedRels, relation.getType(), adjMulti);
}
addRelation(relation);
}
protected FaunusRelation addRelation(final FaunusRelation relation) {
Preconditions.checkNotNull(relation);
FaunusRelation old = null;
for (Direction dir : Direction.proper) {
//Determine applicable directions
if (relation.isProperty() && dir==Direction.IN) {
continue;
} else if (relation.isEdge()) {
FaunusEdge edge = (FaunusEdge)relation;
if (edge.getEdgeLabel().isUnidirected()) {
if (dir==Direction.IN) continue;
} else if (!edge.getVertex(dir).equals(this)) {
continue;
}
}
initializeAdjacency(dir);
SetMultimap<FaunusRelationType, FaunusRelation> adjacency = getAdjacency(dir);
if ((this instanceof FaunusVertex) && adjacency.containsEntry(relation.getType(), relation)) {
//First, check if this relation already exists; if so, consolidate
old = Iterables.getOnlyElement(Iterables.filter(adjacency.get(relation.getType()),
new Predicate<FaunusRelation>() {
@Override
public boolean apply(@Nullable FaunusRelation rel) {
return relation.equals(rel);
}
}));
if (relation.isNew() && old.isRemoved()) {
old.setLifeCycle(ElementLifeCycle.Loaded);
updateLifeCycle(ElementLifeCycle.Event.ADDED_RELATION);
} else if (relation.isLoaded() && old.isNew()) {
old.setLifeCycle(ElementLifeCycle.Loaded);
}
} else {
//Verify multiplicity constraint
switch(relation.getType().getMultiplicity()) {
case MANY2ONE:
if (dir==Direction.OUT)
ensureUniqueness(relation.getType(),adjacency);
break;
case ONE2MANY:
if (dir==Direction.IN)
ensureUniqueness(relation.getType(),adjacency);
break;
case ONE2ONE:
ensureUniqueness(relation.getType(),adjacency);
break;
case SIMPLE:
for (FaunusRelation rel : adjacency.get(relation.getType())) {
if (rel.isRemoved()) continue;
if (relation.isEdge()) {
FaunusEdge e1 = (FaunusEdge)relation, e2 = (FaunusEdge)rel;
if (e1.getVertex(Direction.OUT).equals(e2.getVertex(Direction.OUT)) &&
e1.getVertex(Direction.IN).equals(e2.getVertex(Direction.IN))) {
throw new IllegalArgumentException("A relation already exists which" +
"violates the multiplicity constraint: " + relation.getType().getMultiplicity());
}
} else {
FaunusProperty p1 = (FaunusProperty)relation, p2 = (FaunusProperty)rel;
if (p1.getValue().equals(p2.getValue())) {
throw new IllegalArgumentException("A relation already exists which" +
"violates the multiplicity constraint: " + relation.getType().getMultiplicity());
}
}
}
break;
case MULTI: //Nothing to check
break;
default: throw new AssertionError();
}
adjacency.put(relation.getType(), relation);
updateLifeCycle(ElementLifeCycle.Event.ADDED_RELATION);
log.trace("Added relation {} to {}", relation, this);
}
}
if (old!=null) return old;
else return relation;
}
private static void ensureUniqueness(FaunusRelationType type, SetMultimap<FaunusRelationType, FaunusRelation> adjacency) {
for (FaunusRelation rel : adjacency.get(type)) {
if (!rel.isRemoved()) throw new IllegalArgumentException("A relation already exists which " +
"violates the multiplicity constraint: " + type.getMultiplicity() + " on type " + type);
}
}
public abstract FaunusVertexQuery query();
//##################################
// Property Handling
//##################################
public void setProperty(EdgeLabel label, TitanVertex vertex) {
setProperty((FaunusRelationType)label,vertex);
}
@Override
public void setProperty(PropertyKey key, Object value) {
setProperty((FaunusRelationType)key,value);
}
@Override
public void setProperty(final String key, final Object value) {
FaunusRelationType rt = getTypeManager().getRelationType(key);
if (rt==null) rt = getTypeManager().getOrCreatePropertyKey(key);
setProperty(rt,value);
}
public abstract void setProperty(final FaunusRelationType type, final Object value);
@Override
public <T> T removeProperty(final String key) {
FaunusRelationType rt = getTypeManager().getRelationType(key);
if (rt==null) return null;
return removeProperty(rt);
}
@Override
public <O> O removeProperty(RelationType type) {
if (type.isEdgeLabel() && !(this instanceof FaunusVertex)) throw new IllegalArgumentException("Provided argument" +
"identifies an edge label. Use edge methods to remove those: " + type);
if (outAdjacency.isEmpty()) return null;
FaunusRelationType rtype = (FaunusRelationType)type;
final List<Object> removed = Lists.newArrayList();
final Iterator<FaunusRelation> rels = outAdjacency.get(rtype).iterator();
while (rels.hasNext()) {
FaunusRelation r = rels.next();
if (!r.isRemoved()) {
if (r.isProperty()) removed.add(((FaunusProperty)r).getValue());
else removed.add(((FaunusEdge)r).getVertex(Direction.IN));
}
if (r.isNew()) rels.remove();
r.updateLifeCycle(ElementLifeCycle.Event.REMOVED);
updateLifeCycle(ElementLifeCycle.Event.REMOVED_RELATION);
}
if (removed.isEmpty()) return null;
else if (getAdjustedMultiplicity(rtype)==Multiplicity.MANY2ONE) return (O)removed.iterator().next();
else return (O) removed;
}
public TitanVertex getProperty(EdgeLabel label) {
Preconditions.checkArgument(label!=null);
Preconditions.checkArgument(!(this instanceof FaunusVertex),"Use getEdges() to query for edges on a vertex");
return Iterables.getOnlyElement(query().type(label).titanEdges()).getVertex(Direction.IN);
}
@Override
public <T> T getProperty(PropertyKey key) {
FaunusPropertyKey type = (FaunusPropertyKey)key;
Iterator<TitanProperty> properties = query().type(type).properties().iterator();
if (type.getCardinality()==Cardinality.SINGLE) {
if (properties.hasNext()) return properties.next().getValue();
else return (T)null;
}
List result = Lists.newArrayList();
while (properties.hasNext()) result.add(properties.next().getValue());
return (T)result;
}
@Override
public <T> T getProperty(final String key) {
FaunusRelationType rt = getTypeManager().getRelationType(key);
if (rt==null) return null;
if (rt.isPropertyKey()) return getProperty((FaunusPropertyKey)rt);
else return (T)getProperty((FaunusEdgeLabel)rt);
}
@Override
public Set<String> getPropertyKeys() {
return Sets.newHashSet(Iterables.transform(getPropertyKeysDirect(),new Function<RelationType, String>() {
@Nullable
@Override
public String apply(@Nullable RelationType relationType) {
return relationType.getName();
}
}));
}
protected Iterable<RelationType> getPropertyKeysDirect() {
final Set<RelationType> result = Sets.newHashSet();
for (final TitanRelation r : query().relations()) {
if (r.isEdge() && (this instanceof FaunusVertex)) continue;
result.add(r.getType());
}
return result;
}
public void addAllProperties(final Iterable<FaunusRelation> properties) {
for (final FaunusRelation p : properties) addRelation(p);
}
public Collection<FaunusRelation> getPropertyCollection() {
return (Collection)Lists.newArrayList(
(this instanceof FaunusVertex)?query().properties():query().relations());
}
//##################################
// General Utility
//##################################
@Override
public boolean equals(final Object other) {
if (this==other) return true;
else if (other==null || !(other instanceof TitanElement)) return false;
TitanElement o = (TitanElement)other;
if (!hasId() || !o.hasId()) return o==this;
if (getLongId()!=o.getLongId()) return false;
return true;
}
@Override
public int hashCode() {
return ((Long) this.id).hashCode();
}
@Override
public int compareTo(FaunusElement o) {
return Longs.compare(id, o.getLongId());
}
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_FaunusElement.java |
939 | @SuppressWarnings({ "unchecked", "serial" })
public class ORecordBytesLazy extends ORecordBytes {
private OSerializableStream serializableContent;
public ORecordBytesLazy() {
}
public ORecordBytesLazy(final OSerializableStream iSerializable) {
this.serializableContent = iSerializable;
}
@Override
public byte[] toStream() {
if (_source == null)
_source = serializableContent.toStream();
return _source;
}
@Override
public ORecordBytesLazy copy() {
final ORecordBytesLazy c = (ORecordBytesLazy) copyTo(new ORecordBytesLazy(serializableContent));
final Boolean pinned = isPinned();
if (pinned != null && !pinned)
c.unpin();
return c;
}
public OSerializableStream getSerializableContent() {
return serializableContent;
}
public void recycle(final OSerializableStream iSerializableContent) {
this.serializableContent = iSerializableContent;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_record_impl_ORecordBytesLazy.java |
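ORecordBytesLazy defers serialization until toStream() is first called. A hedged sketch of a payload it could wrap, assuming OSerializableStream's two-method toStream/fromStream contract; the class and its content are hypothetical:

import com.orientechnologies.orient.core.serialization.OSerializableStream;
import java.nio.charset.Charset;

// Serialized only when the wrapping ORecordBytesLazy actually needs the bytes.
public class GreetingPayload implements OSerializableStream {
    private String text = "hello";

    public byte[] toStream() {
        return text.getBytes(Charset.forName("UTF-8"));
    }

    public OSerializableStream fromStream(byte[] stream) {
        text = new String(stream, Charset.forName("UTF-8"));
        return this;
    }
}

Wrapping it as new ORecordBytesLazy(new GreetingPayload()) keeps _source null until the record is first materialized.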
144 | static final class ThreadHashCode extends ThreadLocal<HashCode> {
public HashCode initialValue() { return new HashCode(); }
} | 0true
| src_main_java_jsr166e_Striped64.java |
411 | snapshotsService.deleteSnapshot(snapshotIds, new SnapshotsService.DeleteSnapshotListener() {
@Override
public void onResponse() {
listener.onResponse(new DeleteSnapshotResponse(true));
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
}); | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_snapshots_delete_TransportDeleteSnapshotAction.java |
680 | public static class Tab {
public static class Name {
public static final String Marketing = "CategoryImpl_Marketing_Tab";
public static final String Media = "CategoryImpl_Media_Tab";
public static final String Advanced = "CategoryImpl_Advanced_Tab";
public static final String Products = "CategoryImpl_Products_Tab";
public static final String SearchFacets = "CategoryImpl_categoryFacetsTab";
}
public static class Order {
public static final int Marketing = 2000;
public static final int Media = 3000;
public static final int Advanced = 4000;
public static final int Products = 5000;
public static final int SearchFacets = 3500;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_CategoryImpl.java |
616 | public class BroadleafThymeleafViewResolver extends ThymeleafViewResolver {
private static final Log LOG = LogFactory.getLog(BroadleafThymeleafViewResolver.class);
/**
* <p>
* Prefix to be used in view names (returned by controllers) for specifying an
* HTTP redirect with AJAX support. That is, if you want a redirect to be followed
* by the browser as the result of an AJAX call or within an iFrame at the parent
* window, you can utilize this prefix. Note that this requires a JavaScript component,
* which is provided as part of BLC.js
*
* If the request was not performed in an AJAX / iFrame context, this method will
* delegate to the normal "redirect:" prefix.
* </p>
* <p>
* Value: <tt>ajaxredirect:</tt>
* </p>
*/
public static final String AJAX_REDIRECT_URL_PREFIX = "ajaxredirect:";
protected Map<String, String> layoutMap = new HashMap<String, String>();
protected String fullPageLayout = "layout/fullPageLayout";
protected String iframeLayout = "layout/iframeLayout";
/*
* This method is a copy of the same method in ThymeleafViewResolver, but since it is marked private,
* we are unable to call it from the BroadleafThymeleafViewResolver
*/
protected boolean canHandle(final String viewName) {
final String[] viewNamesToBeProcessed = getViewNames();
final String[] viewNamesNotToBeProcessed = getExcludedViewNames();
return ((viewNamesToBeProcessed == null || PatternMatchUtils.simpleMatch(viewNamesToBeProcessed, viewName)) &&
(viewNamesNotToBeProcessed == null || !PatternMatchUtils.simpleMatch(viewNamesNotToBeProcessed, viewName)));
}
/**
* Determines which internal method to call for creating the appropriate view. If no
* Broadleaf specific methods match the viewName, it delegates to the parent
* ThymeleafViewResolver createView method
*/
@Override
protected View createView(final String viewName, final Locale locale) throws Exception {
if (!canHandle(viewName)) {
LOG.trace("[THYMELEAF] View {" + viewName + "} cannot be handled by ThymeleafViewResolver. Passing on to the next resolver in the chain");
return null;
}
if (viewName.startsWith(AJAX_REDIRECT_URL_PREFIX)) {
LOG.trace("[THYMELEAF] View {" + viewName + "} is an ajax redirect, and will be handled directly by BroadleafThymeleafViewResolver");
String redirectUrl = viewName.substring(AJAX_REDIRECT_URL_PREFIX.length());
return loadAjaxRedirectView(redirectUrl, locale);
}
return super.createView(viewName, locale);
}
/**
* Performs a Broadleaf AJAX redirect. This is used in conjunction with BLC.js to support
* doing a browser page change as a result of an AJAX call.
*
* @param redirectUrl
* @param locale
* @return
* @throws Exception
*/
protected View loadAjaxRedirectView(String redirectUrl, final Locale locale) throws Exception {
if (isAjaxRequest()) {
String viewName = "utility/blcRedirect";
addStaticVariable(BroadleafControllerUtility.BLC_REDIRECT_ATTRIBUTE, redirectUrl);
return super.loadView(viewName, locale);
} else {
return new RedirectView(redirectUrl, isRedirectContextRelative(), isRedirectHttp10Compatible());
}
}
@Override
protected View loadView(final String originalViewName, final Locale locale) throws Exception {
String viewName = originalViewName;
if (!isAjaxRequest()) {
String longestPrefix = "";
for (Entry<String, String> entry : layoutMap.entrySet()) {
String viewPrefix = entry.getKey();
String viewLayout = entry.getValue();
if (viewPrefix.length() > longestPrefix.length()) {
if (originalViewName.startsWith(viewPrefix)) {
longestPrefix = viewPrefix;
if (!"NONE".equals(viewLayout)) {
viewName = viewLayout;
}
}
}
}
if (longestPrefix.equals("")) {
viewName = getFullPageLayout();
}
}
AbstractThymeleafView view = (AbstractThymeleafView) super.loadView(viewName, locale);
if (!isAjaxRequest()) {
view.addStaticVariable("templateName", originalViewName);
}
return view;
}
@Override
protected Object getCacheKey(String viewName, Locale locale) {
return viewName + "_" + locale + "_" + isAjaxRequest();
}
protected boolean isIFrameRequest() {
HttpServletRequest request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
String iFrameParameter = request.getParameter("blcIFrame");
return (iFrameParameter != null && "true".equals(iFrameParameter));
}
protected boolean isAjaxRequest() {
// First, let's try to get it from the BroadleafRequestContext
HttpServletRequest request = null;
if (BroadleafRequestContext.getBroadleafRequestContext() != null) {
HttpServletRequest brcRequest = BroadleafRequestContext.getBroadleafRequestContext().getRequest();
if (brcRequest != null) {
request = brcRequest;
}
}
// If we didn't find it there, we might be outside of a security-configured uri. Let's see if the filter got it
if (request == null) {
try {
request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
} catch (ClassCastException e) {
// In portlet environments, we won't be able to cast to a ServletRequestAttributes. We don't want to
// blow up in these scenarios.
LOG.warn("Unable to cast to ServletRequestAttributes and the request in BroadleafRequestContext " +
"was not set. This may introduce incorrect AJAX behavior.");
}
}
// If we still don't have a request object, we'll default to non-ajax
if (request == null) {
return false;
}
return BroadleafControllerUtility.isAjaxRequest(request);
}
/**
* Gets the map of prefix : layout for use in determining which layout
* to dispatch the request to in non-AJAX calls
*
* @return the layout map
*/
public Map<String, String> getLayoutMap() {
return layoutMap;
}
/**
* @see #getLayoutMap()
* @param layoutMap
*/
public void setLayoutMap(Map<String, String> layoutMap) {
this.layoutMap = layoutMap;
}
/**
* The default layout to use if there is no specific entry in the layout map
*
* @return the full page layout
*/
public String getFullPageLayout() {
return fullPageLayout;
}
/**
* @see #getFullPageLayout()
* @param fullPageLayout
*/
public void setFullPageLayout(String fullPageLayout) {
this.fullPageLayout = fullPageLayout;
}
/**
* The layout to use for iframe requests
*
* @return the iframe layout
*/
public String getIframeLayout() {
return iframeLayout;
}
/**
* @see #getIframeLayout()
* @param iframeLayout
*/
public void setIframeLayout(String iframeLayout) {
this.iframeLayout = iframeLayout;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_web_BroadleafThymeleafViewResolver.java |
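The ajaxredirect: prefix documented above is consumed in createView(). A minimal controller sketch, with hypothetical mapping and target URL, showing how a view name reaches that branch:

import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;

@Controller
public class CheckoutController {

    @RequestMapping("/checkout/complete")
    public String completeCheckout() {
        // In an AJAX call this renders utility/blcRedirect so BLC.js can change
        // the browser page; otherwise it degrades to a normal RedirectView.
        return "ajaxredirect:/confirmation";
    }
}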
2,364 | GB {
@Override
public long toBytes(long size) {
return x(size, C3 / C0, MAX / (C3 / C0));
}
@Override
public long toKB(long size) {
return x(size, C3 / C1, MAX / (C3 / C1));
}
@Override
public long toMB(long size) {
return x(size, C3 / C2, MAX / (C3 / C2));
}
@Override
public long toGB(long size) {
return size;
}
@Override
public long toTB(long size) {
return size / (C4 / C3);
}
@Override
public long toPB(long size) {
return size / (C5 / C3);
}
}, | 0true
| src_main_java_org_elasticsearch_common_unit_ByteSizeUnit.java |
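The conversions above lean on C0..C5 scale constants and a saturating multiply x(size, factor, limit) defined elsewhere in the enum. A sketch of how that scaffolding is conventionally written (mirroring java.util.concurrent.TimeUnit); the exact definitions in this file may differ:

final class ByteSizeMath {
    static final long C0 = 1L;          // bytes
    static final long C1 = C0 * 1024L;  // KB
    static final long C2 = C1 * 1024L;  // MB
    static final long C3 = C2 * 1024L;  // GB
    static final long C4 = C3 * 1024L;  // TB
    static final long C5 = C4 * 1024L;  // PB
    static final long MAX = Long.MAX_VALUE;

    // Saturates instead of overflowing: GB.toBytes(Long.MAX_VALUE) clamps to MAX.
    static long x(long d, long m, long over) {
        if (d > over) {
            return Long.MAX_VALUE;
        }
        if (d < -over) {
            return Long.MIN_VALUE;
        }
        return d * m;
    }

    private ByteSizeMath() {
    }
}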
1,977 | assertTrueAllTheTime(new AssertTask() {
@Override
public void run() {
assertFalse("LoadAll should not have been called", loadAllCalled.get());
}
}, 10); | 0true
| hazelcast_src_test_java_com_hazelcast_map_mapstore_MapLoaderTest.java |
341 | protected static class NodeRestartRequest extends NodeOperationRequest {
TimeValue delay;
private NodeRestartRequest() {
}
private NodeRestartRequest(String nodeId, NodesRestartRequest request) {
super(request, nodeId);
this.delay = request.delay;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
delay = readTimeValue(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
delay.writeTo(out);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_node_restart_TransportNodesRestartAction.java |
1,563 | EntryListener entryListener = new EntryListener() {
public void entryAdded(EntryEvent event) {
totalAddedEntryCount.incrementAndGet();
}
public void entryRemoved(EntryEvent event) {
totalRemovedEntryCount.incrementAndGet();
}
public void entryUpdated(EntryEvent event) {
totalUpdatedEntryCount.incrementAndGet();
}
public void entryEvicted(EntryEvent event) {
totalEvictedEntryCount.incrementAndGet();
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_jmx_MapMBean.java |
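A hedged sketch of registering such a listener on a map, assuming the 3.x-era EntryListener with exactly these four callbacks and IMap.addEntryListener(listener, includeValue); the map name is hypothetical:

import com.hazelcast.core.EntryEvent;
import com.hazelcast.core.EntryListener;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import java.util.concurrent.atomic.AtomicLong;

public class ListenerRegistration {
    public static void main(String[] args) {
        final AtomicLong added = new AtomicLong();
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<String, String> map = hz.getMap("orders");
        map.addEntryListener(new EntryListener<String, String>() {
            public void entryAdded(EntryEvent<String, String> event) { added.incrementAndGet(); }
            public void entryRemoved(EntryEvent<String, String> event) { }
            public void entryUpdated(EntryEvent<String, String> event) { }
            public void entryEvicted(EntryEvent<String, String> event) { }
        }, true); // true = include values in the events
        map.put("k", "v"); // fires entryAdded
        hz.shutdown();
    }
}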
1,960 | public class MapSizeRequest extends AllPartitionsClientRequest implements Portable, RetryableRequest, SecureRequest {
private String name;
public MapSizeRequest() {
}
public MapSizeRequest(String name) {
this.name = name;
}
public String getServiceName() {
return MapService.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return MapPortableHook.F_ID;
}
public int getClassId() {
return MapPortableHook.SIZE;
}
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
}
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("n");
}
@Override
protected OperationFactory createOperationFactory() {
return new SizeOperationFactory(name);
}
@Override
protected Object reduce(Map<Integer, Object> map) {
int total = 0;
MapService mapService = getService();
for (Object result : map.values()) {
Integer size = (Integer) mapService.toObject(result);
total += size;
}
return total;
}
public Permission getRequiredPermission() {
return new MapPermission(name, ActionConstants.ACTION_READ);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_map_client_MapSizeRequest.java |
199 | public class TrackingConcurrentMergeScheduler extends ConcurrentMergeScheduler {
protected final ESLogger logger;
private final MeanMetric totalMerges = new MeanMetric();
private final CounterMetric totalMergesNumDocs = new CounterMetric();
private final CounterMetric totalMergesSizeInBytes = new CounterMetric();
private final CounterMetric currentMerges = new CounterMetric();
private final CounterMetric currentMergesNumDocs = new CounterMetric();
private final CounterMetric currentMergesSizeInBytes = new CounterMetric();
private final Set<OnGoingMerge> onGoingMerges = ConcurrentCollections.newConcurrentSet();
private final Set<OnGoingMerge> readOnlyOnGoingMerges = Collections.unmodifiableSet(onGoingMerges);
public TrackingConcurrentMergeScheduler(ESLogger logger) {
super();
this.logger = logger;
}
public long totalMerges() {
return totalMerges.count();
}
public long totalMergeTime() {
return totalMerges.sum();
}
public long totalMergeNumDocs() {
return totalMergesNumDocs.count();
}
public long totalMergeSizeInBytes() {
return totalMergesSizeInBytes.count();
}
public long currentMerges() {
return currentMerges.count();
}
public long currentMergesNumDocs() {
return currentMergesNumDocs.count();
}
public long currentMergesSizeInBytes() {
return currentMergesSizeInBytes.count();
}
public Set<OnGoingMerge> onGoingMerges() {
return readOnlyOnGoingMerges;
}
@Override
protected void doMerge(MergePolicy.OneMerge merge) throws IOException {
int totalNumDocs = merge.totalNumDocs();
// don't use #totalBytesSize() since it needs to be executed under the IW lock; might be fixed in a future Lucene version
long totalSizeInBytes = merge.estimatedMergeBytes;
long time = System.currentTimeMillis();
currentMerges.inc();
currentMergesNumDocs.inc(totalNumDocs);
currentMergesSizeInBytes.inc(totalSizeInBytes);
OnGoingMerge onGoingMerge = new OnGoingMerge(merge);
onGoingMerges.add(onGoingMerge);
if (logger.isTraceEnabled()) {
logger.trace("merge [{}] starting..., merging [{}] segments, [{}] docs, [{}] size, into [{}] estimated_size", merge.info == null ? "_na_" : merge.info.info.name, merge.segments.size(), totalNumDocs, new ByteSizeValue(totalSizeInBytes), new ByteSizeValue(merge.estimatedMergeBytes));
}
try {
beforeMerge(onGoingMerge);
super.doMerge(merge);
} finally {
long took = System.currentTimeMillis() - time;
onGoingMerges.remove(onGoingMerge);
afterMerge(onGoingMerge);
currentMerges.dec();
currentMergesNumDocs.dec(totalNumDocs);
currentMergesSizeInBytes.dec(totalSizeInBytes);
totalMergesNumDocs.inc(totalNumDocs);
totalMergesSizeInBytes.inc(totalSizeInBytes);
totalMerges.inc(took);
if (took > 20000) { // if more than 20 seconds, DEBUG log it
logger.debug("merge [{}] done, took [{}]", merge.info == null ? "_na_" : merge.info.info.name, TimeValue.timeValueMillis(took));
} else if (logger.isTraceEnabled()) {
logger.trace("merge [{}] done, took [{}]", merge.info == null ? "_na_" : merge.info.info.name, TimeValue.timeValueMillis(took));
}
}
}
/**
* A callback allowing for custom logic before an actual merge starts.
*/
protected void beforeMerge(OnGoingMerge merge) {
}
/**
* A callback allowing for custom logic after an actual merge has finished.
*/
protected void afterMerge(OnGoingMerge merge) {
}
@Override
public MergeScheduler clone() {
// Lucene IW makes a clone internally but since we hold on to this instance
// the clone will just be the identity.
return this;
}
} | 0true
| src_main_java_org_apache_lucene_index_TrackingConcurrentMergeScheduler.java |
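beforeMerge/afterMerge above are deliberately empty extension points. A minimal subclass sketch using them together with the public counters; the import paths are assumed from the 1.x source layout and the constructor wiring is illustrative:

import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.index.merge.OnGoingMerge;

public class LoggingMergeScheduler extends TrackingConcurrentMergeScheduler {

    public LoggingMergeScheduler(ESLogger logger) {
        super(logger);
    }

    @Override
    protected void beforeMerge(OnGoingMerge merge) {
        logger.trace("merge starting, {} already running", currentMerges());
    }

    @Override
    protected void afterMerge(OnGoingMerge merge) {
        logger.trace("merge finished, {} completed in total", totalMerges());
    }
}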
711 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_PRODUCT_OPTION")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "blStandardElements")
@AdminPresentationClass(friendlyName = "ProductOptionImpl_baseProductOption", populateToOneFields=PopulateToOneFieldsEnum.TRUE)
public class ProductOptionImpl implements ProductOption, AdminMainEntity {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator= "ProductOptionId")
@GenericGenerator(
name="ProductOptionId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="ProductOptionImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.catalog.domain.ProductOptionImpl")
}
)
@Column(name = "PRODUCT_OPTION_ID")
protected Long id;
@Column(name = "OPTION_TYPE")
@AdminPresentation(friendlyName = "productOption_Type", fieldType = SupportedFieldType.BROADLEAF_ENUMERATION, broadleafEnumeration = "org.broadleafcommerce.core.catalog.service.type.ProductOptionType")
protected String type;
@Column(name = "ATTRIBUTE_NAME")
@AdminPresentation(friendlyName = "productOption_name", helpText = "productOption_nameHelp")
protected String attributeName;
@Column(name = "LABEL")
@AdminPresentation(friendlyName = "productOption_Label", helpText = "productOption_labelHelp",
prominent = true,
translatable = true)
protected String label;
@Column(name = "REQUIRED")
@AdminPresentation(friendlyName = "productOption_Required")
protected Boolean required;
@Column(name = "USE_IN_SKU_GENERATION")
@AdminPresentation(friendlyName = "productOption_UseInSKUGeneration")
private Boolean useInSkuGeneration;
@Column(name = "DISPLAY_ORDER")
@AdminPresentation(friendlyName = "productOption_displayOrder")
protected Integer displayOrder;
@Column(name = "VALIDATION_TYPE")
@AdminPresentation(friendlyName = "productOption_validationType", group = "productOption_validation", fieldType = SupportedFieldType.BROADLEAF_ENUMERATION, broadleafEnumeration = "org.broadleafcommerce.core.catalog.service.type.ProductOptionValidationType")
private String productOptionValidationType;
@Column(name = "VALIDATION_STRING")
@AdminPresentation(friendlyName = "productOption_validationSring", group = "productOption_validation")
protected String validationString;
@Column(name = "ERROR_CODE")
@AdminPresentation(friendlyName = "productOption_errorCode", group = "productOption_validation")
protected String errorCode;
@Column(name = "ERROR_MESSAGE")
@AdminPresentation(friendlyName = "productOption_errorMessage", group = "productOption_validation")
protected String errorMessage;
@OneToMany(mappedBy = "productOption", targetEntity = ProductOptionValueImpl.class, cascade = {CascadeType.ALL})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@OrderBy(value = "displayOrder")
@AdminPresentationCollection(addType = AddMethodType.PERSIST, friendlyName = "ProductOptionImpl_Allowed_Values")
protected List<ProductOptionValue> allowedValues = new ArrayList<ProductOptionValue>();
@ManyToMany(fetch = FetchType.LAZY, targetEntity = ProductImpl.class)
@JoinTable(name = "BLC_PRODUCT_OPTION_XREF", joinColumns = @JoinColumn(name = "PRODUCT_OPTION_ID", referencedColumnName = "PRODUCT_OPTION_ID"), inverseJoinColumns = @JoinColumn(name = "PRODUCT_ID", referencedColumnName = "PRODUCT_ID"))
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@BatchSize(size = 50)
protected List<Product> products = new ArrayList<Product>();
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public ProductOptionType getType() {
return ProductOptionType.getInstance(type);
}
@Override
public void setType(ProductOptionType type) {
this.type = type == null ? null : type.getType();
}
@Override
public String getAttributeName() {
return attributeName;
}
@Override
public void setAttributeName(String attributeName) {
this.attributeName = attributeName;
}
@Override
public String getLabel() {
return DynamicTranslationProvider.getValue(this, "label", label);
}
@Override
public void setLabel(String label) {
this.label = label;
}
@Override
public Boolean getRequired() {
return required;
}
@Override
public void setRequired(Boolean required) {
this.required = required;
}
@Override
public Integer getDisplayOrder() {
return displayOrder;
}
@Override
public void setDisplayOrder(Integer displayOrder) {
this.displayOrder = displayOrder;
}
@Override
public List<Product> getProducts() {
return products;
}
@Override
public void setProducts(List<Product> products){
this.products = products;
}
@Override
public List<ProductOptionValue> getAllowedValues() {
return allowedValues;
}
@Override
public void setAllowedValues(List<ProductOptionValue> allowedValues) {
this.allowedValues = allowedValues;
}
@Override
public Boolean getUseInSkuGeneration() {
return (useInSkuGeneration == null) ? true : useInSkuGeneration;
}
@Override
public void setUseInSkuGeneration(Boolean useInSkuGeneration) {
this.useInSkuGeneration = useInSkuGeneration;
}
@Override
public ProductOptionValidationType getProductOptionValidationType() {
return ProductOptionValidationType.getInstance(productOptionValidationType);
}
@Override
public void setProductOptionValidationType(ProductOptionValidationType productOptionValidationType) {
this.productOptionValidationType = productOptionValidationType == null ? null : productOptionValidationType.getType();
}
@Override
public String getValidationString() {
return validationString;
}
@Override
public void setValidationString(String validationString) {
this.validationString = validationString;
}
@Override
public String getErrorCode() {
return errorCode;
}
@Override
public void setErrorCode(String errorCode) {
this.errorCode = errorCode;
}
@Override
public String getErrorMessage() {
return errorMessage;
}
@Override
public void setErrorMessage(String errorMessage) {
this.errorMessage = errorMessage;
}
@Override
public String getMainEntityName() {
return getLabel();
}
} | 1no label
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_ProductOptionImpl.java |
1,300 | public static class Stats {
private AtomicLong offers = new AtomicLong();
private AtomicLong polls = new AtomicLong();
public Stats getAndReset() {
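// swap both counters to zero and return a snapshot; the two resets are independent, so the snapshot is not atomic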
long offersNow = offers.getAndSet(0);
long pollsNow = polls.getAndSet(0);
Stats newOne = new Stats();
newOne.offers.set(offersNow);
newOne.polls.set(pollsNow);
return newOne;
}
public long total() {
return offers.get() + polls.get();
}
public String toString() {
return "total= " + total() + ", offers:" + offers.get() + ", polls:" + polls.get();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_examples_SimpleQueueTest.java |
1,156 | public class OSQLMethodReplace extends OAbstractSQLMethod {
public static final String NAME = "replace";
public OSQLMethodReplace() {
super(NAME, 2);
}
@Override
public Object execute(OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) {
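// null-safe: a null receiver simply propagates null instead of throwing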
ioResult = ioResult != null ? ioResult.toString().replace(
iMethodParams[0].toString(),
iMethodParams[1].toString()) : null;
return ioResult;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodReplace.java |
1,145 | public class ChildSearchBenchmark {
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put("gateway.type", "local")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.build();
String clusterName = ChildSearchBenchmark.class.getSimpleName();
Node node1 = nodeBuilder().clusterName(clusterName)
.settings(settingsBuilder().put(settings).put("name", "node1")).node();
Client client = node1.client();
int COUNT = (int) SizeValue.parseSizeValue("2m").singles();
int CHILD_COUNT = 15;
int QUERY_VALUE_RATIO = 3;
int QUERY_WARMUP = 10;
int QUERY_COUNT = 20;
String indexName = "test";
ParentChildIndexGenerator parentChildIndexGenerator = new ParentChildIndexGenerator(client, COUNT, CHILD_COUNT, QUERY_VALUE_RATIO);
client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
try {
client.admin().indices().create(createIndexRequest(indexName)).actionGet();
client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
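// crude pause to give the cluster time to apply the new mapping before bulk indexing starts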
Thread.sleep(5000);
long startTime = System.currentTimeMillis();
parentChildIndexGenerator.index();
System.out.println("--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
} catch (IndexAlreadyExistsException e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
System.out.println("--> Number of docs in index: " + client.prepareCount(indexName).setQuery(matchAllQuery()).execute().actionGet().getCount());
System.out.println("--> Running just child query");
// run just the child query, warm up first
for (int j = 0; j < QUERY_WARMUP; j++) {
client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
}
long totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Just Child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).execute().actionGet();
System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
// run parent child constant query
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
filteredQuery(
matchAllQuery(),
hasChildFilter("child", termQuery("field2", parentChildIndexGenerator.getQueryValue()))
)
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
filteredQuery(
matchAllQuery(),
hasChildFilter("child", termQuery("field2", parentChildIndexGenerator.getQueryValue()))
)
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running has_child filter with match_all child query");
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
filteredQuery(
matchAllQuery(),
hasChildFilter("child", matchAllQuery())
)
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child filter with match_all child query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
// run parent child constant query
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
filteredQuery(
matchAllQuery(),
hasParentFilter("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue()))
)
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
}
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
filteredQuery(
matchAllQuery(),
hasParentFilter("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue()))
)
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_parent filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running has_parent filter with match_all parent query ");
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(filteredQuery(
matchAllQuery(),
hasParentFilter("parent", matchAllQuery())
))
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_parent filter with match_all parent query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running top_children query");
// run parent child score query
for (int j = 0; j < QUERY_WARMUP; j++) {
client.prepareSearch(indexName).setQuery(topChildrenQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue()))).execute().actionGet();
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue()))).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> top_children Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running top_children query, with match_all as child query");
// run parent child score query
for (int j = 0; j < QUERY_WARMUP; j++) {
client.prepareSearch(indexName).setQuery(topChildrenQuery("child", matchAllQuery())).execute().actionGet();
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(topChildrenQuery("child", matchAllQuery())).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> top_children, with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
System.out.println("--> Running has_child query with score type");
// run parent child score query
for (int j = 0; j < QUERY_WARMUP; j++) {
client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue())).scoreType("max")).execute().actionGet();
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue())).scoreType("max")).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", matchAllQuery()).scoreType("max")).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running has_parent query with score type");
// run parent child score query
for (int j = 0; j < QUERY_WARMUP; j++) {
client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue())).scoreType("score")).execute().actionGet();
}
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue())).scoreType("score")).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_parent Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", matchAllQuery()).scoreType("score")).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_parent query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.gc();
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
client.close();
node1.close();
}
} | 0true
| src_test_java_org_elasticsearch_benchmark_search_child_ChildSearchBenchmark.java |
384 | clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.URGENT, new AckedClusterStateUpdateTask() {
private volatile ClusterState clusterStateToSend;
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
listener.onResponse(new ClusterRerouteResponse(true, clusterStateToSend));
}
@Override
public void onAckTimeout() {
listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend));
}
@Override
public TimeValue ackTimeout() {
return request.timeout();
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void onFailure(String source, Throwable t) {
logger.debug("failed to perform [{}]", t, source);
listener.onFailure(t);
}
@Override
public ClusterState execute(ClusterState currentState) {
RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.commands, true);
ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
clusterStateToSend = newState;
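// dry run: hand the computed state back to the listener without applying it to the cluster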
if (request.dryRun) {
return currentState;
}
return newState;
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
}); | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_reroute_TransportClusterRerouteAction.java |
1,884 | @Documented
@Target(METHOD)
@Retention(RUNTIME)
public @interface Provides {
} | 0true
| src_main_java_org_elasticsearch_common_inject_Provides.java |
1,742 | @Deprecated
public class LZFCompressedIndexInput extends CompressedIndexInput<LZFCompressorContext> {
private final ChunkDecoder decoder;
// scratch area buffer
private byte[] inputBuffer;
public LZFCompressedIndexInput(IndexInput in, ChunkDecoder decoder) throws IOException {
super(in, LZFCompressorContext.INSTANCE);
this.decoder = decoder;
this.uncompressed = new byte[LZFChunk.MAX_CHUNK_LEN];
this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
this.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
}
@Override
protected void readHeader(IndexInput in) throws IOException {
byte[] header = new byte[LZFCompressor.LUCENE_HEADER.length];
in.readBytes(header, 0, header.length, false);
if (!Arrays.equals(header, LZFCompressor.LUCENE_HEADER)) {
throw new IOException("wrong lzf compressed header [" + Arrays.toString(header) + "]");
}
}
@Override
protected int uncompress(IndexInput in, byte[] out) throws IOException {
return decoder.decodeChunk(new InputStreamIndexInput(in, Long.MAX_VALUE), inputBuffer, out);
}
@Override
protected void doClose() throws IOException {
// nothing to do here...
}
@Override
public IndexInput clone() {
LZFCompressedIndexInput cloned = (LZFCompressedIndexInput) super.clone();
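// give the clone its own scratch buffer so cloned inputs don't share mutable state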
cloned.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
return cloned;
}
} | 0true
| src_main_java_org_elasticsearch_common_compress_lzf_LZFCompressedIndexInput.java |
81 | @SuppressWarnings("serial")
static final class MapReduceValuesTask<K,V,U>
extends BulkTask<K,V,U> {
final Fun<? super V, ? extends U> transformer;
final BiFun<? super U, ? super U, ? extends U> reducer;
U result;
MapReduceValuesTask<K,V,U> rights, nextRight;
MapReduceValuesTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
MapReduceValuesTask<K,V,U> nextRight,
Fun<? super V, ? extends U> transformer,
BiFun<? super U, ? super U, ? extends U> reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.transformer = transformer;
this.reducer = reducer;
}
public final U getRawResult() { return result; }
public final void compute() {
final Fun<? super V, ? extends U> transformer;
final BiFun<? super U, ? super U, ? extends U> reducer;
if ((transformer = this.transformer) != null &&
(reducer = this.reducer) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new MapReduceValuesTask<K,V,U>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, transformer, reducer)).fork();
}
U r = null;
for (Node<K,V> p; (p = advance()) != null; ) {
U u;
if ((u = transformer.apply(p.val)) != null)
r = (r == null) ? u : reducer.apply(r, u);
}
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U>
t = (MapReduceValuesTask<K,V,U>)c,
s = t.rights;
while (s != null) {
U tr, sr;
if ((sr = s.result) != null)
t.result = (((tr = t.result) == null) ? sr :
reducer.apply(tr, sr));
s = t.rights = s.nextRight;
}
}
}
}
} | 0true
| src_main_java_jsr166e_ConcurrentHashMapV8.java |
642 | public abstract class AbstractGeneratedResourceHandler {
protected Cache generatedResourceCache;
/**
* @param path
* @return boolean indicating whether this handler is able to handle the given request
*/
public abstract boolean canHandle(String path);
/**
* @param path
* @param locations
* @return the Resource representing this file
*/
public abstract Resource getFileContents(String path, List<Resource> locations);
/**
* @param cachedResource
* @param path
* @param locations
* @return whether or not the given cachedResource needs to be regenerated
*/
public abstract boolean isCachedResourceExpired(GeneratedResource cachedResource, String path, List<Resource> locations);
/**
* Attempts to retrieve the requested resource from the cache. If not cached, generates the resource, caches it,
* and then returns it.
*
* @param path
* @param locations
* @return the generated resource
*/
public Resource getResource(String path, List<Resource> locations) {
Element e = getGeneratedResourceCache().get(path);
Resource r = null;
boolean shouldGenerate = false;
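// regenerate when the cache has no entry or the cached copy has expired; otherwise serve the cached resource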
if (e == null || e.getObjectValue() == null) {
shouldGenerate = true;
} else if (e.getObjectValue() instanceof GeneratedResource
&& isCachedResourceExpired((GeneratedResource) e.getObjectValue(), path, locations)) {
shouldGenerate = true;
} else {
r = (Resource) e.getObjectValue();
}
if (shouldGenerate) {
r = getFileContents(path, locations);
e = new Element(path, r);
getGeneratedResourceCache().put(e);
}
return r;
}
protected Cache getGeneratedResourceCache() {
if (generatedResourceCache == null) {
generatedResourceCache = CacheManager.getInstance().getCache("generatedResourceCache");
}
return generatedResourceCache;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_web_resource_AbstractGeneratedResourceHandler.java |
43 | @Component("blPageItemCriteriaCustomPersistenceHandler")
public class PageItemCriteriaCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
private final Log LOG = LogFactory.getLog(PageItemCriteriaCustomPersistenceHandler.class);
@Override
public Boolean canHandleAdd(PersistencePackage persistencePackage) {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
return PageItemCriteria.class.getName().equals(ceilingEntityFullyQualifiedClassname);
}
@Override
public Boolean canHandleRemove(PersistencePackage persistencePackage) {
return canHandleAdd(persistencePackage);
}
@Override
public Boolean canHandleUpdate(PersistencePackage persistencePackage) {
return canHandleAdd(persistencePackage);
}
protected void removeHtmlEncoding(Entity entity) {
Property prop = entity.findProperty("orderItemMatchRule");
if (prop != null && prop.getValue() != null) {
//AntiSamy XSS protection encodes the values in the MVEL expression
//reverse this behavior
prop.setValue(prop.getRawValue());
}
}
@Override
public Entity add(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
Entity entity = persistencePackage.getEntity();
removeHtmlEncoding(entity);
try {
PersistencePerspective persistencePerspective = persistencePackage.getPersistencePerspective();
PageItemCriteria adminInstance = (PageItemCriteria) Class.forName(entity.getType()[0]).newInstance();
Map<String, FieldMetadata> adminProperties = helper.getSimpleMergedProperties(PageItemCriteria.class.getName(), persistencePerspective);
adminInstance = (PageItemCriteria) helper.createPopulatedInstance(adminInstance, entity, adminProperties, false);
if (adminInstance.getPage().getLockedFlag()) {
throw new IllegalArgumentException("Unable to update a locked record");
}
adminInstance = (PageItemCriteria) dynamicEntityDao.merge(adminInstance);
Entity adminEntity = helper.getRecord(adminProperties, adminInstance, null, null);
return adminEntity;
} catch (Exception e) {
throw new ServiceException("Unable to add entity for " + entity.getType()[0], e);
}
}
@Override
public Entity update(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
Entity entity = persistencePackage.getEntity();
removeHtmlEncoding(entity);
try {
PersistencePerspective persistencePerspective = persistencePackage.getPersistencePerspective();
Map<String, FieldMetadata> adminProperties = helper.getSimpleMergedProperties(PageItemCriteria.class.getName(), persistencePerspective);
Object primaryKey = helper.getPrimaryKey(entity, adminProperties);
PageItemCriteria adminInstance = (PageItemCriteria) dynamicEntityDao.retrieve(Class.forName(entity.getType()[0]), primaryKey);
if (adminInstance.getPage().getLockedFlag()) {
/*
This may be an attempt to update a target item criteria on an otherwise un-edited, production Page instance
*/
CriteriaBuilder criteriaBuilder = dynamicEntityDao.getStandardEntityManager().getCriteriaBuilder();
CriteriaQuery<Page> query = criteriaBuilder.createQuery(Page.class);
Root<PageImpl> root = query.from(PageImpl.class);
query.where(criteriaBuilder.and(criteriaBuilder.equal(root.get("archivedFlag"), Boolean.FALSE), criteriaBuilder.equal(root.get("originalPageId"), adminInstance.getPage().getId())));
query.select(root);
TypedQuery<Page> scQuery = dynamicEntityDao.getStandardEntityManager().createQuery(query);
try {
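// the labeled break below exits this block once a matching criteria has been updated, skipping the "not found" exception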
checkCriteria: {
Page myContent = scQuery.getSingleResult();
for (PageItemCriteria itemCriteria : myContent.getQualifyingItemCriteria()) {
if (itemCriteria.getMatchRule().equals(adminInstance.getMatchRule()) && itemCriteria.getQuantity().equals(adminInstance.getQuantity())) {
//manually set the values - otherwise unwanted properties will be set
itemCriteria.setMatchRule(entity.findProperty("orderItemMatchRule").getValue());
itemCriteria.setQuantity(Integer.parseInt(entity.findProperty("quantity").getValue()));
adminInstance = itemCriteria;
break checkCriteria;
}
}
throw new RuntimeException("Unable to find an item criteria to update");
}
} catch (Exception e) {
throw new IllegalArgumentException("Unable to update a locked record");
}
} else {
adminInstance = (PageItemCriteria) helper.createPopulatedInstance(adminInstance, entity, adminProperties, false);
}
adminInstance = (PageItemCriteria) dynamicEntityDao.merge(adminInstance);
Entity adminEntity = helper.getRecord(adminProperties, adminInstance, null, null);
return adminEntity;
} catch (Exception e) {
throw new ServiceException("Unable to update entity for " + entity.getType()[0], e);
}
}
@Override
public void remove(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
Entity entity = persistencePackage.getEntity();
try {
PersistencePerspective persistencePerspective = persistencePackage.getPersistencePerspective();
Map<String, FieldMetadata> adminProperties = helper.getSimpleMergedProperties(PageItemCriteria.class.getName(), persistencePerspective);
Object primaryKey = helper.getPrimaryKey(entity, adminProperties);
PageItemCriteria adminInstance = (PageItemCriteria) dynamicEntityDao.retrieve(Class.forName(entity.getType()[0]), primaryKey);
if (adminInstance.getPage().getLockedFlag()) {
/*
This may be an attempt to delete a target item criteria off an otherwise un-edited, production Page instance
*/
CriteriaBuilder criteriaBuilder = dynamicEntityDao.getStandardEntityManager().getCriteriaBuilder();
CriteriaQuery<Page> query = criteriaBuilder.createQuery(Page.class);
Root<PageImpl> root = query.from(PageImpl.class);
query.where(criteriaBuilder.and(criteriaBuilder.equal(root.get("archivedFlag"), Boolean.FALSE), criteriaBuilder.equal(root.get("originalPageId"), adminInstance.getPage().getId())));
query.select(root);
TypedQuery<Page> scQuery = dynamicEntityDao.getStandardEntityManager().createQuery(query);
try {
Page myContent = scQuery.getSingleResult();
for (PageItemCriteria itemCriteria : myContent.getQualifyingItemCriteria()) {
if (itemCriteria.getMatchRule().equals(adminInstance.getMatchRule()) && itemCriteria.getQuantity().equals(adminInstance.getQuantity())) {
myContent.getQualifyingItemCriteria().remove(itemCriteria);
return;
}
}
throw new RuntimeException("Unable to find an item criteria to delete");
} catch (Exception e) {
throw new IllegalArgumentException("Unable to update a locked record");
}
}
dynamicEntityDao.remove(adminInstance);
} catch (Exception e) {
throw new ServiceException("Unable to remove entity for " + entity.getType()[0], e);
}
}
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_PageItemCriteriaCustomPersistenceHandler.java |
2,597 | private class MasterPingRequestHandler extends BaseTransportRequestHandler<MasterPingRequest> {
public static final String ACTION = "discovery/zen/fd/masterPing";
@Override
public MasterPingRequest newInstance() {
return new MasterPingRequest();
}
@Override
public void messageReceived(MasterPingRequest request, TransportChannel channel) throws Exception {
DiscoveryNodes nodes = nodesProvider.nodes();
// check if we are really the same master as the one the sender thinks we are
// this can happen if the master got "kill -9" and then another node started using the same port
if (!request.masterNodeId.equals(nodes.localNodeId())) {
throw new NotMasterException();
}
// if we are no longer master, fail...
if (!nodes.localNodeMaster()) {
throw new NoLongerMasterException();
}
if (!nodes.nodeExists(request.nodeId)) {
throw new NodeDoesNotExistOnMasterException();
}
// send a response, and note if we are connected to the master or not
channel.sendResponse(new MasterPingResponseResponse(nodes.nodeExists(request.nodeId)));
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
} | 1no label
| src_main_java_org_elasticsearch_discovery_zen_fd_MasterFaultDetection.java |
521 | public class OQueryParsingException extends OException {
private String text;
private int position = -1;
private static final long serialVersionUID = -7430575036316163711L;
public OQueryParsingException(final String iMessage) {
super(iMessage);
}
public OQueryParsingException(final String iMessage, final Throwable cause) {
super(iMessage, cause);
}
public OQueryParsingException(final String iMessage, final String iText, final int iPosition, final Throwable cause) {
super(iMessage, cause);
text = iText;
position = iPosition;
}
public OQueryParsingException(final String iMessage, final String iText, final int iPosition) {
super(iMessage);
text = iText;
position = iPosition;
}
@Override
public String getMessage() {
StringBuilder buffer = new StringBuilder();
if (position > -1) {
buffer.append("Error on parsing query at position #");
buffer.append(position);
buffer.append(": ");
}
buffer.append(super.getMessage());
if (text != null) {
buffer.append("\nQuery: ");
buffer.append(text);
buffer.append("\n------");
for (int i = 0; i < position - 1; ++i)
buffer.append("-");
buffer.append("^");
}
return buffer.toString();
}
public String getText() {
return text;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_exception_OQueryParsingException.java |
375 | public static class TestCombinerFactory
implements CombinerFactory<String, Integer, Integer> {
public TestCombinerFactory() {
}
@Override
public Combiner<String, Integer, Integer> newCombiner(String key) {
return new TestCombiner();
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_DistributedMapperClientMapReduceTest.java |
1,112 | public class SimpleGetActionBenchmark {
public static void main(String[] args) {
long OPERATIONS = SizeValue.parseSizeValue("300k").singles();
Node node = NodeBuilder.nodeBuilder().node();
Client client;
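// flip this hard-coded toggle to benchmark through a separate client node instead of the embedded node's client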
if (false) {
client = NodeBuilder.nodeBuilder().client(true).node().client();
} else {
client = node.client();
}
client.prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
StopWatch stopWatch = new StopWatch().start();
for (long i = 0; i < OPERATIONS; i++) {
client.prepareGet("test", "type1", "1").execute().actionGet();
}
stopWatch.stop();
System.out.println("Ran in " + stopWatch.totalTime() + ", per second: " + (((double) OPERATIONS) / stopWatch.totalTime().secondsFrac()));
node.close();
}
} | 0true
| src_test_java_org_elasticsearch_benchmark_get_SimpleGetActionBenchmark.java |
1,295 | public interface SearchSynonymDao {
public List<SearchSynonym> getAllSynonyms();
public void createSynonym(SearchSynonym synonym);
public void updateSynonym(SearchSynonym synonym);
public void deleteSynonym(SearchSynonym synonym);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_dao_SearchSynonymDao.java |
372 | public class AnnotationJmxAttributeSource extends org.springframework.jmx.export.annotation.AnnotationJmxAttributeSource {
private final String appName;
public AnnotationJmxAttributeSource(String appName) {
this.appName = appName;
}
@SuppressWarnings("unchecked")
@Override
public ManagedResource getManagedResource(Class beanClass) throws InvalidMetadataException {
ManagedResource resource = super.getManagedResource(beanClass);
if (resource != null && appName != null) {
String objectName = resource.getObjectName();
objectName += "." + appName;
resource.setObjectName(objectName);
}
return resource;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_jmx_AnnotationJmxAttributeSource.java |
1,872 | boolean b = h1.executeTransaction(options, new TransactionalTask<Boolean>() {
public Boolean execute(TransactionalTaskContext context) throws TransactionException {
final TransactionalMap<Object, Object> txMap = context.getMap("default");
assertEquals("value2", txMap.replace("1", "value3"));
assertEquals("value3", txMap.get("1"));
assertNull(map2.get("2"));
return true;
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_MapTransactionTest.java |
608 | public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<UpdateSettingsClusterStateUpdateRequest> {
private Settings settings;
public UpdateSettingsClusterStateUpdateRequest() {
}
/**
* Returns the {@link Settings} to update
*/
public Settings settings() {
return settings;
}
/**
* Sets the {@link Settings} to update
*/
public UpdateSettingsClusterStateUpdateRequest settings(Settings settings) {
this.settings = settings;
return this;
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_settings_put_UpdateSettingsClusterStateUpdateRequest.java |
20 | public class IllegalLogFormatException extends IOException
{
public IllegalLogFormatException( long expected, long was )
{
super( "Invalid log format version found, expected " + expected + " but was " + was +
". To be able to upgrade from an older log format version there must have " +
"been a clean shutdown of the database" );
}
} | 1no label
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_IllegalLogFormatException.java |
173 | public class EideticTransactionMonitor implements TransactionMonitor
{
private int commitCount;
private int injectOnePhaseCommitCount;
private int injectTwoPhaseCommitCount;
@Override
public void transactionCommitted( Xid xid, boolean recovered )
{
commitCount++;
}
@Override
public void injectOnePhaseCommit( Xid xid )
{
injectOnePhaseCommitCount++;
}
@Override
public void injectTwoPhaseCommit( Xid xid )
{
injectTwoPhaseCommitCount++;
}
public int getCommitCount()
{
return commitCount;
}
public int getInjectOnePhaseCommitCount()
{
return injectOnePhaseCommitCount;
}
public int getInjectTwoPhaseCommitCount()
{
return injectTwoPhaseCommitCount;
}
} | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_EideticTransactionMonitor.java |
823 | @Entity
@Table(name = "BLC_OFFER_ITEM_CRITERIA")
@Inheritance(strategy=InheritanceType.JOINED)
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@AdminPresentationClass(friendlyName = "OfferItemCriteriaImpl_baseOfferItemCriteria")
public class OfferItemCriteriaImpl implements OfferItemCriteria {
public static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator= "OfferItemCriteriaId")
@GenericGenerator(
name="OfferItemCriteriaId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="OfferItemCriteriaImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.offer.domain.OfferItemCriteriaImpl")
}
)
@Column(name = "OFFER_ITEM_CRITERIA_ID")
@AdminPresentation(friendlyName = "OfferItemCriteriaImpl_Item_Criteria_Id", group = "OfferItemCriteriaImpl_Description", visibility = VisibilityEnum.HIDDEN_ALL)
protected Long id;
@Column(name = "QUANTITY", nullable=false)
@AdminPresentation(friendlyName = "OfferItemCriteriaImpl_Quantity", group = "OfferItemCriteriaImpl_Description", visibility =VisibilityEnum.HIDDEN_ALL)
protected Integer quantity;
@Lob
@Type(type = "org.hibernate.type.StringClobType")
@Column(name = "ORDER_ITEM_MATCH_RULE", length = Integer.MAX_VALUE - 1)
@AdminPresentation(friendlyName = "OfferItemCriteriaImpl_Order_Item_Match_Rule", group = "OfferItemCriteriaImpl_Description", visibility = VisibilityEnum.HIDDEN_ALL)
protected String orderItemMatchRule;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public Integer getQuantity() {
return quantity;
}
@Override
public void setQuantity(Integer receiveQuantity) {
this.quantity = receiveQuantity;
}
@Override
public String getMatchRule() {
return orderItemMatchRule;
}
@Override
public void setMatchRule(String matchRule) {
this.orderItemMatchRule = matchRule;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((id == null) ? 0 : id.hashCode());
result = prime * result + ((orderItemMatchRule == null) ? 0 : orderItemMatchRule.hashCode());
result = prime * result + ((quantity == null) ? 0 : quantity.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OfferItemCriteriaImpl other = (OfferItemCriteriaImpl) obj;
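// when both entities have a database id, equality is decided by the id alone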
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (orderItemMatchRule == null) {
if (other.orderItemMatchRule != null)
return false;
} else if (!orderItemMatchRule.equals(other.orderItemMatchRule))
return false;
if (quantity == null) {
if (other.quantity != null)
return false;
} else if (!quantity.equals(other.quantity))
return false;
return true;
}
} | 1no label
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_domain_OfferItemCriteriaImpl.java |
3,623 | public class TokenCountFieldMapperTests extends ElasticsearchTestCase {
@Test
public void testMerge() throws IOException {
String stage1Mapping = XContentFactory.jsonBuilder().startObject()
.startObject("person")
.startObject("properties")
.startObject("tc")
.field("type", "token_count")
.field("analyzer", "keyword")
.endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper stage1 = MapperTestUtils.newParser().parse(stage1Mapping);
String stage2Mapping = XContentFactory.jsonBuilder().startObject()
.startObject("person")
.startObject("properties")
.startObject("tc")
.field("type", "token_count")
.field("analyzer", "standard")
.endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper stage2 = MapperTestUtils.newParser().parse(stage2Mapping);
DocumentMapper.MergeResult mergeResult = stage1.merge(stage2, mergeFlags().simulate(true));
assertThat(mergeResult.hasConflicts(), equalTo(false));
// Just simulated so merge hasn't happened yet
assertThat(((TokenCountFieldMapper) stage1.mappers().smartName("tc").mapper()).analyzer(), equalTo("keyword"));
mergeResult = stage1.merge(stage2, mergeFlags().simulate(false));
assertThat(mergeResult.hasConflicts(), equalTo(false));
// Not a simulation this time, so the merge has actually been applied
assertThat(((TokenCountFieldMapper) stage1.mappers().smartName("tc").mapper()).analyzer(), equalTo("standard"));
}
@Test
public void testCountPositions() throws IOException {
// We're looking to make sure that we:
Token t1 = new Token(); // Don't count tokens without an increment
t1.setPositionIncrement(0);
Token t2 = new Token();
t2.setPositionIncrement(1); // Count normal tokens with one increment
Token t3 = new Token();
t3.setPositionIncrement(2); // Count funny tokens with more than one increment
int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them
Token[] tokens = new Token[] {t1, t2, t3};
Collections.shuffle(Arrays.asList(tokens), getRandom());
TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens);
assertThat(TokenCountFieldMapper.countPositions(tokenStream), equalTo(7));
}
} | 0true
| src_test_java_org_elasticsearch_index_mapper_core_TokenCountFieldMapperTests.java |
2,812 | public class AnalyzerBackwardsCompatTests extends ElasticsearchTokenStreamTestCase {
@Ignore
private void testNoStopwordsAfter(org.elasticsearch.Version noStopwordVersion, String type) throws IOException {
final int iters = atLeast(10);
org.elasticsearch.Version version = org.elasticsearch.Version.CURRENT;
for (int i = 0; i < iters; i++) {
ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop");
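// on versions at or after the no-stopwords cutoff the created-version setting may be omitted at random; older versions must set it explicitly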
if (version.onOrAfter(noStopwordVersion)) {
if (random().nextBoolean()) {
builder.put(SETTING_VERSION_CREATED, version);
}
} else {
builder.put(SETTING_VERSION_CREATED, version);
}
builder.put("index.analysis.analyzer.foo.type", type);
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build());
NamedAnalyzer analyzer = analysisService.analyzer("foo");
if (version.onOrAfter(noStopwordVersion)) {
assertAnalyzesTo(analyzer, "this is bogus", new String[]{"this", "is", "bogus"});
} else {
assertAnalyzesTo(analyzer, "this is bogus", new String[]{"bogus"});
}
version = randomVersion();
}
}
public void testPatternAnalyzer() throws IOException {
testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "pattern");
}
public void testStandardHTMLStripAnalyzer() throws IOException {
testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "standard_html_strip");
}
public void testStandardAnalyzer() throws IOException {
testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_Beta1, "standard");
}
} | 0true
| src_test_java_org_elasticsearch_index_analysis_AnalyzerBackwardsCompatTests.java |
725 | ItemListener listener = new ItemListener() {
public void itemAdded(ItemEvent item) {
latchAdd.countDown();
}
public void itemRemoved(ItemEvent item) {
latchRemove.countDown();
}
}; | 0true
| hazelcast_src_test_java_com_hazelcast_collection_SetTest.java |
3,029 | public class DocValuesFormatService extends AbstractIndexComponent {
private final ImmutableMap<String, DocValuesFormatProvider> providers;
public final static String DEFAULT_FORMAT = "default";
public DocValuesFormatService(Index index) {
this(index, ImmutableSettings.Builder.EMPTY_SETTINGS);
}
public DocValuesFormatService(Index index, @IndexSettings Settings indexSettings) {
this(index, indexSettings, ImmutableMap.<String, DocValuesFormatProvider.Factory>of());
}
@Inject
public DocValuesFormatService(Index index, @IndexSettings Settings indexSettings, Map<String, DocValuesFormatProvider.Factory> docValuesFormatFactories) {
super(index, indexSettings);
MapBuilder<String, DocValuesFormatProvider> providers = MapBuilder.newMapBuilder();
Map<String, Settings> docValuesFormatSettings = indexSettings.getGroups(DocValuesFormatProvider.DOC_VALUES_FORMAT_SETTINGS_PREFIX);
for (Map.Entry<String, DocValuesFormatProvider.Factory> entry : docValuesFormatFactories.entrySet()) {
String name = entry.getKey();
DocValuesFormatProvider.Factory factory = entry.getValue();
Settings settings = docValuesFormatSettings.get(name);
if (settings == null) {
settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
}
providers.put(name, factory.create(name, settings));
}
// This is only needed for tests when guice doesn't have the chance to populate the list of DVF factories
for (PreBuiltDocValuesFormatProvider.Factory factory : DocValuesFormats.listFactories()) {
if (!providers.containsKey(factory.name())) {
providers.put(factory.name(), factory.get());
}
}
this.providers = providers.immutableMap();
}
public DocValuesFormatProvider get(String name) throws ElasticsearchIllegalArgumentException {
DocValuesFormatProvider provider = providers.get(name);
if (provider == null) {
throw new ElasticsearchIllegalArgumentException("failed to find doc_values_format [" + name + "]");
}
return provider;
}
} | 0true
| src_main_java_org_elasticsearch_index_codec_docvaluesformat_DocValuesFormatService.java |
4,133 | public class IndexDynamicSettingsModule extends AbstractModule {
private final DynamicSettings indexDynamicSettings;
public IndexDynamicSettingsModule() {
indexDynamicSettings = new DynamicSettings();
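// register every index-level setting that may be updated dynamically, each with an optional value validator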
indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE);
indexDynamicSettings.addDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*");
indexDynamicSettings.addDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "*");
indexDynamicSettings.addDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "*");
indexDynamicSettings.addDynamicSetting(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE);
indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION);
indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION);
indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION);
indexDynamicSettings.addDynamicSetting(FsTranslog.INDEX_TRANSLOG_FS_TYPE);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, Validator.NON_NEGATIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_READ_ONLY);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_READ);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_WRITE);
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_METADATA);
indexDynamicSettings.addDynamicSetting(IndexShardGatewayService.INDEX_GATEWAY_SNAPSHOT_INTERVAL, Validator.TIME);
indexDynamicSettings.addDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE);
indexDynamicSettings.addDynamicSetting(InternalIndexShard.INDEX_REFRESH_INTERVAL, Validator.TIME);
indexDynamicSettings.addDynamicSetting(LocalGatewayAllocator.INDEX_RECOVERY_INITIAL_SHARDS);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_SIZE, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_SIZE, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, Validator.POSITIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, Validator.INTEGER_GTE_2);
indexDynamicSettings.addDynamicSetting(LogByteSizeMergePolicyProvider.INDEX_COMPOUND_FORMAT);
indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_DOCS, Validator.POSITIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, Validator.POSITIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, Validator.INTEGER_GTE_2);
indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_COMPOUND_FORMAT);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_INDEX_CONCURRENCY, Validator.NON_NEGATIVE_INTEGER);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_COMPOUND_ON_FLUSH, Validator.BOOLEAN);
indexDynamicSettings.addDynamicSetting(CodecService.INDEX_CODEC_BLOOM_LOAD, Validator.BOOLEAN);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_GC_DELETES, Validator.TIME);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_CODEC);
indexDynamicSettings.addDynamicSetting(InternalEngine.INDEX_FAIL_ON_MERGE_FAILURE);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_REFORMAT);
indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_LEVEL);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE, Validator.TIME);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_REFORMAT);
indexDynamicSettings.addDynamicSetting(ShardSlowLogSearchService.INDEX_SEARCH_SLOWLOG_LEVEL);
indexDynamicSettings.addDynamicSetting(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, Validator.DOUBLE);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, Validator.INTEGER_GTE_2);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, Validator.INTEGER_GTE_2);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, Validator.DOUBLE_GTE_2);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, Validator.NON_NEGATIVE_DOUBLE);
indexDynamicSettings.addDynamicSetting(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_INTERVAL, Validator.TIME);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, Validator.INTEGER);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, Validator.TIME);
indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH);
indexDynamicSettings.addDynamicSetting(InternalIndicesWarmer.INDEX_WARMER_ENABLED);
}
public void addDynamicSettings(String... settings) {
indexDynamicSettings.addDynamicSettings(settings);
}
public void addDynamicSetting(String setting, Validator validator) {
indexDynamicSettings.addDynamicSetting(setting, validator);
}
@Override
protected void configure() {
bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings);
}
} | 1no label
| src_main_java_org_elasticsearch_index_settings_IndexDynamicSettingsModule.java |
1,212 | @Beta
public interface ReplicatedMap<K, V>
extends Map<K, V>, DistributedObject {
/**
* <p>Associates a given value to the specified key and replicates it to the
* cluster. If there is an old value, it will be replaced by the specified
* one and returned from the call.</p>
* <p>In addition you have to specify a ttl and its {@link TimeUnit}
* to define when the value is outdated and should be removed from the
* replicated map.</p>
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @param ttl ttl to be associated with the specified key-value pair
* @param timeUnit TimeUnit to be used for the ttl value
*/
V put(K key, V value, long ttl, TimeUnit timeUnit);
/**
* <p>The clear operation is intended for wiping data out of the replicated map.
* It is therefore the only synchronous remote operation in this implementation, so
* be aware that it might be a slow operation.</p>
* <p>If a node fails while executing the operation, it is retried at most
* 3 times (on the failing nodes only). If it still fails after the third retry, this
* method throws a {@link com.hazelcast.spi.exception.CallTimeoutException} back
* to the caller.</p>
*
* @throws com.hazelcast.spi.exception.CallTimeoutException thrown if clear could not
* be executed on remote nodes
*/
void clear();
/**
* Removes the specified entry listener.
* Returns silently if no such listener was added before.
*
* @param id id of registered listener
* @return true if registration is removed, false otherwise
*/
boolean removeEntryListener(String id);
/**
* Adds an entry listener for this map. Listener will get notified
* for all map add/remove/update/evict events.
*
* @param listener entry listener
*/
String addEntryListener(EntryListener<K, V> listener);
/**
* Adds the specified entry listener for the specified key.
* The listener will get notified for all
* add/remove/update/evict events of the specified key only.
* <p/>
* <p><b>Warning:</b></p>
* This method uses <tt>hashCode</tt> and <tt>equals</tt> of the binary form of
* the <tt>key</tt>, not the actual implementations of <tt>hashCode</tt> and <tt>equals</tt>
* defined in the <tt>key</tt>'s class.
*
* @param listener entry listener
* @param key key to listen to
* @return registration id of the added listener
* @throws NullPointerException if the specified key is null
*/
String addEntryListener(EntryListener<K, V> listener, K key);
/**
* Adds a continuous entry listener for this map. The listener will get notified
* for map add/remove/update/evict events filtered by the given predicate.
*
* @param listener entry listener
* @param predicate predicate for filtering entries
* @return registration id of the added listener
*/
String addEntryListener(EntryListener<K, V> listener, Predicate<K, V> predicate);
/**
* Adds a continuous entry listener for this map. The listener will get notified
* for map add/remove/update/evict events filtered by the given predicate.
*
* @param listener entry listener
* @param predicate predicate for filtering entries
* @param key key to listen to
* @return registration id of the added listener
*/
String addEntryListener(EntryListener<K, V> listener, Predicate<K, V> predicate, K key);
/**
* Returns a {@link Collection} view of the values contained in this map.
* The collection is <b>NOT</b> backed by the map, so changes to the map are
* <b>NOT</b> reflected in the collection, and vice-versa.<br/>
* The order of the elements is not guaranteed due to the internal
* asynchronous replication behavior. If a specific order is needed use
* {@link #values(java.util.Comparator)} to force reordering of the
* elements before returning.
*
* @return a collection view of the values contained in this map
*/
Collection<V> values();
/**
* Returns a {@link Collection} view of the values contained in this map.
* The collection is <b>NOT</b> backed by the map, so changes to the map are
* <b>NOT</b> reflected in the collection, and vice-versa.<br/>
* The order of the elements is guaranteed by executing the given
* {@link java.util.Comparator} before returning the elements.
*
* @param comparator the Comparator to sort the returned elements
* @return a collection view of the values contained in this map
*/
Collection<V> values(Comparator<V> comparator);
/**
* Returns a {@link Set} view of the mappings contained in this map.
* The set is <b>NOT</b> backed by the map, so changes to the map are
* <b>NOT</b> reflected in the set, and vice-versa.<br/>
* The order of the elements is not guaranteed due to the internal
* asynchronous replication behavior.
*
* @return a set view of the mappings contained in this map
*/
Set<Entry<K, V>> entrySet();
} | 0true
| hazelcast_src_main_java_com_hazelcast_core_ReplicatedMap.java |
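For illustration, a minimal usage sketch of the ReplicatedMap API documented in this sample, assuming a Hazelcast 3.x member on the classpath; the map name and entry values are hypothetical:

import java.util.Collection;
import java.util.Comparator;
import java.util.concurrent.TimeUnit;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.ReplicatedMap;

public class ReplicatedMapUsage {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        ReplicatedMap<String, String> capitals = hz.getReplicatedMap("capitals");

        // Replicated to all members; outdated and removed after 60 seconds.
        capitals.put("de", "Berlin", 60, TimeUnit.SECONDS);

        // values() is a detached snapshot; the Comparator forces a defined order.
        Collection<String> ordered = capitals.values(new Comparator<String>() {
            public int compare(String a, String b) {
                return a.compareTo(b);
            }
        });
        System.out.println(ordered);
        hz.getLifecycleService().shutdown();
    }
}

The explicit Comparator supplies the deterministic ordering that a plain values() call does not guarantee, per the Javadoc above.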
1,092 | threads[i] = new Thread(new Runnable() {
@Override
public void run() {
try {
for (long i = 0; i < SCAN_COUNT; i++) {
long id = startUid + (Math.abs(ThreadLocalRandom.current().nextInt()) % INDEX_COUNT);
final long version = Versions.loadVersion(reader, new Term("_uid", Long.toString(id)));
if (version != id) {
System.err.println("wrong id...");
break;
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
latch.countDown();
}
}
}); | 0true
| src_test_java_org_elasticsearch_benchmark_common_lucene_uidscan_LuceneUidScanBenchmark.java |
128 | final class ClientServiceProxy implements ClientService {
private final ClientEngineImpl clientEngine;
ClientServiceProxy(ClientEngineImpl clientEngine) {
this.clientEngine = clientEngine;
}
@Override
public Collection<Client> getConnectedClients() {
return clientEngine.getClients();
}
@Override
public String addClientListener(ClientListener clientListener) {
return clientEngine.addClientListener(clientListener);
}
@Override
public boolean removeClientListener(String registrationId) {
return clientEngine.removeClientListener(registrationId);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_client_ClientServiceProxy.java |
789 | public class OMetadataDefault implements OMetadata {
public static final String CLUSTER_INTERNAL_NAME = "internal";
public static final String CLUSTER_INDEX_NAME = "index";
public static final String CLUSTER_MANUAL_INDEX_NAME = "manindex";
public static final String DATASEGMENT_INDEX_NAME = "index";
protected int schemaClusterId;
protected OSchemaProxy schema;
protected OSecurity security;
protected OIndexManagerProxy indexManager;
protected OFunctionLibraryProxy functionLibrary;
protected OSchedulerListenerProxy scheduler;
protected static final OProfilerMBean PROFILER = Orient.instance().getProfiler();
public OMetadataDefault() {
}
public void load() {
final long timer = PROFILER.startChrono();
try {
init(true);
if (schemaClusterId == -1 || getDatabase().countClusterElements(CLUSTER_INTERNAL_NAME) == 0)
return;
} finally {
PROFILER.stopChrono(PROFILER.getDatabaseMetric(getDatabase().getName(), "metadata.load"), "Loading of database metadata",
timer, "db.*.metadata.load");
}
}
public void create() throws IOException {
init(false);
schema.create();
indexManager.create();
security.create();
functionLibrary.create();
security.createClassTrigger();
scheduler.create();
}
public OSchema getSchema() {
return schema;
}
public OSecurity getSecurity() {
return security;
}
public OIndexManagerProxy getIndexManager() {
return indexManager;
}
public int getSchemaClusterId() {
return schemaClusterId;
}
private void init(final boolean iLoad) {
final ODatabaseRecord database = getDatabase();
schemaClusterId = database.getClusterIdByName(CLUSTER_INTERNAL_NAME);
schema = new OSchemaProxy(database.getStorage().getResource(OSchema.class.getSimpleName(), new Callable<OSchemaShared>() {
public OSchemaShared call() {
final OSchemaShared instance = new OSchemaShared(schemaClusterId);
if (iLoad)
instance.load();
return instance;
}
}), database);
indexManager = new OIndexManagerProxy(database.getStorage().getResource(OIndexManager.class.getSimpleName(),
new Callable<OIndexManager>() {
public OIndexManager call() {
OIndexManager instance;
if (database.getStorage() instanceof OStorageProxy)
instance = new OIndexManagerRemote(database);
else
instance = new OIndexManagerShared(database);
if (iLoad)
try {
instance.load();
} catch (Exception e) {
OLogManager.instance().error(this, "[OMetadata] Error on loading index manager, reset index configuration", e);
instance.create();
}
return instance;
}
}), database);
final Boolean enableSecurity = (Boolean) database.getProperty(ODatabase.OPTIONS.SECURITY.toString());
if (enableSecurity != null && !enableSecurity)
// INSTALL NO SECURITY IMPL
security = new OSecurityNull();
else
security = new OSecurityProxy(database.getStorage().getResource(OSecurity.class.getSimpleName(),
new Callable<OSecurityShared>() {
public OSecurityShared call() {
final OSecurityShared instance = new OSecurityShared();
if (iLoad) {
security = instance;
instance.load();
}
return instance;
}
}), database);
functionLibrary = new OFunctionLibraryProxy(database.getStorage().getResource(OFunctionLibrary.class.getSimpleName(),
new Callable<OFunctionLibrary>() {
public OFunctionLibrary call() {
final OFunctionLibraryImpl instance = new OFunctionLibraryImpl();
if (iLoad)
instance.load();
return instance;
}
}), database);
scheduler = new OSchedulerListenerProxy(database.getStorage().getResource(OSchedulerListener.class.getSimpleName(),
new Callable<OSchedulerListener>() {
public OSchedulerListener call() {
final OSchedulerListenerImpl instance = new OSchedulerListenerImpl();
if (iLoad)
instance.load();
return instance;
}
}), database);
}
/**
* Reloads the internal objects.
*/
public void reload() {
if (schema != null)
schema.reload();
if (indexManager != null)
indexManager.load();
if (security != null)
security.load();
if (functionLibrary != null)
functionLibrary.load();
}
/**
* Closes internal objects
*/
public void close() {
if (schema != null)
schema.close();
if (security != null)
security.close();
}
protected ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
public OFunctionLibrary getFunctionLibrary() {
return functionLibrary;
}
public OSchedulerListener getSchedulerListener() {
return scheduler;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_metadata_OMetadataDefault.java |
1,333 | public class OFullCheckpointStartRecord extends OAbstractCheckPointStartRecord {
private OLogSequenceNumber lsn;
public OFullCheckpointStartRecord() {
}
public OFullCheckpointStartRecord(OLogSequenceNumber previousCheckpoint) {
super(previousCheckpoint);
}
@Override
public OLogSequenceNumber getLsn() {
return lsn;
}
@Override
public void setLsn(OLogSequenceNumber lsn) {
this.lsn = lsn;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
return true;
}
@Override
public String toString() {
return "OFullCheckpointStartRecord{" + "lsn=" + lsn + "} " + super.toString();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_wal_OFullCheckpointStartRecord.java |
2,375 | public enum MemorySizeValue {
;
/** Parse the provided string as a memory size. This method accepts either absolute values such as
* <tt>42</tt> (the default unit is bytes) or <tt>2mb</tt>, or percentages of the heap size: if
* the heap is 1G, <tt>10%</tt> will be parsed as <tt>100mb</tt>. */
public static ByteSizeValue parseBytesSizeValueOrHeapRatio(String sValue) {
if (sValue.endsWith("%")) {
final String percentAsString = sValue.substring(0, sValue.length() - 1);
try {
final double percent = Double.parseDouble(percentAsString);
if (percent < 0 || percent > 100) {
throw new ElasticsearchParseException("Percentage should be in [0-100], got " + percentAsString);
}
return new ByteSizeValue((long) ((percent / 100) * JvmInfo.jvmInfo().getMem().getHeapMax().bytes()), ByteSizeUnit.BYTES);
} catch (NumberFormatException e) {
throw new ElasticsearchParseException("Failed to parse [" + percentAsString + "] as a double", e);
}
} else {
return parseBytesSizeValue(sValue);
}
}
} | 0true
| src_main_java_org_elasticsearch_common_unit_MemorySizeValue.java |
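The Javadoc in this sample names two accepted input shapes; a hedged sketch of both calls, assuming the Elasticsearch source tree shown (the printed values depend on the local JVM's max heap):

import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.MemorySizeValue;

public class MemorySizeValueUsage {
    public static void main(String[] args) {
        // Absolute: unit suffixes are honoured, bare numbers are bytes.
        ByteSizeValue absolute = MemorySizeValue.parseBytesSizeValueOrHeapRatio("2mb");
        // Relative: resolved against the JVM's configured max heap.
        ByteSizeValue relative = MemorySizeValue.parseBytesSizeValueOrHeapRatio("10%");
        System.out.println(absolute + " / " + relative);
    }
}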
574 | ex.execute(new Runnable() {
public void run() {
factory.newHazelcastInstance(config);
nodeLatch.countDown();
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_cluster_ClusterMembershipTest.java |
2,138 | public static class ExistsCollector extends Collector {
private boolean exists;
public void reset() {
exists = false;
}
public boolean exists() {
return exists;
}
@Override
public void setScorer(Scorer scorer) throws IOException {
this.exists = false;
}
@Override
public void collect(int doc) throws IOException {
exists = true;
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
} | 0true
| src_main_java_org_elasticsearch_common_lucene_Lucene.java |
5,393 | public class InternalValueCount extends MetricsAggregation implements ValueCount {
public static final Type TYPE = new Type("value_count", "vcount");
private static final AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public InternalValueCount readResult(StreamInput in) throws IOException {
InternalValueCount count = new InternalValueCount();
count.readFrom(in);
return count;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
private long value;
InternalValueCount() {} // for serialization
public InternalValueCount(String name, long value) {
super(name);
this.value = value;
}
@Override
public long getValue() {
return value;
}
@Override
public Type type() {
return TYPE;
}
@Override
public InternalAggregation reduce(ReduceContext reduceContext) {
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
return aggregations.get(0);
}
InternalValueCount reduced = null;
for (InternalAggregation aggregation : aggregations) {
if (reduced == null) {
reduced = (InternalValueCount) aggregation;
} else {
reduced.value += ((InternalValueCount) aggregation).value;
}
}
return reduced;
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
value = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeVLong(value);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject(name)
.field(CommonFields.VALUE, value)
.endObject();
}
@Override
public String toString() {
return "count[" + value + "]";
}
} | 1no label
| src_main_java_org_elasticsearch_search_aggregations_metrics_valuecount_InternalValueCount.java |
982 | shardAction.execute(shardRequest, new ActionListener<ShardResponse>() {
@Override
public void onResponse(ShardResponse result) {
shardsResponses.set(indexCounter.getAndIncrement(), result);
if (completionCounter.decrementAndGet() == 0) {
listener.onResponse(newResponseInstance(request, shardsResponses));
}
}
@Override
public void onFailure(Throwable e) {
int index = indexCounter.getAndIncrement();
if (accumulateExceptions()) {
shardsResponses.set(index, e);
}
if (completionCounter.decrementAndGet() == 0) {
listener.onResponse(newResponseInstance(request, shardsResponses));
}
}
}); | 0true
| src_main_java_org_elasticsearch_action_support_replication_TransportIndexReplicationOperationAction.java |
200 | public class Router {
private final LoadBalancer loadBalancer;
public Router(LoadBalancer loadBalancer) {
this.loadBalancer = loadBalancer;
}
public Address next() {
final MemberImpl member = (MemberImpl) loadBalancer.next();
if (member == null) {
return null;
} else {
return member.getAddress();
}
}
} | 0true
| hazelcast-client_src_main_java_com_hazelcast_client_connection_Router.java |
3,855 | public static class Parser implements FilterParser {
@Inject
public Parser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
String fieldName = null;
String geohash = null;
int levels = -1;
boolean neighbors = false;
XContentParser.Token token;
if ((token = parser.currentToken()) != Token.START_OBJECT) {
throw new ElasticsearchParseException(NAME + " must be an object");
}
while ((token = parser.nextToken()) != Token.END_OBJECT) {
if (token == Token.FIELD_NAME) {
String field = parser.text();
if (PRECISION.equals(field)) {
token = parser.nextToken();
if(token == Token.VALUE_NUMBER) {
levels = parser.intValue();
} else if(token == Token.VALUE_STRING) {
double meters = DistanceUnit.parse(parser.text(), DistanceUnit.DEFAULT, DistanceUnit.METERS);
levels = GeoUtils.geoHashLevelsForPrecision(meters);
}
} else if (NEIGHBORS.equals(field)) {
parser.nextToken();
neighbors = parser.booleanValue();
} else {
fieldName = field;
token = parser.nextToken();
if(token == Token.VALUE_STRING) {
                        // A string indicates either a geohash or a lat/lon string
                        String location = parser.text();
                        if (location.indexOf(",") > 0) {
geohash = GeoPoint.parse(parser).geohash();
} else {
geohash = location;
}
} else {
geohash = GeoPoint.parse(parser).geohash();
}
}
} else {
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}
}
if (geohash == null) {
throw new QueryParsingException(parseContext.index(), "no geohash value provided to geohash_cell filter");
}
MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
if (smartMappers == null || !smartMappers.hasMapper()) {
throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
}
FieldMapper<?> mapper = smartMappers.mapper();
if (!(mapper instanceof GeoPointFieldMapper)) {
throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
}
GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper);
if (!geoMapper.isEnableGeohashPrefix()) {
throw new QueryParsingException(parseContext.index(), "can't execute geohash_cell on field [" + fieldName + "], geohash_prefix is not enabled");
}
if(levels > 0) {
int len = Math.min(levels, geohash.length());
geohash = geohash.substring(0, len);
}
if (neighbors) {
return create(parseContext, geoMapper, geohash, GeoHashUtils.neighbors(geohash));
} else {
return create(parseContext, geoMapper, geohash, null);
}
}
} | 1no label
| src_main_java_org_elasticsearch_index_query_GeohashCellFilter.java |
1,353 | private class NodeMappingRefreshTransportHandler extends BaseTransportRequestHandler<NodeMappingRefreshRequest> {
static final String ACTION = "cluster/nodeMappingRefresh";
@Override
public NodeMappingRefreshRequest newInstance() {
return new NodeMappingRefreshRequest();
}
@Override
public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception {
innerMappingRefresh(request);
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
} | 0true
| src_main_java_org_elasticsearch_cluster_action_index_NodeMappingRefreshAction.java |
1,212 | QUEUE {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
return concurrentDeque(c, limit);
}
}, | 0true
| src_main_java_org_elasticsearch_cache_recycler_CacheRecycler.java |
3,271 | public class MultiOrdinals implements Ordinals {
private static final int OFFSETS_PAGE_SIZE = 1024;
private static final int OFFSET_INIT_PAGE_COUNT = 16;
/**
* Return true if this impl is going to be smaller than {@link SinglePackedOrdinals} by at least 20%.
*/
public static boolean significantlySmallerThanSinglePackedOrdinals(int maxDoc, int numDocsWithValue, long numOrds, float acceptableOverheadRatio) {
int bitsPerOrd = PackedInts.bitsRequired(numOrds);
bitsPerOrd = PackedInts.fastestFormatAndBits(numDocsWithValue, bitsPerOrd, acceptableOverheadRatio).bitsPerValue;
        // Compute the worst-case number of bits per value for offsets, e.g. if no docs have a value at the
        // beginning of the block and all docs have one at the end of the block
final float avgValuesPerDoc = (float) numDocsWithValue / maxDoc;
final int maxDelta = (int) Math.ceil(OFFSETS_PAGE_SIZE * (1 - avgValuesPerDoc) * avgValuesPerDoc);
int bitsPerOffset = PackedInts.bitsRequired(maxDelta) + 1; // +1 because of the sign
bitsPerOffset = PackedInts.fastestFormatAndBits(maxDoc, bitsPerOffset, acceptableOverheadRatio).bitsPerValue;
final long expectedMultiSizeInBytes = (long) numDocsWithValue * bitsPerOrd + (long) maxDoc * bitsPerOffset;
final long expectedSingleSizeInBytes = (long) maxDoc * bitsPerOrd;
return expectedMultiSizeInBytes < 0.8f * expectedSingleSizeInBytes;
}
private final boolean multiValued;
private final long numOrds;
private final MonotonicAppendingLongBuffer endOffsets;
private final AppendingPackedLongBuffer ords;
public MultiOrdinals(OrdinalsBuilder builder, float acceptableOverheadRatio) {
multiValued = builder.getNumMultiValuesDocs() > 0;
numOrds = builder.getNumOrds();
endOffsets = new MonotonicAppendingLongBuffer(OFFSET_INIT_PAGE_COUNT, OFFSETS_PAGE_SIZE, acceptableOverheadRatio);
ords = new AppendingPackedLongBuffer(OFFSET_INIT_PAGE_COUNT, OFFSETS_PAGE_SIZE, acceptableOverheadRatio);
long lastEndOffset = 0;
for (int i = 0; i < builder.maxDoc(); ++i) {
final LongsRef docOrds = builder.docOrds(i);
final long endOffset = lastEndOffset + docOrds.length;
endOffsets.add(endOffset);
for (int j = 0; j < docOrds.length; ++j) {
ords.add(docOrds.longs[docOrds.offset + j] - 1);
}
lastEndOffset = endOffset;
}
assert endOffsets.size() == builder.maxDoc();
assert ords.size() == builder.getTotalNumOrds() : ords.size() + " != " + builder.getTotalNumOrds();
}
@Override
public long getMemorySizeInBytes() {
return endOffsets.ramBytesUsed() + ords.ramBytesUsed();
}
@Override
public boolean isMultiValued() {
return multiValued;
}
@Override
public int getNumDocs() {
return (int) endOffsets.size();
}
@Override
public long getNumOrds() {
return numOrds;
}
@Override
public long getMaxOrd() {
return numOrds + 1;
}
@Override
public Ordinals.Docs ordinals() {
return new MultiDocs(this);
}
static class MultiDocs implements Ordinals.Docs {
private final MultiOrdinals ordinals;
private final MonotonicAppendingLongBuffer endOffsets;
private final AppendingPackedLongBuffer ords;
private final LongsRef longsScratch;
private long offset;
private long limit;
private long currentOrd;
MultiDocs(MultiOrdinals ordinals) {
this.ordinals = ordinals;
this.endOffsets = ordinals.endOffsets;
this.ords = ordinals.ords;
this.longsScratch = new LongsRef(16);
}
@Override
public Ordinals ordinals() {
return this.ordinals;
}
@Override
public int getNumDocs() {
return ordinals.getNumDocs();
}
@Override
public long getNumOrds() {
return ordinals.getNumOrds();
}
@Override
public long getMaxOrd() {
return ordinals.getMaxOrd();
}
@Override
public boolean isMultiValued() {
return ordinals.isMultiValued();
}
@Override
public long getOrd(int docId) {
final long startOffset = docId > 0 ? endOffsets.get(docId - 1) : 0;
final long endOffset = endOffsets.get(docId);
if (startOffset == endOffset) {
return currentOrd = 0L; // ord for missing values
} else {
return currentOrd = 1L + ords.get(startOffset);
}
}
@Override
public LongsRef getOrds(int docId) {
final long startOffset = docId > 0 ? endOffsets.get(docId - 1) : 0;
final long endOffset = endOffsets.get(docId);
final int numValues = (int) (endOffset - startOffset);
if (longsScratch.length < numValues) {
longsScratch.longs = new long[ArrayUtil.oversize(numValues, RamUsageEstimator.NUM_BYTES_LONG)];
}
for (int i = 0; i < numValues; ++i) {
longsScratch.longs[i] = 1L + ords.get(startOffset + i);
}
longsScratch.offset = 0;
longsScratch.length = numValues;
return longsScratch;
}
@Override
public long nextOrd() {
assert offset < limit;
return currentOrd = 1L + ords.get(offset++);
}
@Override
public int setDocument(int docId) {
final long startOffset = docId > 0 ? endOffsets.get(docId - 1) : 0;
final long endOffset = endOffsets.get(docId);
offset = startOffset;
limit = endOffset;
return (int) (endOffset - startOffset);
}
@Override
public long currentOrd() {
return currentOrd;
}
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_ordinals_MultiOrdinals.java |
1,568 | public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
public static final String CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE = "cluster.routing.allocation.cluster_concurrent_rebalance";
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
int clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance);
if (clusterConcurrentRebalance != ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance) {
logger.info("updating [cluster.routing.allocation.cluster_concurrent_rebalance] from [{}], to [{}]", ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance, clusterConcurrentRebalance);
ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance = clusterConcurrentRebalance;
}
}
}
private volatile int clusterConcurrentRebalance;
@Inject
public ConcurrentRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);
this.clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 2);
logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance);
nodeSettingsService.addListener(new ApplySettings());
}
@Override
public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
if (clusterConcurrentRebalance == -1) {
return allocation.decision(Decision.YES, "all concurrent rebalances are allowed");
}
if (allocation.routingNodes().getRelocatingShardCount() >= clusterConcurrentRebalance) {
            return allocation.decision(Decision.NO, "too many concurrent rebalances [%d], limit: [%d]",
allocation.routingNodes().getRelocatingShardCount(), clusterConcurrentRebalance);
}
return allocation.decision(Decision.YES, "below threshold [%d] for concurrent rebalances", clusterConcurrentRebalance);
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_allocation_decider_ConcurrentRebalanceAllocationDecider.java |
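This decider is driven by the dynamic cluster setting named in its constant; a hedged sketch of raising the limit at runtime through an Elasticsearch 1.x transport client (host, port, and the new limit of 4 are assumptions):

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;

public class ConcurrentRebalanceSettingUsage {
    public static void main(String[] args) {
        // Placeholder coordinates for a locally running node.
        TransportClient client = new TransportClient()
                .addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
        try {
            // Raise the cluster-wide cap on concurrent shard relocations (default is 2).
            client.admin().cluster().prepareUpdateSettings()
                    .setTransientSettings(ImmutableSettings.settingsBuilder()
                            .put("cluster.routing.allocation.cluster_concurrent_rebalance", 4)
                            .build())
                    .get();
        } finally {
            client.close();
        }
    }
}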
975 | public static class Name {
public static final String OrderItems = "OrderImpl_Order_Items_Tab";
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_BundleOrderItemImpl.java |
751 | public class MultiGetAction extends Action<MultiGetRequest, MultiGetResponse, MultiGetRequestBuilder> {
public static final MultiGetAction INSTANCE = new MultiGetAction();
public static final String NAME = "mget";
private MultiGetAction() {
super(NAME);
}
@Override
public MultiGetResponse newResponse() {
return new MultiGetResponse();
}
@Override
public MultiGetRequestBuilder newRequestBuilder(Client client) {
return new MultiGetRequestBuilder(client);
}
} | 0true
| src_main_java_org_elasticsearch_action_get_MultiGetAction.java |
3,264 | public class SemaphorePermission extends InstancePermission {
private static final int ACQUIRE = 0x4;
private static final int RELEASE = 0x8;
    private static final int READ = 0x10;
private static final int ALL = CREATE | DESTROY | ACQUIRE | RELEASE | READ;
public SemaphorePermission(String name, String... actions) {
super(name, actions);
}
@Override
protected int initMask(String[] actions) {
int mask = NONE;
for (String action : actions) {
if (ActionConstants.ACTION_ALL.equals(action)) {
return ALL;
}
if (ActionConstants.ACTION_CREATE.equals(action)) {
mask |= CREATE;
} else if (ActionConstants.ACTION_ACQUIRE.equals(action)) {
mask |= ACQUIRE;
} else if (ActionConstants.ACTION_RELEASE.equals(action)) {
mask |= RELEASE;
} else if (ActionConstants.ACTION_DESTROY.equals(action)) {
mask |= DESTROY;
} else if (ActionConstants.ACTION_READ.equals(action)) {
mask |= READ;
}
}
return mask;
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_security_permission_SemaphorePermission.java |
50 | public abstract class HttpCommandProcessor<T> extends AbstractTextCommandProcessor<T> {
public static final String URI_MAPS = "/hazelcast/rest/maps/";
public static final String URI_QUEUES = "/hazelcast/rest/queues/";
public static final String URI_CLUSTER = "/hazelcast/rest/cluster";
public static final String URI_STATE_DUMP = "/hazelcast/rest/dump";
public static final String URI_MANCENTER_CHANGE_URL = "/hazelcast/rest/mancenter/changeurl";
protected HttpCommandProcessor(TextCommandService textCommandService) {
super(textCommandService);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_ascii_rest_HttpCommandProcessor.java |
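The constants in this sample define the member's REST endpoint prefixes; a hedged sketch of reading one map entry over the maps endpoint with plain java.net, assuming the REST API is enabled and that the path shape is URI_MAPS + mapName + "/" + key — host, port, map name, and key are placeholders:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class RestMapGet {
    public static void main(String[] args) throws Exception {
        // "/hazelcast/rest/maps/" + "capitals" + "/" + "de"
        URL url = new URL("http://127.0.0.1:5701/hazelcast/rest/maps/capitals/de");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
        String line;
        while ((line = in.readLine()) != null) {
            System.out.println(line);
        }
        in.close();
        conn.disconnect();
    }
}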
120 | public final class ClientPrincipal implements Portable {
private String uuid;
private String ownerUuid;
public ClientPrincipal() {
}
public ClientPrincipal(String uuid, String ownerUuid) {
this.uuid = uuid;
this.ownerUuid = ownerUuid;
}
public String getUuid() {
return uuid;
}
public String getOwnerUuid() {
return ownerUuid;
}
@Override
public int getFactoryId() {
return ClientPortableHook.ID;
}
@Override
public int getClassId() {
return ClientPortableHook.PRINCIPAL;
}
@Override
public void writePortable(PortableWriter writer) throws IOException {
writer.writeUTF("uuid", uuid);
writer.writeUTF("ownerUuid", ownerUuid);
}
@Override
public void readPortable(PortableReader reader) throws IOException {
uuid = reader.readUTF("uuid");
ownerUuid = reader.readUTF("ownerUuid");
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ClientPrincipal that = (ClientPrincipal) o;
if (ownerUuid != null ? !ownerUuid.equals(that.ownerUuid) : that.ownerUuid != null) {
return false;
}
if (uuid != null ? !uuid.equals(that.uuid) : that.uuid != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = uuid != null ? uuid.hashCode() : 0;
result = 31 * result + (ownerUuid != null ? ownerUuid.hashCode() : 0);
return result;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("ClientPrincipal{");
sb.append("uuid='").append(uuid).append('\'');
sb.append(", ownerUuid='").append(ownerUuid).append('\'');
sb.append('}');
return sb.toString();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_client_ClientPrincipal.java |
494 | private static class IdentityRewriter<T> implements FieldRewriter<T> {
@Override
public T rewriteValue(T value) {
return null;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseImport.java |
666 | public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder<ValidateQueryRequest, ValidateQueryResponse, ValidateQueryRequestBuilder> {
private QuerySourceBuilder sourceBuilder;
public ValidateQueryRequestBuilder(IndicesAdminClient client) {
super((InternalIndicesAdminClient) client, new ValidateQueryRequest());
}
/**
* The types of documents the query will run against. Defaults to all types.
*/
public ValidateQueryRequestBuilder setTypes(String... types) {
request.types(types);
return this;
}
/**
* The query source to validate.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public ValidateQueryRequestBuilder setQuery(QueryBuilder queryBuilder) {
sourceBuilder().setQuery(queryBuilder);
return this;
}
/**
* The source to validate.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public ValidateQueryRequestBuilder setSource(BytesReference source) {
request().source(source, false);
return this;
}
/**
* The source to validate.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public ValidateQueryRequestBuilder setSource(BytesReference source, boolean unsafe) {
request().source(source, unsafe);
return this;
}
/**
* The source to validate.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public ValidateQueryRequestBuilder setSource(byte[] source) {
request.source(source);
return this;
}
/**
* Indicates if detailed information about the query should be returned.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public ValidateQueryRequestBuilder setExplain(boolean explain) {
request.explain(explain);
return this;
}
@Override
protected void doExecute(ActionListener<ValidateQueryResponse> listener) {
if (sourceBuilder != null) {
request.source(sourceBuilder);
}
((IndicesAdminClient) client).validateQuery(request, listener);
}
private QuerySourceBuilder sourceBuilder() {
if (sourceBuilder == null) {
sourceBuilder = new QuerySourceBuilder();
}
return sourceBuilder;
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_validate_query_ValidateQueryRequestBuilder.java |
1,190 | @Service("blCreditCardPaymentInfoFactory")
public class CreditCardPaymentInfoFactoryImpl implements PaymentInfoFactory {
/**
* Constructs a default Credit Card PaymentInfo object based on the passed in order.
* Sets the basic information necessary to complete an order.
*
* @param order
* @return PaymentInfo - the Credit Card Payment object that gets persisted in Broadleaf.
*/
@Override
public PaymentInfo constructPaymentInfo(Order order) {
PaymentInfoImpl paymentInfo = new PaymentInfoImpl();
paymentInfo.setOrder(order);
paymentInfo.setType(PaymentInfoType.CREDIT_CARD);
paymentInfo.setReferenceNumber(String.valueOf(order.getId()));
paymentInfo.setAmount(order.getRemainingTotal());
return paymentInfo;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_CreditCardPaymentInfoFactoryImpl.java |
2,186 | public class MatchAllDocsFilter extends Filter {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
return new AllDocIdSet(context.reader().maxDoc());
}
@Override
public int hashCode() {
return this.getClass().hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null) {
return false;
}
if (obj.getClass() == this.getClass()) {
return true;
}
return false;
}
@Override
public String toString() {
return "*:*";
}
} | 0true
| src_main_java_org_elasticsearch_common_lucene_search_MatchAllDocsFilter.java |
1,054 | public class JoinConfig {
private MulticastConfig multicastConfig = new MulticastConfig();
private TcpIpConfig tcpIpConfig = new TcpIpConfig();
private AwsConfig awsConfig = new AwsConfig();
/**
* @return the multicastConfig
*/
public MulticastConfig getMulticastConfig() {
return multicastConfig;
}
/**
* @param multicastConfig the multicastConfig to set
* @throws IllegalArgumentException if multicastConfig is null.
*/
public JoinConfig setMulticastConfig(final MulticastConfig multicastConfig) {
this.multicastConfig = isNotNull(multicastConfig, "multicastConfig");
return this;
}
/**
* @return the tcpIpConfig
*/
public TcpIpConfig getTcpIpConfig() {
return tcpIpConfig;
}
/**
* @param tcpIpConfig the tcpIpConfig to set
* @throws IllegalArgumentException if tcpIpConfig is null.
*/
public JoinConfig setTcpIpConfig(final TcpIpConfig tcpIpConfig) {
        this.tcpIpConfig = isNotNull(tcpIpConfig, "tcpIpConfig");
return this;
}
/**
* @return the awsConfig
*/
public AwsConfig getAwsConfig() {
return awsConfig;
}
/**
* @param awsConfig the AwsConfig to set
* @throws IllegalArgumentException if awsConfig is null.
*/
public JoinConfig setAwsConfig(final AwsConfig awsConfig) {
this.awsConfig = isNotNull(awsConfig,"awsConfig");
return this;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("JoinConfig{");
sb.append("multicastConfig=").append(multicastConfig);
sb.append(", tcpIpConfig=").append(tcpIpConfig);
sb.append(", awsConfig=").append(awsConfig);
sb.append('}');
return sb.toString();
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_config_JoinConfig.java |
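A short sketch of how this JoinConfig is typically reached from a member Config, disabling multicast in favour of a fixed TCP/IP member list; the member address is a placeholder:

import com.hazelcast.config.Config;
import com.hazelcast.config.JoinConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

public class JoinConfigUsage {
    public static void main(String[] args) {
        Config config = new Config();
        JoinConfig join = config.getNetworkConfig().getJoin();

        // Pick exactly one discovery mechanism: disable multicast,
        // enable TCP/IP with a fixed member list.
        join.getMulticastConfig().setEnabled(false);
        join.getTcpIpConfig().setEnabled(true).addMember("10.0.0.1");

        HazelcastInstance member = Hazelcast.newHazelcastInstance(config);
        System.out.println(member.getCluster().getMembers());
    }
}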
3,069 | public class SnapshotIndexCommit extends IndexCommitDelegate implements Releasable {
private final SnapshotDeletionPolicy deletionPolicy;
private final String[] files;
SnapshotIndexCommit(SnapshotDeletionPolicy deletionPolicy, IndexCommit cp) throws IOException {
super(cp);
this.deletionPolicy = deletionPolicy;
ArrayList<String> tmpFiles = new ArrayList<String>();
for (String o : cp.getFileNames()) {
tmpFiles.add(o);
}
files = tmpFiles.toArray(new String[tmpFiles.size()]);
}
public String[] getFiles() {
return files;
}
/**
* Releases the current snapshot, returning <code>true</code> if it was
* actually released.
*/
public boolean release() {
return deletionPolicy.release(getGeneration());
}
/**
* Override the delete operation, and only actually delete it if it
* is not held by the {@link SnapshotDeletionPolicy}.
*/
@Override
public void delete() {
if (!deletionPolicy.isHeld(getGeneration())) {
delegate.delete();
}
}
} | 0true
| src_main_java_org_elasticsearch_index_deletionpolicy_SnapshotIndexCommit.java |
483 | int indexesSizeTwo = makeDbCall(databaseDocumentTxTwo, new ODbRelatedCall<Integer>() {
public Integer call() {
return indexManagerTwo.getIndexes().size();
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseCompare.java |
4,127 | static class StatsHolder {
public final MeanMetric queryMetric = new MeanMetric();
public final MeanMetric fetchMetric = new MeanMetric();
public final CounterMetric queryCurrent = new CounterMetric();
public final CounterMetric fetchCurrent = new CounterMetric();
public SearchStats.Stats stats() {
return new SearchStats.Stats(
queryMetric.count(), TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()), queryCurrent.count(),
fetchMetric.count(), TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()), fetchCurrent.count());
}
public long totalCurrent() {
return queryCurrent.count() + fetchCurrent.count();
}
public void clear() {
queryMetric.clear();
fetchMetric.clear();
}
} | 1no label
| src_main_java_org_elasticsearch_index_search_stats_ShardSearchService.java |
1,616 | @Component("blAdminSandBoxFilter")
public class AdminSandBoxFilter extends OncePerRequestFilter {
private static final String SANDBOX_ADMIN_ID_VAR = "blAdminCurrentSandboxId";
private static String SANDBOX_ID_VAR = "blSandboxId";
@Resource(name="blSandBoxService")
protected SandBoxService sandBoxService;
@Resource(name="blAdminSecurityRemoteService")
protected SecurityVerifier adminRemoteSecurityService;
@Override
protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain) throws ServletException, IOException {
HttpSession session = request.getSession();
AdminUser adminUser = adminRemoteSecurityService.getPersistentAdminUser();
if (adminUser == null) {
//clear any sandbox
session.removeAttribute(SANDBOX_ADMIN_ID_VAR);
SandBoxContext.setSandBoxContext(null);
} else {
SandBox sandBox = sandBoxService.retrieveUserSandBox(null, adminUser);
session.setAttribute(SANDBOX_ADMIN_ID_VAR, sandBox.getId());
session.removeAttribute(SANDBOX_ID_VAR);
AdminSandBoxContext context = new AdminSandBoxContext();
context.setSandBoxId(sandBox.getId());
context.setSandBoxMode(SandBoxMode.IMMEDIATE_COMMIT);
context.setAdminUser(adminUser);
SandBoxContext.setSandBoxContext(context);
}
try {
filterChain.doFilter(request, response);
} finally {
SandBoxContext.setSandBoxContext(null);
}
}
} | 0true
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_security_AdminSandBoxFilter.java |
1,758 | assertTrueEventually(new AssertTask() {
@Override
public void run() throws Exception {
assertEquals(0, map.size());
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_EvictionTest.java |
89 | public interface ObjectToDouble<A> { double apply(A a); } | 0true
| src_main_java_jsr166e_ConcurrentHashMapV8.java |
3,629 | public class PathMatchDynamicTemplateTests extends ElasticsearchTestCase {
@Test
public void testSimple() throws Exception {
String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json");
DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);
byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json");
Document doc = docMapper.parse(new BytesArray(json)).rootDoc();
IndexableField f = doc.getField("name");
assertThat(f.name(), equalTo("name"));
assertThat(f.stringValue(), equalTo("top_level"));
assertThat(f.fieldType().stored(), equalTo(false));
FieldMappers fieldMappers = docMapper.mappers().fullName("name");
assertThat(fieldMappers.mappers().size(), equalTo(1));
assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(false));
f = doc.getField("obj1.name");
assertThat(f.name(), equalTo("obj1.name"));
assertThat(f.fieldType().stored(), equalTo(true));
fieldMappers = docMapper.mappers().fullName("obj1.name");
assertThat(fieldMappers.mappers().size(), equalTo(1));
assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(true));
f = doc.getField("obj1.obj2.name");
assertThat(f.name(), equalTo("obj1.obj2.name"));
assertThat(f.fieldType().stored(), equalTo(false));
fieldMappers = docMapper.mappers().fullName("obj1.obj2.name");
assertThat(fieldMappers.mappers().size(), equalTo(1));
assertThat(fieldMappers.mapper().fieldType().stored(), equalTo(false));
// verify more complex path_match expressions
fieldMappers = docMapper.mappers().fullName("obj3.obj4.prop1");
assertThat(fieldMappers.mappers().size(), equalTo(1));
}
} | 0true
| src_test_java_org_elasticsearch_index_mapper_dynamictemplate_pathmatch_PathMatchDynamicTemplateTests.java |
917 | final Object myNextVal = makeDbCall(iMyDb, new ODbRelatedCall<Object>() {
public Object call() {
return myIterator.next();
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_record_impl_ODocumentHelper.java |
1,344 | completableFuture.andThen(new ExecutionCallback() {
@Override
public void onResponse(Object response) {
reference.set(response);
latch2.countDown();
}
@Override
public void onFailure(Throwable t) {
reference.set(t);
latch2.countDown();
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java |
828 | @RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class AtomicReferenceClientRequestTest extends ClientTestSupport {
static final String name = "test";
protected Config createConfig() {
return new Config();
}
private IAtomicReference getAtomicReference() {
IAtomicReference reference = getInstance().getAtomicReference(name);
reference.set(null);
return reference;
}
@Test
public void get() throws Exception {
IAtomicReference<String> reference = getAtomicReference();
final SimpleClient client = getClient();
client.send(new GetRequest(name));
assertNull(client.receive());
reference.set("foo");
client.send(new GetRequest(name));
assertEquals("foo",client.receive());
}
@Test
public void isNull() throws Exception {
IAtomicReference<String> reference = getAtomicReference();
final SimpleClient client = getClient();
client.send(new IsNullRequest(name));
assertEquals(Boolean.TRUE,client.receive());
reference.set("foo");
client.send(new IsNullRequest(name));
assertEquals(Boolean.FALSE,client.receive());
}
@Test
@ClientCompatibleTest
public void contains()throws Exception {
IAtomicReference<String> reference = getAtomicReference();
final SimpleClient client = getClient();
client.send(new ContainsRequest(name, toData(null)));
assertEquals(Boolean.TRUE, client.receive());
reference.set("foo");
client.send(new ContainsRequest(name, toData(null)));
assertEquals(Boolean.FALSE, client.receive());
client.send(new ContainsRequest(name, toData("foo")));
assertEquals(Boolean.TRUE,client.receive());
client.send(new ContainsRequest(name, toData("bar")));
assertEquals(Boolean.FALSE,client.receive());
}
@Test
public void set() throws Exception {
IAtomicReference<String> reference = getAtomicReference();
final SimpleClient client = getClient();
client.send(new SetRequest(name, toData(null)));
assertNull(client.receive());
assertNull(reference.get());
client.send(new SetRequest(name, toData("foo")));
assertNull(client.receive());
assertEquals("foo", reference.get());
client.send(new SetRequest(name, toData("foo")));
assertNull(client.receive());
assertEquals("foo", reference.get());
client.send(new SetRequest(name, toData(null)));
assertNull(client.receive());
assertEquals(null,reference.get());
}
@Test
public void getAndSet() throws Exception {
IAtomicReference<String> reference = getAtomicReference();
final SimpleClient client = getClient();
client.send(new GetAndSetRequest(name, toData(null)));
assertNull(client.receive());
assertNull(reference.get());
client.send(new GetAndSetRequest(name, toData("foo")));
assertNull(client.receive());
assertEquals("foo",reference.get());
client.send(new GetAndSetRequest(name, toData("foo")));
assertEquals("foo", client.receive());
assertEquals("foo",reference.get());
client.send(new GetAndSetRequest(name, toData("bar")));
assertEquals("foo", client.receive());
assertEquals("bar",reference.get());
client.send(new GetAndSetRequest(name, toData(null)));
assertEquals("bar", client.receive());
assertNull(reference.get());
}
@Test
public void compareAndSet() throws Exception {
IAtomicReference<String> reference = getAtomicReference();
final SimpleClient client = getClient();
client.send(new CompareAndSetRequest(name, toData(null), toData(null)));
assertEquals(Boolean.TRUE, client.receive());
assertNull(reference.get());
client.send(new CompareAndSetRequest(name, toData("foo"), toData(null)));
assertEquals(Boolean.FALSE, client.receive());
assertNull(reference.get());
client.send(new CompareAndSetRequest(name, toData(null), toData("foo")));
assertEquals(Boolean.TRUE, client.receive());
assertEquals("foo", reference.get());
client.send(new CompareAndSetRequest(name, toData("foo"), toData("foo")));
assertEquals(Boolean.TRUE, client.receive());
assertEquals("foo",reference.get());
client.send(new CompareAndSetRequest(name, toData(null), toData("pipo")));
assertEquals(Boolean.FALSE, client.receive());
assertEquals("foo",reference.get());
client.send(new CompareAndSetRequest(name, toData("bar"), toData("foo")));
assertEquals(Boolean.FALSE, client.receive());
assertEquals("foo",reference.get());
client.send(new CompareAndSetRequest(name, toData("foo"), toData("bar")));
assertEquals(Boolean.TRUE, client.receive());
assertEquals("bar",reference.get());
client.send(new CompareAndSetRequest(name, toData("bar"), toData(null)));
assertEquals(Boolean.TRUE, client.receive());
assertEquals(null,reference.get());
}
public Data toData(Object o){
return getNode(getInstance()).getSerializationService().toData(o);
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_concurrent_atomicreference_AtomicReferenceClientRequestTest.java |
1,997 | private static class AnnotatedWith extends AbstractMatcher<AnnotatedElement>
implements Serializable {
private final Annotation annotation;
public AnnotatedWith(Annotation annotation) {
this.annotation = checkNotNull(annotation, "annotation");
checkForRuntimeRetention(annotation.annotationType());
}
public boolean matches(AnnotatedElement element) {
Annotation fromElement = element.getAnnotation(annotation.annotationType());
return fromElement != null && annotation.equals(fromElement);
}
@Override
public boolean equals(Object other) {
return other instanceof AnnotatedWith
&& ((AnnotatedWith) other).annotation.equals(annotation);
}
@Override
public int hashCode() {
return 37 * annotation.hashCode();
}
@Override
public String toString() {
return "annotatedWith(" + annotation + ")";
}
private static final long serialVersionUID = 0;
} | 0true
| src_main_java_org_elasticsearch_common_inject_matcher_Matchers.java |