Unnamed: 0 (int64, 0–6.45k) | func (stringlengths 29–253k) | target (class label, 2 classes) | project (stringlengths 36–167) |
---|---|---|---|
1,422 | @XmlRootElement(name = "state")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class StateWrapper extends BaseWrapper implements APIWrapper<State>, APIUnwrapper<State> {
@XmlElement
protected String name;
@XmlElement
protected String abbreviation;
@Override
public void wrapDetails(State model, HttpServletRequest request) {
this.name = model.getName();
this.abbreviation = model.getAbbreviation();
}
@Override
public void wrapSummary(State model, HttpServletRequest request) {
wrapDetails(model, request);
}
@Override
public State unwrap(HttpServletRequest request, ApplicationContext appContext) {
StateService stateService = (StateService) appContext.getBean("blStateService");
if (this.abbreviation != null) {
State state = stateService.findStateByAbbreviation(this.abbreviation);
return state;
}
return null;
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_StateWrapper.java |
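The wrapper above follows Broadleaf's APIWrapper/APIUnwrapper pattern: `wrapDetails` copies entity fields into JAXB-annotated fields for serialization, and `unwrap` resolves the entity back through the `blStateService` Spring bean. A minimal, hedged sketch of a round trip, assuming the `state`, `request`, and `appContext` values are supplied by the surrounding JAX-RS/Spring plumbing:

```java
// Hedged sketch: round-trips a State through the wrapper above. Only the
// methods declared on StateWrapper are used; the arguments are assumed to be
// provided by the enclosing container.
static State roundTrip(State state, HttpServletRequest request, ApplicationContext appContext) {
    StateWrapper wrapper = new StateWrapper();
    wrapper.wrapDetails(state, request);            // copies name and abbreviation
    return wrapper.unwrap(request, appContext);     // resolves via the "blStateService" bean
}
```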
1,065 | @RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class MapConfigTest {
/**
* Test method for {@link com.hazelcast.config.MapConfig#getName()}.
*/
@Test
public void testGetName() {
assertNull(new MapConfig().getName());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setName(java.lang.String)}.
*/
@Test
public void testSetName() {
assertEquals("map-test-name", new MapConfig().setName("map-test-name").getName());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#getBackupCount()}.
*/
@Test
public void testGetBackupCount() {
assertEquals(MapConfig.DEFAULT_BACKUP_COUNT, new MapConfig().getBackupCount());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setBackupCount(int)}.
*/
@Test
public void testSetBackupCount() {
assertEquals(0, new MapConfig().setBackupCount(0).getBackupCount());
assertEquals(1, new MapConfig().setBackupCount(1).getBackupCount());
assertEquals(2, new MapConfig().setBackupCount(2).getBackupCount());
assertEquals(3, new MapConfig().setBackupCount(3).getBackupCount());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setBackupCount(int)}.
*/
@Test(expected = IllegalArgumentException.class)
public void testSetBackupCountLowerLimit() {
new MapConfig().setBackupCount(MapConfig.MIN_BACKUP_COUNT - 1);
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#getEvictionPercentage()}.
*/
@Test
public void testGetEvictionPercentage() {
assertEquals(MapConfig.DEFAULT_EVICTION_PERCENTAGE, new MapConfig().getEvictionPercentage());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setEvictionPercentage(int)}.
*/
@Test
public void testSetEvictionPercentage() {
assertEquals(50, new MapConfig().setEvictionPercentage(50).getEvictionPercentage());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setEvictionPercentage(int)}.
*/
@Test(expected = IllegalArgumentException.class)
public void testSetEvictionPercentageLowerLimit() {
new MapConfig().setEvictionPercentage(MapConfig.MIN_EVICTION_PERCENTAGE - 1);
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setEvictionPercentage(int)}.
*/
@Test(expected = IllegalArgumentException.class)
public void testSetEvictionPercentageUpperLimit() {
new MapConfig().setEvictionPercentage(MapConfig.MAX_EVICTION_PERCENTAGE + 1);
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#getTimeToLiveSeconds()}.
*/
@Test
public void testGetTimeToLiveSeconds() {
assertEquals(MapConfig.DEFAULT_TTL_SECONDS, new MapConfig().getTimeToLiveSeconds());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setTimeToLiveSeconds(int)}.
*/
@Test
public void testSetTimeToLiveSeconds() {
assertEquals(1234, new MapConfig().setTimeToLiveSeconds(1234).getTimeToLiveSeconds());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#getMaxIdleSeconds()}.
*/
@Test
public void testGetMaxIdleSeconds() {
assertEquals(MapConfig.DEFAULT_MAX_IDLE_SECONDS, new MapConfig().getMaxIdleSeconds());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setMaxIdleSeconds(int)}.
*/
@Test
public void testSetMaxIdleSeconds() {
assertEquals(1234, new MapConfig().setMaxIdleSeconds(1234).getMaxIdleSeconds());
}
@Test
public void testGetMaxSize() {
assertEquals(MapConfig.DEFAULT_MAX_SIZE, new MapConfig().getMaxSizeConfig().getSize());
}
@Test
public void testSetMaxSize() {
assertEquals(1234, new MapConfig().getMaxSizeConfig().setSize(1234).getSize());
}
@Test
public void testSetMaxSizeMustBePositive() {
assertTrue(new MapConfig().getMaxSizeConfig().setSize(-1).getSize() > 0);
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#getEvictionPolicy()}.
*/
@Test
public void testGetEvictionPolicy() {
assertEquals(MapConfig.DEFAULT_EVICTION_POLICY, new MapConfig().getEvictionPolicy());
}
@Test
public void testSetEvictionPolicy() {
assertEquals(MapConfig.EvictionPolicy.LRU, new MapConfig().setEvictionPolicy(MapConfig.EvictionPolicy.LRU).getEvictionPolicy());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#getMapStoreConfig()}.
*/
@Test
public void testGetMapStoreConfig() {
assertNull(new MapConfig().getMapStoreConfig());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setMapStoreConfig(com.hazelcast.config.MapStoreConfig)}.
*/
@Test
public void testSetMapStoreConfig() {
MapStoreConfig mapStoreConfig = new MapStoreConfig();
assertEquals(mapStoreConfig, new MapConfig().setMapStoreConfig(mapStoreConfig).getMapStoreConfig());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#getNearCacheConfig()}.
*/
@Test
public void testGetNearCacheConfig() {
assertNull(new MapConfig().getNearCacheConfig());
}
/**
* Test method for {@link com.hazelcast.config.MapConfig#setNearCacheConfig(com.hazelcast.config.NearCacheConfig)}.
*/
@Test
public void testSetNearCacheConfig() {
NearCacheConfig nearCacheConfig = new NearCacheConfig();
assertEquals(nearCacheConfig, new MapConfig().setNearCacheConfig(nearCacheConfig).getNearCacheConfig());
}
@Test
public void configSetsForDefaultAllwaysIssue466() {
Config config = new XmlConfigBuilder().build();
MapStoreConfig mapStoreConfig = new MapStoreConfig();
mapStoreConfig.setEnabled(true);
mapStoreConfig.setWriteDelaySeconds(0);
mapStoreConfig.setClassName("com.hazelcast.examples.DummyStore");
config.getMapConfig("test").setMapStoreConfig(mapStoreConfig);
config.getMapConfig("default").setMapStoreConfig(null);
assertNotNull(config.getMapConfig("test").getMapStoreConfig());
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_config_MapConfigTest.java |
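The assertions above double as documentation of `MapConfig`'s fluent API and its validated ranges. A hedged sketch that chains the same setters (the map name and values are illustrative; out-of-range values throw `IllegalArgumentException`, per the tests):

```java
// Hedged sketch of the fluent MapConfig API exercised by the tests above.
MapConfig mapConfig = new MapConfig()
        .setName("orders")
        .setBackupCount(2)                          // must lie within MIN/MAX_BACKUP_COUNT
        .setEvictionPercentage(25)                  // must lie within MIN/MAX_EVICTION_PERCENTAGE
        .setTimeToLiveSeconds(300)
        .setEvictionPolicy(MapConfig.EvictionPolicy.LRU);
mapConfig.getMaxSizeConfig().setSize(10000);        // negative sizes are normalized to a positive value
```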
420 | public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder<RestoreSnapshotRequest, RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> {
/**
* Constructs a new restore snapshot request builder
*
* @param clusterAdminClient cluster admin client
*/
public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) {
super((InternalClusterAdminClient) clusterAdminClient, new RestoreSnapshotRequest());
}
/**
* Constructs a new restore snapshot request builder with the specified repository and snapshot names
*
* @param clusterAdminClient cluster admin client
* @param repository repository name
* @param name snapshot name
*/
public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String name) {
super((InternalClusterAdminClient) clusterAdminClient, new RestoreSnapshotRequest(repository, name));
}
/**
* Sets snapshot name
*
* @param snapshot snapshot name
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSnapshot(String snapshot) {
request.snapshot(snapshot);
return this;
}
/**
* Sets repository name
*
* @param repository repository name
* @return this builder
*/
public RestoreSnapshotRequestBuilder setRepository(String repository) {
request.repository(repository);
return this;
}
/**
* Sets the list of indices that should be restored from snapshot
* <p/>
* The list of indices supports multi-index syntax. For example: "+test*", "-test42" will restore all indices with
* the prefix "test" except the index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
* indices in the snapshot.
*
* @param indices list of indices
* @return this builder
*/
public RestoreSnapshotRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* Specifies how to handle requested indices that are unavailable (for example, indices that don't exist)
* and how wildcard expressions are expanded.
*
* @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
* @return this request
*/
public RestoreSnapshotRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request.indicesOptions(indicesOptions);
return this;
}
/**
* Sets rename pattern that should be applied to restored indices.
* <p/>
* Indices that match the rename pattern will be renamed according to {@link #setRenameReplacement(String)}. The
* rename pattern is applied according to {@link java.util.regex.Matcher#appendReplacement(StringBuffer, String)}.
* The request will fail if two or more indices are renamed to the same name.
*
* @param renamePattern rename pattern
* @return this builder
*/
public RestoreSnapshotRequestBuilder setRenamePattern(String renamePattern) {
request.renamePattern(renamePattern);
return this;
}
/**
* Sets rename replacement
* <p/>
* See {@link #setRenamePattern(String)} for more information.
*
* @param renameReplacement rename replacement
* @return this builder
*/
public RestoreSnapshotRequestBuilder setRenameReplacement(String renameReplacement) {
request.renameReplacement(renameReplacement);
return this;
}
/**
* Sets repository-specific restore settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(Settings settings) {
request.settings(settings);
return this;
}
/**
* Sets repository-specific restore settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(Settings.Builder settings) {
request.settings(settings);
return this;
}
/**
* Sets repository-specific restore settings in JSON, YAML or properties format
* <p/>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets repository-specific restore settings
* <p/>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this builder
*/
public RestoreSnapshotRequestBuilder setSettings(Map<String, Object> source) {
request.settings(source);
return this;
}
/**
* If this parameter is set to true the operation will wait for completion of the restore process before returning.
*
* @param waitForCompletion if true the operation will wait for completion
* @return this builder
*/
public RestoreSnapshotRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
request.waitForCompletion(waitForCompletion);
return this;
}
/**
* If set to true the restore procedure will restore global cluster state.
* <p/>
* The global cluster state includes persistent settings and index template definitions.
*
* @param restoreGlobalState true if global state should be restored from the snapshot
* @return this request
*/
public RestoreSnapshotRequestBuilder setRestoreGlobalState(boolean restoreGlobalState) {
request.includeGlobalState(restoreGlobalState);
return this;
}
@Override
protected void doExecute(ActionListener<RestoreSnapshotResponse> listener) {
((ClusterAdminClient) client).restoreSnapshot(request, listener);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_snapshots_restore_RestoreSnapshotRequestBuilder.java |
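A hedged sketch of chaining this builder, using only the setters shown above; the repository and snapshot names and the index/rename patterns are illustrative:

```java
// Hedged sketch: assembling a restore request with the builder above.
// The client is assumed to come from the node's injector or a TransportClient.
static RestoreSnapshotRequestBuilder restoreLogs(ClusterAdminClient clusterAdminClient) {
    return new RestoreSnapshotRequestBuilder(clusterAdminClient, "my_backup", "snapshot_1")
            .setIndices("logs-*")                     // multi-index syntax, see setIndices(...)
            .setRenamePattern("logs-(.+)")            // applied via Matcher.appendReplacement
            .setRenameReplacement("restored-logs-$1")
            .setWaitForCompletion(true);
}
```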
393 | public interface IndexProvider extends IndexInformation {
/**
* This method registers a new key for the specified index store with the given data type. This allows the IndexProvider
* to prepare the index if necessary.
*
* It is expected that this method is first called with each new key to inform the index of the expected type before the
* key is used in any documents.
*
* @param store Index store
* @param key New key to register
* @param information Information on the key to register
* @param tx enclosing transaction
* @throws com.thinkaurelius.titan.diskstorage.BackendException
*/
public void register(String store, String key, KeyInformation information, BaseTransaction tx) throws BackendException;
/**
* Mutates the index (adds and removes fields or entire documents)
*
* @param mutations Updates to the index. The outer map groups the mutations by store; the inner map contains
* all changes for each document in an {@link IndexMutation}.
* @param informations Information on the keys used in the mutation accessible through {@link KeyInformation.IndexRetriever}.
* @param tx Enclosing transaction
* @throws com.thinkaurelius.titan.diskstorage.BackendException
* @see IndexMutation
*/
public void mutate(Map<String,Map<String, IndexMutation>> mutations, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException;
/**
* Restores the index to the state of the primary data store as given in the {@code documents} variable. When this method returns, the index records
* for the given documents exactly match the provided data. Unlike {@link #mutate(java.util.Map, KeyInformation.IndexRetriever, BaseTransaction)}
* this method does not do a delta-update, but entirely replaces the documents with the provided data or deletes them if the document content is empty.
*
* @param documents The outer map maps stores to documents, the inner contains the documents mapping document ids to the document content which is a
* list of {@link IndexEntry}. If that list is empty, that means this document should not exist and ought to be deleted.
* @param informations Information on the keys used in the mutation accessible through {@link KeyInformation.IndexRetriever}.
* @param tx Enclosing transaction
* @throws BackendException
*/
public void restore(Map<String,Map<String, List<IndexEntry>>> documents, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException;
/**
* Executes the given query against the index.
*
* @param query Query to execute
* @param informations Information on the keys used in the query accessible through {@link KeyInformation.IndexRetriever}.
* @param tx Enclosing transaction
* @return The ids of all matching documents
* @throws com.thinkaurelius.titan.diskstorage.BackendException
* @see IndexQuery
*/
public List<String> query(IndexQuery query, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException;
/**
* Executes the given raw query against the index
*
* @param query Query to execute
* @param informations Information on the keys used in the query accessible through {@link KeyInformation.IndexRetriever}.
* @param tx Enclosing transaction
* @return Results objects for all matching documents (i.e. document id and score)
* @throws com.thinkaurelius.titan.diskstorage.BackendException
* @see RawQuery
*/
public Iterable<RawQuery.Result<String>> query(RawQuery query, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException;
/**
* Returns a transaction handle for a new index transaction.
*
* @return New Transaction Handle
*/
public BaseTransactionConfigurable beginTransaction(BaseTransactionConfig config) throws BackendException;
/**
* Closes the index
* @throws com.thinkaurelius.titan.diskstorage.BackendException
*/
public void close() throws BackendException;
/**
* Clears the index and removes all entries in all stores.
* @throws com.thinkaurelius.titan.diskstorage.BackendException
*/
public void clearStorage() throws BackendException;
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_indexing_IndexProvider.java |
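The javadoc above implies a two-phase contract: each key is registered before any document uses it, then batched changes are applied through `mutate`. A hedged sketch of that flow, using only the signatures declared in the interface (the store/key names are illustrative, and building `IndexMutation` instances is backend-specific and elided):

```java
// Hedged sketch of the register-then-mutate flow against an IndexProvider.
static void indexBatch(IndexProvider index,
                       KeyInformation nameInfo,
                       KeyInformation.IndexRetriever retriever,
                       Map<String, Map<String, IndexMutation>> mutations,
                       BaseTransaction tx) throws BackendException {
    // 1. Inform the index of the key's expected type before documents use it.
    index.register("vertex", "name", nameInfo, tx);
    // 2. Apply all per-store, per-document changes in a single call.
    index.mutate(mutations, retriever, tx);
}
```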
380 | public class AnnotationIterator implements Iterator<Annotation> {
private Iterator<Annotation> iterator;
private Annotation nextAnnotation;
private boolean includeRefinementAnnotations;
/**
* Creates a new AnnotationIterator.
* @param parent the parent iterator of annotations to iterate over
* @param includeRefinementAnnotations whether to include refinement annotations
* in addition to Ceylon annotations and problem marker annotations
*/
public AnnotationIterator(Iterator<Annotation> parent,
boolean includeRefinementAnnotations) {
this.iterator = parent;
this.includeRefinementAnnotations = includeRefinementAnnotations;
skip();
}
private void skip() {
while (iterator.hasNext()) {
Annotation next = iterator.next();
if (!next.isMarkedDeleted()) {
//TODO: rethink this condition!
if (next instanceof CeylonAnnotation ||
includeRefinementAnnotations &&
next instanceof RefinementAnnotation ||
isProblemMarkerAnnotation(next)) {
nextAnnotation = next;
return;
}
}
}
nextAnnotation = null;
}
private static boolean isProblemMarkerAnnotation(Annotation annotation) {
if (!(annotation instanceof MarkerAnnotation))
return false;
try {
MarkerAnnotation ma = (MarkerAnnotation) annotation;
return ma.getMarker().isSubtypeOf(IMarker.PROBLEM) &&
!ma.getMarker().getType().equals(CeylonBuilder.PROBLEM_MARKER_ID);
}
catch (CoreException e) {
return false;
}
}
public boolean hasNext() {
return nextAnnotation != null;
}
public Annotation next() {
try {
return nextAnnotation;
}
finally {
skip();
}
}
public void remove() {
throw new UnsupportedOperationException();
}
} | 1no label
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_hover_AnnotationIterator.java |
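A hedged sketch of consuming the iterator above; the source iterator is assumed to come from an Eclipse annotation model (for example `IAnnotationModel#getAnnotationIterator()`):

```java
// Hedged sketch: collects the annotations the filter above lets through
// (Ceylon annotations, optional refinement annotations, problem markers).
static List<Annotation> visibleAnnotations(Iterator<Annotation> source) {
    List<Annotation> result = new ArrayList<Annotation>();
    AnnotationIterator it = new AnnotationIterator(source, true /*includeRefinementAnnotations*/);
    while (it.hasNext()) {
        result.add(it.next());   // deleted and non-matching annotations were already skipped
    }
    return result;
}
```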
537 | @Deprecated
public class TransportGatewaySnapshotAction extends TransportBroadcastOperationAction<GatewaySnapshotRequest, GatewaySnapshotResponse, ShardGatewaySnapshotRequest, ShardGatewaySnapshotResponse> {
private final IndicesService indicesService;
@Inject
public TransportGatewaySnapshotAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService) {
super(settings, threadPool, clusterService, transportService);
this.indicesService = indicesService;
}
@Override
protected String executor() {
return ThreadPool.Names.SNAPSHOT;
}
@Override
protected String transportAction() {
return GatewaySnapshotAction.NAME;
}
@Override
protected GatewaySnapshotRequest newRequest() {
return new GatewaySnapshotRequest();
}
@Override
protected GatewaySnapshotResponse newResponse(GatewaySnapshotRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
int successfulShards = 0;
int failedShards = 0;
List<ShardOperationFailedException> shardFailures = null;
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// non active shard, ignore
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = Lists.newArrayList();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
successfulShards++;
}
}
return new GatewaySnapshotResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
}
@Override
protected ShardGatewaySnapshotRequest newShardRequest() {
return new ShardGatewaySnapshotRequest();
}
@Override
protected ShardGatewaySnapshotRequest newShardRequest(ShardRouting shard, GatewaySnapshotRequest request) {
return new ShardGatewaySnapshotRequest(shard.index(), shard.id(), request);
}
@Override
protected ShardGatewaySnapshotResponse newShardResponse() {
return new ShardGatewaySnapshotResponse();
}
@Override
protected ShardGatewaySnapshotResponse shardOperation(ShardGatewaySnapshotRequest request) throws ElasticsearchException {
IndexShardGatewayService shardGatewayService = indicesService.indexServiceSafe(request.index())
.shardInjectorSafe(request.shardId()).getInstance(IndexShardGatewayService.class);
shardGatewayService.snapshot("api");
return new ShardGatewaySnapshotResponse(request.index(), request.shardId());
}
/**
* The snapshot request works against all primary shards.
*/
@Override
protected GroupShardsIterator shards(ClusterState clusterState, GatewaySnapshotRequest request, String[] concreteIndices) {
return clusterState.routingTable().activePrimaryShardsGrouped(concreteIndices, true);
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, GatewaySnapshotRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, GatewaySnapshotRequest request, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_gateway_snapshot_TransportGatewaySnapshotAction.java |
1,235 | public class OMMapBufferEntry implements Comparable<OMMapBufferEntry> {
private static final OProfilerMBean PROFILER = Orient.instance().getProfiler();
private static final int FORCE_DELAY;
private static final int FORCE_RETRY;
static Method cleanerMethod;
Lock lock = new ReentrantLock();
volatile OFileMMap file;
volatile MappedByteBuffer buffer;
final long beginOffset;
final int size;
volatile boolean dirty;
private volatile long lastUsed;
static {
FORCE_DELAY = OGlobalConfiguration.FILE_MMAP_FORCE_DELAY.getValueAsInteger();
FORCE_RETRY = OGlobalConfiguration.FILE_MMAP_FORCE_RETRY.getValueAsInteger();
// GET SUN JDK METHOD TO CLEAN MMAP BUFFERS
try {
final Class<?> sunClass = Class.forName("sun.nio.ch.DirectBuffer");
cleanerMethod = sunClass.getMethod("cleaner");
} catch (Exception e) {
// IGNORE IT AND USE GC TO FREE RESOURCES
}
}
public OMMapBufferEntry(final OFileMMap iFile, final MappedByteBuffer buffer, final long beginOffset, final int size) {
this.file = iFile;
this.buffer = buffer;
this.beginOffset = beginOffset;
this.size = size;
this.dirty = false;
updateLastUsedTime();
}
/**
* Flushes the memory mapped buffer to disk only if it's dirty.
*
* @return true if the buffer has been successfully flushed, otherwise false.
*/
boolean flush() {
lock.lock();
try {
if (!dirty)
return true;
final long timer = PROFILER.startChrono();
// FORCE THE WRITE OF THE BUFFER
for (int i = 0; i < FORCE_RETRY; ++i) {
try {
buffer.force();
dirty = false;
break;
} catch (Exception e) {
OLogManager.instance().debug(this,
"Cannot write memory buffer to disk. Retrying (" + (i + 1) + "/" + FORCE_RETRY + ")...");
OMemoryWatchDog.freeMemoryForResourceCleanup(FORCE_DELAY);
}
}
if (dirty)
OLogManager.instance().debug(this, "Cannot commit memory buffer to disk after %d retries", FORCE_RETRY);
else
PROFILER.updateCounter(PROFILER.getProcessMetric("file.mmap.pagesCommitted"), "Memory mapped pages committed to disk", +1);
PROFILER.stopChrono(PROFILER.getProcessMetric("file.mmap.commitPages"), "Commit memory mapped pages to disk", timer);
return !dirty;
} finally {
lock.unlock();
}
}
@Override
public String toString() {
final StringBuilder builder = new StringBuilder();
builder.append("OMMapBufferEntry [file=").append(file).append(", beginOffset=").append(beginOffset).append(", size=")
.append(size).append("]");
return builder.toString();
}
/**
* Forces the file closed if it is still open, flushing dirty pages and releasing the mapped buffer.
*/
void close() {
lock.lock();
try {
if (buffer != null) {
if (dirty)
buffer.force();
if (cleanerMethod != null) {
// USE SUN JVM SPECIAL METHOD TO FREE RESOURCES
try {
final Object cleaner = cleanerMethod.invoke(buffer);
if (cleaner != null)
cleaner.getClass().getMethod("clean").invoke(cleaner);
} catch (Exception e) {
OLogManager.instance().error(this, "Error on calling Sun's MMap buffer clean", e);
}
}
buffer = null;
}
lastUsed = 0;
file = null;
} finally {
lock.unlock();
}
}
public int compareTo(final OMMapBufferEntry iOther) {
// Compare offsets directly: casting the long difference to int could overflow.
return beginOffset < iOther.beginOffset ? -1 : beginOffset == iOther.beginOffset ? 0 : 1;
}
boolean isValid() {
return buffer != null;
}
boolean isDirty() {
return dirty;
}
void setDirty() {
this.dirty = true;
}
void acquireLock() {
lock.lock();
}
void releaseLock() {
lock.unlock();
}
public void updateLastUsedTime() {
lastUsed = System.currentTimeMillis();
}
public long getLastUsed() {
return lastUsed;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_fs_OMMapBufferEntry.java |
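`close()` above uses the undocumented `sun.nio.ch.DirectBuffer#cleaner()` hook to unmap the buffer eagerly rather than waiting for garbage collection. A standalone, hedged sketch of the same reflective trick (it only works on pre-Java-9 Sun/Oracle-style JVMs; anywhere else it silently falls back to GC, exactly as the entry does):

```java
// Hedged sketch of the reflective unmap performed in close().
static void tryUnmap(java.nio.MappedByteBuffer buffer) {
    try {
        Object cleaner = Class.forName("sun.nio.ch.DirectBuffer")
                .getMethod("cleaner").invoke(buffer);
        if (cleaner != null) {
            cleaner.getClass().getMethod("clean").invoke(cleaner);
        }
    } catch (Exception ignored) {
        // no cleaner available: let the garbage collector release the mapping
    }
}
```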
1,383 | public static class Builder {
private static final Set<String> VALID_FIELDS = Sets.newHashSet("template", "order", "mappings", "settings");
static {
VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet());
}
private String name;
private int order;
private String template;
private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
private final ImmutableOpenMap.Builder<String, CompressedString> mappings;
private final ImmutableOpenMap.Builder<String, IndexMetaData.Custom> customs;
public Builder(String name) {
this.name = name;
mappings = ImmutableOpenMap.builder();
customs = ImmutableOpenMap.builder();
}
public Builder(IndexTemplateMetaData indexTemplateMetaData) {
this.name = indexTemplateMetaData.name();
order(indexTemplateMetaData.order());
template(indexTemplateMetaData.template());
settings(indexTemplateMetaData.settings());
mappings = ImmutableOpenMap.builder(indexTemplateMetaData.mappings());
customs = ImmutableOpenMap.builder(indexTemplateMetaData.customs());
}
public Builder order(int order) {
this.order = order;
return this;
}
public Builder template(String template) {
this.template = template;
return this;
}
public String template() {
return template;
}
public Builder settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
public Builder settings(Settings settings) {
this.settings = settings;
return this;
}
public Builder removeMapping(String mappingType) {
mappings.remove(mappingType);
return this;
}
public Builder putMapping(String mappingType, CompressedString mappingSource) throws IOException {
mappings.put(mappingType, mappingSource);
return this;
}
public Builder putMapping(String mappingType, String mappingSource) throws IOException {
mappings.put(mappingType, new CompressedString(mappingSource));
return this;
}
public Builder putCustom(String type, IndexMetaData.Custom customIndexMetaData) {
this.customs.put(type, customIndexMetaData);
return this;
}
public Builder removeCustom(String type) {
this.customs.remove(type);
return this;
}
public IndexMetaData.Custom getCustom(String type) {
return this.customs.get(type);
}
public IndexTemplateMetaData build() {
return new IndexTemplateMetaData(name, order, template, settings, mappings.build(), customs.build());
}
public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(indexTemplateMetaData.name(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("order", indexTemplateMetaData.order());
builder.field("template", indexTemplateMetaData.template());
builder.startObject("settings");
for (Map.Entry<String, String> entry : indexTemplateMetaData.settings().getAsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
builder.endObject();
if (params.paramAsBoolean("reduce_mappings", false)) {
builder.startObject("mappings");
for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
byte[] mappingSource = cursor.value.uncompressed();
XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
Map<String, Object> mapping = parser.map();
if (mapping.size() == 1 && mapping.containsKey(cursor.key)) {
// the type name is the root value, reduce it
mapping = (Map<String, Object>) mapping.get(cursor.key);
}
builder.field(cursor.key);
builder.map(mapping);
}
builder.endObject();
} else {
builder.startArray("mappings");
for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
byte[] data = cursor.value.uncompressed();
XContentParser parser = XContentFactory.xContent(data).createParser(data);
Map<String, Object> mapping = parser.mapOrderedAndClose();
builder.map(mapping);
}
builder.endArray();
}
for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : indexTemplateMetaData.customs()) {
builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
IndexMetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
builder.endObject();
}
public static IndexTemplateMetaData fromXContentStandalone(XContentParser parser) throws IOException {
XContentParser.Token token = parser.nextToken();
if (token == null) {
throw new IOException("no data");
}
if (token != XContentParser.Token.START_OBJECT) {
throw new IOException("should start object");
}
token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new IOException("the first field should be the template name");
}
return fromXContent(parser);
}
public static IndexTemplateMetaData fromXContent(XContentParser parser) throws IOException {
Builder builder = new Builder(parser.currentName());
String currentFieldName = skipTemplateName(parser);
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("settings".equals(currentFieldName)) {
ImmutableSettings.Builder templateSettingsBuilder = ImmutableSettings.settingsBuilder();
for (Map.Entry<String, String> entry : SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered()).entrySet()) {
if (!entry.getKey().startsWith("index.")) {
templateSettingsBuilder.put("index." + entry.getKey(), entry.getValue());
} else {
templateSettingsBuilder.put(entry.getKey(), entry.getValue());
}
}
builder.settings(templateSettingsBuilder.build());
} else if ("mappings".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
String mappingType = currentFieldName;
Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
builder.putMapping(mappingType, XContentFactory.jsonBuilder().map(mappingSource).string());
}
}
} else {
// check if its a custom index metadata
IndexMetaData.Custom.Factory<IndexMetaData.Custom> factory = IndexMetaData.lookupFactory(currentFieldName);
if (factory == null) {
//TODO warn
parser.skipChildren();
} else {
builder.putCustom(factory.type(), factory.fromXContent(parser));
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("mappings".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Map<String, Object> mapping = parser.mapOrdered();
if (mapping.size() == 1) {
String mappingType = mapping.keySet().iterator().next();
String mappingSource = XContentFactory.jsonBuilder().map(mapping).string();
if (mappingSource == null) {
// crap, no mapping source, warn?
} else {
builder.putMapping(mappingType, mappingSource);
}
}
}
}
} else if (token.isValue()) {
if ("template".equals(currentFieldName)) {
builder.template(parser.text());
} else if ("order".equals(currentFieldName)) {
builder.order(parser.intValue());
}
}
}
return builder.build();
}
private static String skipTemplateName(XContentParser parser) throws IOException {
XContentParser.Token token = parser.nextToken();
if (token != null && token == XContentParser.Token.START_OBJECT) {
token = parser.nextToken();
if (token == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();
if (VALID_FIELDS.contains(currentFieldName)) {
return currentFieldName;
} else {
// we just hit the template name, which should be ignored and we move on
parser.nextToken();
}
}
}
return null;
}
public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException {
Builder builder = new Builder(in.readString());
builder.order(in.readInt());
builder.template(in.readString());
builder.settings(ImmutableSettings.readSettingsFromStream(in));
int mappingsSize = in.readVInt();
for (int i = 0; i < mappingsSize; i++) {
builder.putMapping(in.readString(), CompressedString.readCompressedString(in));
}
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
return builder.build();
}
public static void writeTo(IndexTemplateMetaData indexTemplateMetaData, StreamOutput out) throws IOException {
out.writeString(indexTemplateMetaData.name());
out.writeInt(indexTemplateMetaData.order());
out.writeString(indexTemplateMetaData.template());
ImmutableSettings.writeSettingsToStream(indexTemplateMetaData.settings(), out);
out.writeVInt(indexTemplateMetaData.mappings().size());
for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
out.writeString(cursor.key);
cursor.value.writeTo(out);
}
out.writeVInt(indexTemplateMetaData.customs().size());
for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : indexTemplateMetaData.customs()) {
out.writeString(cursor.key);
IndexMetaData.lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
}
}
} | 1no label
| src_main_java_org_elasticsearch_cluster_metadata_IndexTemplateMetaData.java |
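A hedged sketch of building a template with the `Builder` above; the template name, pattern, setting, and mapping body are illustrative:

```java
// Hedged sketch: assembling an IndexTemplateMetaData via the fluent Builder.
static IndexTemplateMetaData logsTemplate() throws IOException {
    return new IndexTemplateMetaData.Builder("logs_template")
            .order(1)
            .template("logs-*")                                   // index-name pattern the template matches
            .settings(ImmutableSettings.settingsBuilder()
                    .put("index.number_of_shards", "3"))
            .putMapping("log", "{\"log\":{\"properties\":{}}}")   // type name -> mapping source JSON
            .build();
}
```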
367 | this.rows = Iterators.filter(rows.iterator(), new Predicate<Result>() {
@Override
public boolean apply(@Nullable Result result) {
if (result == null)
return false;
try {
StaticBuffer id = StaticArrayBuffer.of(result.getRow());
id.getLong(0);
} catch (NumberFormatException e) {
return false;
}
return true;
}
}); | 0true
| titan-hbase-parent_titan-hbase-core_src_main_java_com_thinkaurelius_titan_diskstorage_hbase_HBaseKeyColumnValueStore.java |
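The snippet above is Guava's `Iterators.filter` with an anonymous `Predicate` that drops nulls and rows whose key does not parse as an id. A self-contained sketch of the same pattern (plain strings stand in for HBase `Result` rows):

```java
import com.google.common.base.Predicate;
import com.google.common.collect.Iterators;
import java.util.Arrays;
import java.util.Iterator;

public class FilterSketch {
    public static void main(String[] args) {
        Iterator<String> rows = Arrays.asList("42", null, "abc", "7").iterator();
        Iterator<String> numeric = Iterators.filter(rows, new Predicate<String>() {
            @Override
            public boolean apply(String row) {
                if (row == null)
                    return false;
                try {
                    Long.parseLong(row);   // keep only rows with a parseable id
                } catch (NumberFormatException e) {
                    return false;
                }
                return true;
            }
        });
        while (numeric.hasNext()) {
            System.out.println(numeric.next());   // prints 42 and 7
        }
    }
}
```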
1,589 | int index = Arrays.binarySearch(properties, searchProperty, new Comparator<Property>() {
@Override
public int compare(Property o1, Property o2) {
if (o1 == null && o2 == null) {
return 0;
} else if (o1 == null) {
return 1;
} else if (o2 == null) {
return -1;
}
return o1.getName().compareTo(o2.getName());
}
}); | 0true
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_Entity.java |
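`Arrays.binarySearch` with a comparator only works if the array was sorted with that same comparator; the nulls-last ordering above implies the properties array keeps its null slots at the end. A self-contained sketch of the pattern:

```java
import java.util.Arrays;
import java.util.Comparator;

public class NullsLastSearch {
    // Same ordering as the anonymous comparator above: nulls sort last.
    static final Comparator<String> NULLS_LAST = new Comparator<String>() {
        @Override
        public int compare(String o1, String o2) {
            if (o1 == null && o2 == null) return 0;
            if (o1 == null) return 1;
            if (o2 == null) return -1;
            return o1.compareTo(o2);
        }
    };

    public static void main(String[] args) {
        String[] names = {"price", null, "name", "sku"};
        Arrays.sort(names, NULLS_LAST);                                       // [name, price, sku, null]
        System.out.println(Arrays.binarySearch(names, "price", NULLS_LAST));  // 1
    }
}
```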
3,150 | public abstract class AbstractFieldDataImplTests extends AbstractFieldDataTests {
protected String one() {
return "1";
}
protected String two() {
return "2";
}
protected String three() {
return "3";
}
protected String four() {
return "4";
}
protected String toString(Object value) {
if (value instanceof BytesRef) {
return ((BytesRef) value).utf8ToString();
}
return value.toString();
}
protected abstract void fillSingleValueAllSet() throws Exception;
protected abstract void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception;
@Test
public void testDeletedDocs() throws Exception {
add2SingleValuedDocumentsAndDeleteOneOfThem();
IndexFieldData indexFieldData = getForField("value");
AtomicReaderContext readerContext = refreshReader();
AtomicFieldData fieldData = indexFieldData.load(readerContext);
BytesValues values = fieldData.getBytesValues(randomBoolean());
for (int i = 0; i < fieldData.getNumDocs(); ++i) {
assertThat(values.setDocument(i), greaterThanOrEqualTo(1));
}
}
@Test
public void testSingleValueAllSet() throws Exception {
fillSingleValueAllSet();
IndexFieldData indexFieldData = getForField("value");
AtomicReaderContext readerContext = refreshReader();
AtomicFieldData fieldData = indexFieldData.load(readerContext);
assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
assertThat(fieldData.getNumDocs(), equalTo(3));
BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
assertThat(bytesValues.isMultiValued(), equalTo(false));
assertThat(bytesValues.setDocument(0), equalTo(1));
assertThat(bytesValues.nextValue(), equalTo(new BytesRef(two())));
assertThat(bytesValues.setDocument(1), equalTo(1));
assertThat(bytesValues.nextValue(), equalTo(new BytesRef(one())));
assertThat(bytesValues.setDocument(2), equalTo(1));
assertThat(bytesValues.nextValue(), equalTo(new BytesRef(three())));
assertValues(bytesValues, 0, two());
assertValues(bytesValues, 1, one());
assertValues(bytesValues, 2, three());
BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(one())));
assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
assertHashedValues(hashedBytesValues, 0, two());
assertHashedValues(hashedBytesValues, 1, one());
assertHashedValues(hashedBytesValues, 2, three());
IndexSearcher searcher = new IndexSearcher(readerContext.reader());
TopFieldDocs topDocs;
topDocs = searcher.search(new MatchAllDocsQuery(), 10,
new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
assertThat(topDocs.totalHits, equalTo(3));
assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one()));
assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
assertThat(toString(((FieldDoc) topDocs.scoreDocs[1]).fields[0]), equalTo(two()));
assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
assertThat(toString(((FieldDoc) topDocs.scoreDocs[2]).fields[0]), equalTo(three()));
topDocs = searcher.search(new MatchAllDocsQuery(), 10,
new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
assertThat(topDocs.totalHits, equalTo(3));
assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
}
private HashedBytesRef convert(BytesValues values, int doc) {
if (values.setDocument(doc) > 0) {
return new HashedBytesRef(BytesRef.deepCopyOf(values.nextValue()), values.currentValueHash());
} else {
return new HashedBytesRef(new BytesRef());
}
}
protected abstract void fillSingleValueWithMissing() throws Exception;
public void assertValues(BytesValues values, int docId, BytesRef... actualValues) {
assertThat(values.setDocument(docId), equalTo(actualValues.length));
for (int i = 0; i < actualValues.length; i++) {
assertThat(values.nextValue(), equalTo(actualValues[i]));
}
}
public void assertValues(BytesValues values, int docId, String... actualValues) {
assertThat(values.setDocument(docId), equalTo(actualValues.length));
for (int i = 0; i < actualValues.length; i++) {
assertThat(values.nextValue(), equalTo(new BytesRef(actualValues[i])));
}
}
public void assertHashedValues(BytesValues values, int docId, BytesRef... actualValues) {
assertThat(values.setDocument(docId), equalTo(actualValues.length));
BytesRef r = new BytesRef();
for (int i = 0; i < actualValues.length; i++) {
assertThat(values.nextValue(), equalTo(new HashedBytesRef(actualValues[i]).bytes));
assertThat(values.currentValueHash(), equalTo(new HashedBytesRef(actualValues[i]).hash));
}
}
public void assertHashedValues(BytesValues values, int docId, String... actualValues) {
assertThat(values.setDocument(docId), equalTo(actualValues.length));
for (int i = 0; i < actualValues.length; i++) {
assertThat(values.nextValue(), equalTo(new HashedBytesRef(actualValues[i]).bytes));
assertThat(values.currentValueHash(), equalTo(new HashedBytesRef(actualValues[i]).hash));
}
}
@Test
public void testSingleValueWithMissing() throws Exception {
fillSingleValueWithMissing();
IndexFieldData indexFieldData = getForField("value");
AtomicFieldData fieldData = indexFieldData.load(refreshReader());
assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
assertThat(fieldData.getNumDocs(), equalTo(3));
BytesValues bytesValues = fieldData
.getBytesValues(randomBoolean());
assertThat(bytesValues.isMultiValued(), equalTo(false));
assertValues(bytesValues, 0, two());
assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
assertValues(bytesValues, 2, three());
BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(new BytesRef())));
assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
assertHashedValues(hashedBytesValues, 0, two());
assertHashedValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
assertHashedValues(hashedBytesValues, 2, three());
}
protected abstract void fillMultiValueAllSet() throws Exception;
@Test
public void testMultiValueAllSet() throws Exception {
fillMultiValueAllSet();
IndexFieldData indexFieldData = getForField("value");
AtomicFieldData fieldData = indexFieldData.load(refreshReader());
assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
assertThat(fieldData.getNumDocs(), equalTo(3));
BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
assertThat(bytesValues.isMultiValued(), equalTo(true));
assertValues(bytesValues, 0, two(), four());
assertValues(bytesValues, 1, one());
assertValues(bytesValues, 2, three());
BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(one())));
assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
assertHashedValues(hashedBytesValues, 0, two(), four());
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
assertThat(topDocs.totalHits, equalTo(3));
assertThat(topDocs.scoreDocs.length, equalTo(3));
assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
assertThat(topDocs.totalHits, equalTo(3));
assertThat(topDocs.scoreDocs.length, equalTo(3));
assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
}
protected abstract void fillMultiValueWithMissing() throws Exception;
@Test
public void testMultiValueWithMissing() throws Exception {
fillMultiValueWithMissing();
IndexFieldData indexFieldData = getForField("value");
AtomicFieldData fieldData = indexFieldData.load(refreshReader());
assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
assertThat(fieldData.getNumDocs(), equalTo(3));
BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
assertThat(bytesValues.isMultiValued(), equalTo(true));
assertValues(bytesValues, 0, two(), four());
assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
assertThat(convert(hashedBytesValues, 0), equalTo(new HashedBytesRef(two())));
assertThat(convert(hashedBytesValues, 1), equalTo(new HashedBytesRef(new BytesRef())));
assertThat(convert(hashedBytesValues, 2), equalTo(new HashedBytesRef(three())));
assertHashedValues(bytesValues, 0, two(), four());
assertHashedValues(bytesValues, 1, Strings.EMPTY_ARRAY);
assertHashedValues(bytesValues, 2, three());
assertHashedValues(hashedBytesValues, 0, two(), four());
assertHashedValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
assertHashedValues(hashedBytesValues, 2, three());
}
public void testMissingValueForAll() throws Exception {
fillAllMissing();
IndexFieldData indexFieldData = getForField("value");
AtomicFieldData fieldData = indexFieldData.load(refreshReader());
// Some impls (FST) report a size of 0, while others (PagedBytes) still take up space even when no actual data is loaded
assertThat(fieldData.getMemorySizeInBytes(), greaterThanOrEqualTo(0l));
assertThat(fieldData.getNumDocs(), equalTo(3));
BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
assertThat(bytesValues.isMultiValued(), equalTo(false));
assertValues(bytesValues, 0, Strings.EMPTY_ARRAY);
assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
assertValues(bytesValues, 2, Strings.EMPTY_ARRAY);
BytesValues hashedBytesValues = fieldData.getBytesValues(randomBoolean());
assertValues(hashedBytesValues, 0, Strings.EMPTY_ARRAY);
assertValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
assertValues(hashedBytesValues, 2, Strings.EMPTY_ARRAY);
}
protected abstract void fillAllMissing() throws Exception;
@Test
public void testSortMultiValuesFields() throws Exception {
fillExtendedMvSet();
IndexFieldData indexFieldData = getForField("value");
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10,
new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MIN))));
assertThat(topDocs.totalHits, equalTo(8));
assertThat(topDocs.scoreDocs.length, equalTo(8));
assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("!08"));
assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("02"));
assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("03"));
assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("04"));
assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("06"));
assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("08"));
assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
assertThat((BytesRef) ((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(BytesRefFieldComparatorSource.MAX_TERM));
assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
assertThat((BytesRef) ((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(BytesRefFieldComparatorSource.MAX_TERM));
topDocs = searcher.search(new MatchAllDocsQuery(), 10,
new Sort(new SortField("value", indexFieldData.comparatorSource(null, SortMode.MAX), true)));
assertThat(topDocs.totalHits, equalTo(8));
assertThat(topDocs.scoreDocs.length, equalTo(8));
assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("10"));
assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("08"));
assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("06"));
assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("04"));
assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("03"));
assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("!10"));
assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
}
protected abstract void fillExtendedMvSet() throws Exception;
} | 0true
| src_test_java_org_elasticsearch_index_fielddata_AbstractFieldDataImplTests.java |
1,187 | public interface MapStoreFactory<K, V> {
/**
* Produces a MapLoader or a MapStore for the given map name and properties.
*
* @param mapName name of the distributed map that the produced MapLoader or MapStore will serve
* @param properties the properties of the MapStoreConfig for the produced MapLoader or MapStore
* @return a new MapLoader or MapStore instance serving the given map
*/
MapLoader<K, V> newMapStore(String mapName, Properties properties);
} | 0true
| hazelcast_src_main_java_com_hazelcast_core_MapStoreFactory.java |
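A hedged sketch of implementing the factory with a trivial in-memory loader. The three `MapLoader` methods used here (`load`, `loadAll`, `loadAllKeys`) match the 3.x-era interface this file ships with; the `loadAllKeys` return type changed in later Hazelcast versions, so treat the exact signatures as an assumption:

```java
import com.hazelcast.core.MapLoader;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

public class InMemoryMapStoreFactory implements MapStoreFactory<String, String> {
    @Override
    public MapLoader<String, String> newMapStore(String mapName, Properties properties) {
        // One backing map per produced loader; a real factory would use
        // mapName/properties to pick a data source.
        final Map<String, String> backing = new ConcurrentHashMap<String, String>();
        return new MapLoader<String, String>() {
            public String load(String key) {
                return backing.get(key);
            }
            public Map<String, String> loadAll(Collection<String> keys) {
                Map<String, String> result = new HashMap<String, String>();
                for (String key : keys) {
                    result.put(key, backing.get(key));
                }
                return result;
            }
            public Set<String> loadAllKeys() {
                return backing.keySet();
            }
        };
    }
}
```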
206 | public class CacheFactoryException extends Exception {
private static final long serialVersionUID = 1L;
public CacheFactoryException() {
super();
}
public CacheFactoryException(String message, Throwable cause) {
super(message, cause);
}
public CacheFactoryException(String message) {
super(message);
}
public CacheFactoryException(Throwable cause) {
super(cause);
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_cache_engine_CacheFactoryException.java |
1,045 | public enum InMemoryFormat {
BINARY, OBJECT, OFFHEAP
} | 0true
| hazelcast_src_main_java_com_hazelcast_config_InMemoryFormat.java |
857 | @SuppressWarnings("unchecked")
public class ORole extends ODocumentWrapper {
private static final long serialVersionUID = 1L;
public static final String ADMIN = "admin";
public static final String CLASS_NAME = "ORole";
public enum ALLOW_MODES {
DENY_ALL_BUT, ALLOW_ALL_BUT
}
// CRUD OPERATIONS
private static Map<Integer, String> PERMISSION_BIT_NAMES;
public final static int PERMISSION_NONE = 0;
public final static int PERMISSION_CREATE = registerPermissionBit(0, "Create");
public final static int PERMISSION_READ = registerPermissionBit(1, "Read");
public final static int PERMISSION_UPDATE = registerPermissionBit(2, "Update");
public final static int PERMISSION_DELETE = registerPermissionBit(3, "Delete");
public final static int PERMISSION_ALL = PERMISSION_CREATE + PERMISSION_READ + PERMISSION_UPDATE
+ PERMISSION_DELETE;
protected final static byte STREAM_DENY = 0;
protected final static byte STREAM_ALLOW = 1;
protected ALLOW_MODES mode = ALLOW_MODES.DENY_ALL_BUT;
protected ORole parentRole;
protected Map<String, Byte> rules = new LinkedHashMap<String, Byte>();
/**
* Constructor used in unmarshalling.
*/
public ORole() {
}
public ORole(final String iName, final ORole iParent, final ALLOW_MODES iAllowMode) {
super(CLASS_NAME);
document.field("name", iName);
parentRole = iParent;
document.field("inheritedRole", iParent != null ? iParent.getDocument() : null);
setMode(iAllowMode);
document.field("rules", new HashMap<String, Number>());
}
/**
* Create the role by reading the source document.
*/
public ORole(final ODocument iSource) {
fromStream(iSource);
}
@Override
@OBeforeDeserialization
public void fromStream(final ODocument iSource) {
if (document != null)
return;
document = iSource;
try {
mode = ((Number) document.field("mode")).byteValue() == STREAM_ALLOW ? ALLOW_MODES.ALLOW_ALL_BUT : ALLOW_MODES.DENY_ALL_BUT;
} catch (Exception ex) {
OLogManager.instance().error(this, "illegal mode " + ex.getMessage());
mode = ALLOW_MODES.DENY_ALL_BUT;
}
final OIdentifiable role = document.field("inheritedRole");
parentRole = role != null ? document.getDatabase().getMetadata().getSecurity().getRole(role) : null;
final Map<String, Number> storedRules = document.field("rules");
if (storedRules != null)
for (Entry<String, Number> a : storedRules.entrySet()) {
rules.put(a.getKey().toLowerCase(), a.getValue().byteValue());
}
if (getName().equals("admin") && !hasRule(ODatabaseSecurityResources.BYPASS_RESTRICTED))
// FIX 1.5.1 TO ASSIGN database.bypassRestricted rule to the role
addRule(ODatabaseSecurityResources.BYPASS_RESTRICTED, ORole.PERMISSION_ALL).save();
}
public boolean allow(final String iResource, final int iCRUDOperation) {
// CHECK FOR SECURITY AS DIRECT RESOURCE
final Byte access = rules.get(iResource);
if (access != null) {
final byte mask = (byte) iCRUDOperation;
return (access.byteValue() & mask) == mask;
} else if (parentRole != null)
// DELEGATE TO THE PARENT ROLE IF ANY
return parentRole.allow(iResource, iCRUDOperation);
return mode == ALLOW_MODES.ALLOW_ALL_BUT;
}
public boolean hasRule(final String iResource) {
return rules.containsKey(iResource.toLowerCase());
}
public ORole addRule(final String iResource, final int iOperation) {
rules.put(iResource.toLowerCase(), (byte) iOperation);
document.field("rules", rules);
return this;
}
/**
* Grant a permission to the resource.
*
* @param iResource
* Requested resource
* @param iOperation
* Permission to grant/add
* @return this role
*/
public ORole grant(final String iResource, final int iOperation) {
final Byte current = rules.get(iResource);
byte currentValue = current == null ? PERMISSION_NONE : current.byteValue();
currentValue |= (byte) iOperation;
rules.put(iResource.toLowerCase(), currentValue);
document.field("rules", rules);
return this;
}
/**
* Revoke a permission to the resource.
*
* @param iResource
* Requested resource
* @param iOperation
* Permission to revoke
* @return this role
*/
public ORole revoke(final String iResource, final int iOperation) {
if (iOperation == PERMISSION_NONE)
return this;
final Byte current = rules.get(iResource);
byte currentValue;
if (current == null)
currentValue = PERMISSION_NONE;
else {
currentValue = current.byteValue();
currentValue &= ~(byte) iOperation;
}
rules.put(iResource.toLowerCase(), currentValue);
document.field("rules", rules);
return this;
}
public String getName() {
return document.field("name");
}
public ALLOW_MODES getMode() {
return mode;
}
public ORole setMode(final ALLOW_MODES iMode) {
this.mode = iMode;
document.field("mode", mode == ALLOW_MODES.ALLOW_ALL_BUT ? STREAM_ALLOW : STREAM_DENY);
return this;
}
public ORole getParentRole() {
return parentRole;
}
public ORole setParentRole(final ORole iParent) {
this.parentRole = iParent;
document.field("inheritedRole", parentRole != null ? parentRole.getDocument() : null);
return this;
}
@Override
public ORole save() {
document.save(ORole.class.getSimpleName());
return this;
}
public Map<String, Byte> getRules() {
return Collections.unmodifiableMap(rules);
}
@Override
public String toString() {
return getName();
}
/**
* Convert the permission code to a readable string.
*
* @param iPermission
* Permission to convert
* @return String representation of the permission
*/
public static String permissionToString(final int iPermission) {
int permission = iPermission;
final StringBuilder returnValue = new StringBuilder();
for (Entry<Integer, String> p : PERMISSION_BIT_NAMES.entrySet()) {
if ((permission & p.getKey()) == p.getKey()) {
if (returnValue.length() > 0)
returnValue.append(", ");
returnValue.append(p.getValue());
permission &= ~p.getKey();
}
}
if (permission != 0) {
if (returnValue.length() > 0)
returnValue.append(", ");
returnValue.append("Unknown 0x");
returnValue.append(Integer.toHexString(permission));
}
return returnValue.toString();
}
public static int registerPermissionBit(final int iBitNo, final String iName) {
if (iBitNo < 0 || iBitNo > 31)
throw new IndexOutOfBoundsException("Permission bit number must be positive and less than 32");
final int value = 1 << iBitNo;
if (PERMISSION_BIT_NAMES == null)
PERMISSION_BIT_NAMES = new HashMap<Integer, String>();
if (PERMISSION_BIT_NAMES.containsKey(value))
throw new IndexOutOfBoundsException("Permission bit number " + String.valueOf(iBitNo) + " already in use");
PERMISSION_BIT_NAMES.put(value, iName);
return value;
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_metadata_security_ORole.java |
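The permission model above is plain bit arithmetic: each permission occupies one bit, `grant` ORs it in, `revoke` ANDs its complement out, and `allow` tests the mask. A self-contained sketch of the same arithmetic with bare ints:

```java
// Self-contained sketch of ORole's permission bitmask arithmetic.
public class PermissionBits {
    static final int CREATE = 1 << 0, READ = 1 << 1, UPDATE = 1 << 2, DELETE = 1 << 3;

    public static void main(String[] args) {
        int access = 0;
        access |= READ | UPDATE;                           // grant: OR the bits in
        access &= ~UPDATE;                                 // revoke: AND the complement
        System.out.println((access & READ) == READ);       // true  (READ still granted)
        System.out.println((access & UPDATE) == UPDATE);   // false (UPDATE revoked)
    }
}
```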
2,005 | private static class Returns extends AbstractMatcher<Method> implements Serializable {
private final Matcher<? super Class<?>> returnType;
public Returns(Matcher<? super Class<?>> returnType) {
this.returnType = checkNotNull(returnType, "return type matcher");
}
public boolean matches(Method m) {
return returnType.matches(m.getReturnType());
}
@Override
public boolean equals(Object other) {
return other instanceof Returns
&& ((Returns) other).returnType.equals(returnType);
}
@Override
public int hashCode() {
return 37 * returnType.hashCode();
}
@Override
public String toString() {
return "returns(" + returnType + ")";
}
private static final long serialVersionUID = 0;
} | 0true
| src_main_java_org_elasticsearch_common_inject_matcher_Matchers.java |
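This `Returns` matcher is normally obtained through the `Matchers.returns(...)` factory and composed with a class matcher such as `Matchers.identicalTo(...)`. A hedged demo against reflected methods (the imports assume the vendored Guice packages in this tree):

```java
import org.elasticsearch.common.inject.matcher.Matcher;
import org.elasticsearch.common.inject.matcher.Matchers;
import java.lang.reflect.Method;

public class ReturnsDemo {
    public static void main(String[] args) throws Exception {
        // Matches any method whose return type is exactly String.
        Matcher<Method> returnsString = Matchers.returns(Matchers.identicalTo(String.class));
        System.out.println(returnsString.matches(Object.class.getMethod("toString")));  // true
        System.out.println(returnsString.matches(Object.class.getMethod("hashCode"))); // false (returns int)
    }
}
```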
23 | final class DescendingSubMapEntryIterator extends SubMapIterator<Map.Entry<K, V>> {
DescendingSubMapEntryIterator(final OMVRBTreeEntryPosition<K, V> last, final OMVRBTreeEntryPosition<K, V> fence) {
super(last, fence);
}
public Map.Entry<K, V> next() {
final Map.Entry<K, V> e = OMVRBTree.exportEntry(next);
prevEntry();
return e;
}
public void remove() {
removeDescending();
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java |
3,407 | public class CommitPoint {
public static final CommitPoint NULL = new CommitPoint(-1, "_null_", Type.GENERATED, ImmutableList.<CommitPoint.FileInfo>of(), ImmutableList.<CommitPoint.FileInfo>of());
public static class FileInfo {
private final String name;
private final String physicalName;
private final long length;
private final String checksum;
public FileInfo(String name, String physicalName, long length, String checksum) {
this.name = name;
this.physicalName = physicalName;
this.length = length;
this.checksum = checksum;
}
public String name() {
return name;
}
public String physicalName() {
return this.physicalName;
}
public long length() {
return length;
}
@Nullable
public String checksum() {
return checksum;
}
public boolean isSame(StoreFileMetaData md) {
if (checksum == null || md.checksum() == null) {
return false;
}
return length == md.length() && checksum.equals(md.checksum());
}
}
public static enum Type {
GENERATED,
SAVED
}
private final long version;
private final String name;
private final Type type;
private final ImmutableList<FileInfo> indexFiles;
private final ImmutableList<FileInfo> translogFiles;
public CommitPoint(long version, String name, Type type, List<FileInfo> indexFiles, List<FileInfo> translogFiles) {
this.version = version;
this.name = name;
this.type = type;
this.indexFiles = ImmutableList.copyOf(indexFiles);
this.translogFiles = ImmutableList.copyOf(translogFiles);
}
public long version() {
return version;
}
public String name() {
return this.name;
}
public Type type() {
return this.type;
}
public ImmutableList<FileInfo> indexFiles() {
return this.indexFiles;
}
public ImmutableList<FileInfo> translogFiles() {
return this.translogFiles;
}
public boolean containPhysicalIndexFile(String physicalName) {
return findPhysicalIndexFile(physicalName) != null;
}
public CommitPoint.FileInfo findPhysicalIndexFile(String physicalName) {
for (FileInfo file : indexFiles) {
if (file.physicalName().equals(physicalName)) {
return file;
}
}
return null;
}
public CommitPoint.FileInfo findNameFile(String name) {
CommitPoint.FileInfo fileInfo = findNameIndexFile(name);
if (fileInfo != null) {
return fileInfo;
}
return findNameTranslogFile(name);
}
public CommitPoint.FileInfo findNameIndexFile(String name) {
for (FileInfo file : indexFiles) {
if (file.name().equals(name)) {
return file;
}
}
return null;
}
public CommitPoint.FileInfo findNameTranslogFile(String name) {
for (FileInfo file : translogFiles) {
if (file.name().equals(name)) {
return file;
}
}
return null;
}
} | 0true
| src_main_java_org_elasticsearch_index_gateway_CommitPoint.java |
883 | public interface PromotableCandidateItemOffer extends Serializable {
public HashMap<OfferItemCriteria, List<PromotableOrderItem>> getCandidateQualifiersMap();
public void setCandidateQualifiersMap(HashMap<OfferItemCriteria, List<PromotableOrderItem>> candidateItemsMap);
public HashMap<OfferItemCriteria, List<PromotableOrderItem>> getCandidateTargetsMap();
public void setCandidateTargetsMap(HashMap<OfferItemCriteria, List<PromotableOrderItem>> candidateItemsMap);
public Money getPotentialSavings();
public void setPotentialSavings(Money savings);
public boolean hasQualifyingItemCriteria();
/**
* Public only for unit testing - not intended to be called
*/
public Money calculateSavingsForOrderItem(PromotableOrderItem orderItem, int qtyToReceiveSavings);
public int calculateMaximumNumberOfUses();
/**
* Returns the number of item quantities that qualified as targets for
* this promotion.
*/
public int calculateTargetQuantityForTieredOffer();
/**
* Determines the max number of times this itemCriteria might apply. This calculation does
* not take into account other promotions. It is useful only to assist in prioritizing the order to process
* the promotions.
*
* @param itemCriteria
* @param promotion
* @return
*/
public int calculateMaxUsesForItemCriteria(OfferItemCriteria itemCriteria, Offer promotion);
public int getPriority();
public Offer getOffer();
public int getUses();
public void addUse();
/**
* Resets the uses for this candidate offer item. This is mainly used in the case where we want to calculate savings
* and then actually apply the promotion to an item. Both scenarios run through the same logic that add uses in order
* to determine if various quantities of items can be targeted for a particular promotion.
*
* @see {@link ItemOfferProcessor#applyAndCompareOrderAndItemOffers(PromotableOrder, List, List)}
*/
public void resetUses();
public boolean isLegacyOffer();
public List<PromotableOrderItem> getLegacyCandidateTargets();
public void setLegacyCandidateTargets(List<PromotableOrderItem> candidateTargets);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_discount_domain_PromotableCandidateItemOffer.java |
2,292 | public static interface V<T> extends Releasable {
/** Reference to the value. */
T v();
/** Whether this instance has been recycled (true) or newly allocated (false). */
boolean isRecycled();
} | 0true
| src_main_java_org_elasticsearch_common_recycler_Recycler.java |
296 | new Thread() {
public void run() {
try {
if (!l.tryLock(2, TimeUnit.SECONDS)) {
latch.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start(); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientLockTest.java |
1,962 | public class MapTryRemoveRequest extends KeyBasedClientRequest implements Portable, SecureRequest {
protected String name;
protected Data key;
protected long threadId;
protected long timeout;
public MapTryRemoveRequest() {
}
public MapTryRemoveRequest(String name, Data key, long threadId, long timeout) {
this.name = name;
this.key = key;
this.threadId = threadId;
this.timeout = timeout;
}
public int getFactoryId() {
return MapPortableHook.F_ID;
}
public int getClassId() {
return MapPortableHook.TRY_REMOVE;
}
@Override
protected Object getKey() {
return key;
}
@Override
protected Operation prepareOperation() {
TryRemoveOperation operation = new TryRemoveOperation(name, key, timeout);
operation.setThreadId(threadId);
return operation;
}
public String getServiceName() {
return MapService.SERVICE_NAME;
}
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
writer.writeLong("t", threadId);
writer.writeLong("timeout", timeout);
final ObjectDataOutput out = writer.getRawDataOutput();
key.writeData(out);
}
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("n");
threadId = reader.readLong("t");
timeout = reader.readLong("timeout");
final ObjectDataInput in = reader.getRawDataInput();
key = new Data();
key.readData(in);
}
public Permission getRequiredPermission() {
return new MapPermission(name, ActionConstants.ACTION_REMOVE);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_map_client_MapTryRemoveRequest.java |
542 | public class RecoverAllTransactionsRequest extends InvocationClientRequest {
public RecoverAllTransactionsRequest() {
}
@Override
public void invoke() {
ClientEngine clientEngine = getClientEngine();
ClusterService clusterService = clientEngine.getClusterService();
Collection<MemberImpl> memberList = clusterService.getMemberList();
TransactionManagerServiceImpl service = getService();
List<Future<SerializableCollection>> futures = recoverTransactions(memberList);
Set<Data> xids = new HashSet<Data>();
for (Future<SerializableCollection> future : futures) {
try {
SerializableCollection collectionWrapper = future.get(RECOVER_TIMEOUT, TimeUnit.MILLISECONDS);
for (Data data : collectionWrapper) {
RecoveredTransaction rt = (RecoveredTransaction) clientEngine.toObject(data);
service.addClientRecoveredTransaction(rt);
xids.add(clientEngine.toData(rt.getXid()));
}
} catch (MemberLeftException e) {
ILogger logger = clientEngine.getLogger(RecoverAllTransactionsRequest.class);
logger.warning("Member left while recovering: " + e);
} catch (Throwable e) {
handleException(clientEngine, e);
}
}
ClientEndpoint endpoint = getEndpoint();
endpoint.sendResponse(new SerializableCollection(xids), getCallId());
}
private List<Future<SerializableCollection>> recoverTransactions(Collection<MemberImpl> memberList) {
List<Future<SerializableCollection>> futures = new ArrayList<Future<SerializableCollection>>(memberList.size());
for (MemberImpl member : memberList) {
RecoverTxnOperation op = new RecoverTxnOperation();
Future<SerializableCollection> f = createInvocationBuilder(TransactionManagerServiceImpl.SERVICE_NAME,
op, member.getAddress()).invoke();
futures.add(f);
}
return futures;
}
private void handleException(ClientEngine clientEngine, Throwable e) {
Throwable cause = getCause(e);
if (cause instanceof TargetNotMemberException) {
ILogger logger = clientEngine.getLogger(RecoverAllTransactionsRequest.class);
logger.warning("Member left while recovering: " + cause);
} else {
throw ExceptionUtil.rethrow(e);
}
}
private Throwable getCause(Throwable e) {
if (e instanceof ExecutionException) {
if (e.getCause() != null) {
return e.getCause();
}
}
return e;
}
@Deprecated
public String getServiceName() {
return TransactionManagerServiceImpl.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return ClientTxnPortableHook.F_ID;
}
@Override
public int getClassId() {
return ClientTxnPortableHook.RECOVER_ALL;
}
@Override
public Permission getRequiredPermission() {
return new TransactionPermission();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_client_txn_RecoverAllTransactionsRequest.java |
2,354 | public class NetworkExceptionHelper {
public static boolean isConnectException(Throwable e) {
if (e instanceof ConnectException) {
return true;
}
return false;
}
public static boolean isCloseConnectionException(Throwable e) {
if (e instanceof ClosedChannelException) {
return true;
}
if (e.getMessage() != null) {
// UGLY! These exception messages seem to indicate a closed connection
if (e.getMessage().contains("Connection reset by peer")) {
return true;
}
if (e.getMessage().contains("connection was aborted")) {
return true;
}
if (e.getMessage().contains("forcibly closed")) {
return true;
}
if (e.getMessage().contains("Broken pipe")) {
return true;
}
if (e.getMessage().contains("Connection timed out")) {
return true;
}
}
return false;
}
} | 0true
| src_main_java_org_elasticsearch_common_transport_NetworkExceptionHelper.java |
37 | @Component("blFulfillmentTypeOptionsExtensionListener")
public class FulfillmentTypeEnumOptionsExtensionListener extends AbstractRuleBuilderEnumOptionsExtensionListener {
@Override
protected Map<String, Class<? extends BroadleafEnumerationType>> getValuesToGenerate() {
Map<String, Class<? extends BroadleafEnumerationType>> map =
new HashMap<String, Class<? extends BroadleafEnumerationType>>();
map.put("blcOptions_FulfillmentType", FulfillmentType.class);
return map;
}
} | 0true
| admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_options_FulfillmentTypeEnumOptionsExtensionListener.java |
2,203 | public class TermsFilterTests extends ElasticsearchTestCase {
@Test
public void testTermFilter() throws Exception {
String fieldName = "field1";
Directory rd = new RAMDirectory();
IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer()));
for (int i = 0; i < 100; i++) {
Document doc = new Document();
int term = i * 10; //terms are units of 10;
doc.add(new Field(fieldName, "" + term, StringField.TYPE_NOT_STORED));
doc.add(new Field("all", "xxx", StringField.TYPE_NOT_STORED));
w.addDocument(doc);
if ((i % 40) == 0) {
w.commit();
}
}
AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(w, true));
w.close();
TermFilter tf = new TermFilter(new Term(fieldName, "19"));
FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits, nullValue());
tf = new TermFilter(new Term(fieldName, "20"));
DocIdSet result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
assertThat(bits.cardinality(), equalTo(1));
tf = new TermFilter(new Term("all", "xxx"));
result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
assertThat(bits.cardinality(), equalTo(100));
reader.close();
rd.close();
}
@Test
public void testTermsFilter() throws Exception {
String fieldName = "field1";
Directory rd = new RAMDirectory();
IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer()));
for (int i = 0; i < 100; i++) {
Document doc = new Document();
int term = i * 10; //terms are units of 10;
doc.add(new Field(fieldName, "" + term, StringField.TYPE_NOT_STORED));
doc.add(new Field("all", "xxx", StringField.TYPE_NOT_STORED));
w.addDocument(doc);
if ((i % 40) == 0) {
w.commit();
}
}
AtomicReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(w, true));
w.close();
TermsFilter tf = new TermsFilter(new Term[]{new Term(fieldName, "19")});
FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits, nullValue());
tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20")});
bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits.cardinality(), equalTo(1));
tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10")});
bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits.cardinality(), equalTo(2));
tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10"), new Term(fieldName, "00")});
bits = (FixedBitSet) tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
assertThat(bits.cardinality(), equalTo(2));
reader.close();
rd.close();
}
} | 0true
| src_test_java_org_elasticsearch_common_lucene_search_TermsFilterTests.java |
1,064 | public class OCommandSQLParsingException extends OException {
private String text;
private int position;
private static final long serialVersionUID = -7430575036316163711L;
public OCommandSQLParsingException(String iMessage) {
super(iMessage, null);
}
public OCommandSQLParsingException(String iMessage, String iText, int iPosition, Throwable cause) {
super(iMessage, cause);
text = iText;
position = iPosition;
}
public OCommandSQLParsingException(String iMessage, String iText, int iPosition) {
super(iMessage);
text = iText;
position = iPosition;
}
@Override
public String getMessage() {
StringBuilder buffer = new StringBuilder();
buffer.append("Error on parsing command at position #");
buffer.append(position);
buffer.append(": " + super.getMessage());
if (text != null) {
buffer.append("\nCommand: ");
buffer.append(text);
buffer.append("\n---------");
for (int i = 0; i < position - 1; ++i)
buffer.append("-");
buffer.append("^");
}
return buffer.toString();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_sql_OCommandSQLParsingException.java |
1,070 | @Test
public class OOQueryOperatorTest {
@Test
public void testOperatorOrder() {
// check that the operators are registered in the correct order
final OQueryOperator[] operators = OSQLEngine.INSTANCE.getRecordOperators();
int i = 0;
Assert.assertTrue(operators[i++] instanceof OQueryOperatorEquals);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorAnd);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorOr);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorNotEquals);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorNot);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorMinorEquals);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorMinor);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorMajorEquals);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorContainsAll);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorMajor);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorLike);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorMatches);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorInstanceof);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorIs);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorIn);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorContainsKey);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorContainsValue);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorContainsText);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorContains);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorTraverse);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorBetween);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorPlus);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorMinus);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorMultiply);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorDivide);
Assert.assertTrue(operators[i++] instanceof OQueryOperatorMod);
}
} | 0true
| core_src_test_java_com_orientechnologies_orient_core_sql_OOQueryOperatorTest.java |
621 | public class NullBroadleafTemplateResolver implements ITemplateResolver {
@Override
public String getName() {
return "NullBroadleafTemplateResolver";
}
@Override
public Integer getOrder() {
return 9999;
}
@Override
public TemplateResolution resolveTemplate(TemplateProcessingParameters templateProcessingParameters) {
return null;
}
@Override
public void initialize() {
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_web_NullBroadleafTemplateResolver.java |
3,425 | SnapshotStatus snapshotStatus = indexShard.snapshot(new Engine.SnapshotHandler<SnapshotStatus>() {
@Override
public SnapshotStatus snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException {
if (lastIndexVersion != snapshotIndexCommit.getGeneration() || lastTranslogId != translogSnapshot.translogId() || lastTranslogLength < translogSnapshot.length()) {
logger.debug("snapshot ({}) to {} ...", reason, shardGateway);
SnapshotStatus snapshotStatus =
shardGateway.snapshot(new IndexShardGateway.Snapshot(snapshotIndexCommit, translogSnapshot, lastIndexVersion, lastTranslogId, lastTranslogLength, lastTotalTranslogOperations));
lastIndexVersion = snapshotIndexCommit.getGeneration();
lastTranslogId = translogSnapshot.translogId();
lastTranslogLength = translogSnapshot.length();
lastTotalTranslogOperations = translogSnapshot.estimatedTotalOperations();
return snapshotStatus;
}
return null;
}
}); | 0true
| src_main_java_org_elasticsearch_index_gateway_IndexShardGatewayService.java |
395 | public interface ORecordLazyMultiValue extends ODetachable, OSizeable {
public Iterator<OIdentifiable> rawIterator();
/**
* Browse all the set to convert all the items into records.
*/
public void convertLinks2Records();
/**
* Browse all the set to convert all the items into links.
*
* @return
*/
public boolean convertRecords2Links();
public boolean isAutoConvertToRecord();
public void setAutoConvertToRecord(boolean convertToRecord);
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_record_ORecordLazyMultiValue.java |
3,472 | public class CacheMapLoader implements MapStore, MapLoaderLifecycleSupport {
private String type;
public void init(final HazelcastInstance hazelcastInstance,
final Properties properties, final String mapName) {
type = mapName;
}
public void destroy() {
}
public Object load(final Object key) {
return type + ":" + key;
}
public Map loadAll(final Collection keys) {
return null;
}
public Set loadAllKeys() {
return null;
}
@Override
public void store(Object key, Object value) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
public void storeAll(Map map) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
public void delete(Object key) {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
public void deleteAll(Collection keys) {
//To change body of implemented methods use File | Settings | File Templates.
}
} | 1no label
| hazelcast-spring_src_test_java_com_hazelcast_spring_cache_CacheMapLoader.java |
155 | public abstract class TransactionInterceptorProvider extends Service
{
private final String name;
public TransactionInterceptorProvider( String name )
{
super( name );
this.name = name;
}
/**
* Returns the name of this provider
*
* @return The name of this provider
*/
public final String name()
{
return name;
}
/**
* Creates a TransactionInterceptor with the given datasource and options.
* It is possible for this method to return null, signifying that the
* options passed did not allow for instantiation.
*
* @param ds The datasource the TransactionInterceptor will communicate with
* @param options An object that can be the options to instantiate the
* interceptor with - e.g "false" to prevent instantiation
* @return An implementation of TransactionInterceptor or null if the
* options say so.
*/
public abstract TransactionInterceptor create( XaDataSource ds,
String options, DependencyResolver dependencyResolver );
/**
* Creates a TransactionInterceptor with the given datasource and options
* and the given TransactionInterceptor as the next in the chain.
* It is possible for this method to return null, signifying that the
* options passed did not allow for instantiation.
*
* @param ds The datasource the TransactionInterceptor will communicate with
* @param options An object that can be the options to instantiate the
* interceptor with - e.g "false" to prevent instantiation
* @param next The next interceptor in the chain - can be null
* @return An implementation of TransactionInterceptor or null if the
* options say so.
*/
public abstract TransactionInterceptor create( TransactionInterceptor next,
XaDataSource ds, String options, DependencyResolver dependencyResolver );
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_TransactionInterceptorProvider.java |
73 | public interface ImageStaticAsset extends StaticAsset {
public Integer getWidth();
public void setWidth(Integer width);
public Integer getHeight();
public void setHeight(Integer height);
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_domain_ImageStaticAsset.java |
671 | constructors[COLLECTION_TXN_REMOVE_BACKUP] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new CollectionTxnRemoveBackupOperation();
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java |
0 | public interface AdminCatalogService {
/**
* Clears out any Skus already attached to the Product, if any were present,
* and generates a new set of Skus based on the permutations of
* ProductOptions attached to this Product
*
* @param productId - the Product to generate Skus from
* @return the number of generated Skus from the ProductOption permutations
*/
public Integer generateSkusFromProduct(Long productId);
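/*
* A hypothetical usage sketch (the bean lookup and productId are assumptions, not part of this interface):
*
*   AdminCatalogService svc = applicationContext.getBean(AdminCatalogService.class);
*   Integer generated = svc.generateSkusFromProduct(productId);
*   // 'generated' is the count of Skus created from the ProductOption permutations
*/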
/**
* This will create a new product along with a new Sku for the defaultSku, along with new
* Skus for all of the additional Skus. This is achieved by simply detaching the entities
* from the persistent session, resetting the primary keys and then saving the entity.
*
* Note: Media for the product is not saved separately, meaning if you make a change to the
* original product's media items (the one specified by <b>productId</b>) it will change the
* cloned product's media and vice-versa.
*
* @param productId
* @return
*/
public Boolean cloneProduct(Long productId);
} | 0true
| admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_AdminCatalogService.java |
2,814 | public final class CheckReplicaVersion extends Operation implements PartitionAwareOperation, MigrationCycleOperation {
private long version;
private boolean returnResponse;
private boolean response;
public CheckReplicaVersion() {
}
public CheckReplicaVersion(long version, boolean returnResponse) {
this.version = version;
this.returnResponse = returnResponse;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
InternalPartitionServiceImpl partitionService = getService();
int partitionId = getPartitionId();
int replicaIndex = getReplicaIndex();
long[] currentVersions = partitionService.getPartitionReplicaVersions(partitionId);
long currentVersion = currentVersions[replicaIndex - 1];
if (currentVersion == version) {
response = true;
} else {
logBackupVersionMismatch(currentVersion);
partitionService.triggerPartitionReplicaSync(partitionId, replicaIndex);
response = false;
}
}
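// Note: run() above compares this backup's recorded version (index replicaIndex - 1
// in its own version vector) against the owner's expected version; a mismatch
// triggers an anti-entropy sync via triggerPartitionReplicaSync.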
private void logBackupVersionMismatch(long currentVersion) {
ILogger logger = getLogger();
if (logger.isFinestEnabled()) {
logger.finest("Partition: " + getPartitionId() + " version is not matching to version of the owner -> "
+ currentVersion + " -vs- " + version);
}
}
@Override
public void afterRun() throws Exception {
}
@Override
public boolean returnsResponse() {
return returnResponse;
}
@Override
public Object getResponse() {
return response;
}
@Override
public boolean validatesTarget() {
return false;
}
@Override
public String getServiceName() {
return InternalPartitionService.SERVICE_NAME;
}
@Override
public void logError(Throwable e) {
ReplicaErrorLogger.log(e, getLogger());
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
out.writeLong(version);
out.writeBoolean(returnResponse);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
version = in.readLong();
returnResponse = in.readBoolean();
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("CheckReplicaVersion");
sb.append("{partition=").append(getPartitionId());
sb.append(", replica=").append(getReplicaIndex());
sb.append(", version=").append(version);
sb.append('}');
return sb.toString();
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_partition_impl_CheckReplicaVersion.java |
1,407 | public class ReadWriteAccessDelegate<T extends HazelcastRegion> extends AbstractAccessDelegate<T> {
public ReadWriteAccessDelegate(T hazelcastRegion, final Properties props) {
super(hazelcastRegion, props);
}
public boolean afterInsert(final Object key, final Object value, final Object version) throws CacheException {
return put(key, value, version);
}
/**
* {@inheritDoc}
* <p/>
* Called after <code>com.hazelcast.hibernate.access.ReadWriteAccessDelegate.lockItem()</code>
*/
public boolean afterUpdate(final Object key, final Object value, final Object currentVersion, final Object previousVersion,
final SoftLock lock) throws CacheException {
try {
return update(key, value, currentVersion, previousVersion, lock);
} finally {
unlockItem(key, lock);
}
}
public boolean putFromLoad(final Object key, final Object value, final long txTimestamp, final Object version,
final boolean minimalPutOverride) throws CacheException {
return put(key, value, version);
}
public SoftLock lockItem(final Object key, final Object version) throws CacheException {
return cache.tryLock(key, version);
}
public void unlockItem(final Object key, final SoftLock lock) throws CacheException {
cache.unlock(key, lock);
}
public void unlockRegion(SoftLock lock) throws CacheException {
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_access_ReadWriteAccessDelegate.java |
1,540 | public class PathMap {
public static final String CLASS = Tokens.makeNamespace(PathMap.class) + ".class";
public enum Counters {
VERTICES_PROCESSED,
OUT_EDGES_PROCESSED
}
public static Configuration createConfiguration(final Class<? extends Element> klass) {
final Configuration configuration = new EmptyConfiguration();
configuration.setClass(CLASS, klass, Element.class);
configuration.setBoolean(Tokens.TITAN_HADOOP_PIPELINE_TRACK_PATHS, true);
return configuration;
}
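// A short usage sketch (job wiring is assumed):
// Configuration conf = PathMap.createConfiguration(Vertex.class);
// conf now carries CLASS=Vertex and enables path tracking, which Map.setup() below requires.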
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, Text> {
private boolean isVertex;
private final Text textWritable = new Text();
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.outputs = new SafeMapperOutputs(context);
if (!context.getConfiguration().getBoolean(Tokens.TITAN_HADOOP_PIPELINE_TRACK_PATHS, false))
throw new IllegalStateException(PathMap.class.getSimpleName() + " requires that paths be enabled");
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
if (this.isVertex && value.hasPaths()) {
for (final List<FaunusPathElement.MicroElement> path : value.getPaths()) {
this.textWritable.set(path.toString());
this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
} else {
long edgesProcessed = 0;
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
for (final List<FaunusPathElement.MicroElement> path : edge.getPaths()) {
this.textWritable.set(path.toString());
this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
}
edgesProcessed++;
}
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_PROCESSED, edgesProcessed);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_PathMap.java |
864 | public class CandidateOrderOfferAnswer implements IAnswer<CandidateOrderOffer> {
@Override
public CandidateOrderOffer answer() throws Throwable {
return new CandidateOrderOfferImpl();
}
} | 0true
| core_broadleaf-framework_src_test_java_org_broadleafcommerce_core_offer_service_OfferServiceTest.java |
601 | public class GetSettingsRequest extends MasterNodeReadOperationRequest<GetSettingsRequest> {
private String[] indices = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true);
private String[] names = Strings.EMPTY_ARRAY;
public GetSettingsRequest indices(String... indices) {
this.indices = indices;
return this;
}
public GetSettingsRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
public String[] indices() {
return indices;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public String[] names() {
return names;
}
public GetSettingsRequest names(String... names) {
this.names = names;
return this;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (names == null) {
validationException = ValidateActions.addValidationError("names may not be null", validationException);
}
return validationException;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
names = in.readStringArray();
readLocal(in, Version.V_1_0_0_RC2);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
indicesOptions.writeIndicesOptions(out);
out.writeStringArray(names);
writeLocal(out, Version.V_1_0_0_RC2);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_settings_get_GetSettingsRequest.java |
76 | public class ClientCreateRequest extends CallableClientRequest implements Portable, RetryableRequest, SecureRequest {
private String name;
private String serviceName;
public ClientCreateRequest() {
}
public ClientCreateRequest(String name, String serviceName) {
this.name = name;
this.serviceName = serviceName;
}
@Override
public Object call() throws Exception {
ProxyService proxyService = clientEngine.getProxyService();
proxyService.initializeDistributedObject(serviceName, name);
return null;
}
@Override
public String getServiceName() {
return serviceName;
}
@Override
public int getFactoryId() {
return ClientPortableHook.ID;
}
@Override
public int getClassId() {
return ClientPortableHook.CREATE_PROXY;
}
@Override
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
writer.writeUTF("s", serviceName);
}
@Override
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("n");
serviceName = reader.readUTF("s");
}
@Override
public Permission getRequiredPermission() {
return ActionConstants.getPermission(name, serviceName, ActionConstants.ACTION_CREATE);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_client_ClientCreateRequest.java |
1,420 | @XmlRootElement(name = "skuBundleItem")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class SkuBundleItemWrapper extends BaseWrapper implements APIWrapper<SkuBundleItem> {
@XmlElement
protected Long id;
@XmlElement
protected Integer quantity;
@XmlElement
protected Money salePrice;
@XmlElement
protected Money retailPrice;
@XmlElement
protected Long bundleId;
@XmlElement
protected SkuWrapper sku;
@XmlElement
protected String name;
@XmlElement
protected Boolean active;
@XmlElement
protected String description;
@XmlElement
protected String longDescription;
@Override
public void wrapDetails(SkuBundleItem model, HttpServletRequest request) {
this.id = model.getId();
this.quantity = model.getQuantity();
this.salePrice = model.getSalePrice();
this.retailPrice = model.getRetailPrice();
this.bundleId = model.getBundle().getId();
this.name = model.getSku().getName();
this.description = model.getSku().getDescription();
this.longDescription = model.getSku().getLongDescription();
this.active = model.getSku().isActive();
// this.sku = (SkuWrapper)context.getBean(SkuWrapper.class.getName());
// this.sku.wrap(model.getSku(), request);
}
@Override
public void wrapSummary(SkuBundleItem model, HttpServletRequest request) {
wrapDetails(model, request);
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_SkuBundleItemWrapper.java |
1,237 | public class UnitDependencyVisitor extends Visitor {
private final PhasedUnit phasedUnit;
private Set<Declaration> alreadyDone;
public UnitDependencyVisitor(PhasedUnit phasedUnit) {
this.phasedUnit = phasedUnit;
alreadyDone = new HashSet<Declaration>();
}
private void storeDependency(Declaration d) {
if (d!=null && (d instanceof UnionType ||
d instanceof IntersectionType ||
!alreadyDone.contains(d))) {
if (!(d instanceof UnionType ||
d instanceof IntersectionType)) {
alreadyDone.add(d);
}
if (d instanceof TypeDeclaration) {
TypeDeclaration td = (TypeDeclaration) d;
storeDependency(td.getExtendedTypeDeclaration());
for (TypeDeclaration st: td.getSatisfiedTypeDeclarations()) {
storeDependency(st);
}
List<TypeDeclaration> caseTypes = td.getCaseTypeDeclarations();
if (caseTypes!=null) {
for (TypeDeclaration ct: caseTypes) {
storeDependency(ct);
}
}
}
if (d instanceof TypedDeclaration) {
//TODO: is this really necessary?
storeDependency(((TypedDeclaration) d).getTypeDeclaration());
}
Declaration rd = d.getRefinedDeclaration();
if (rd!=d) {
storeDependency(rd); //this one is needed for default arguments, I think
}
Unit declarationUnit = d.getUnit();
if (declarationUnit != null && ! (declarationUnit instanceof TypeFactory)) {
String moduleName = declarationUnit.getPackage().getModule().getNameAsString();
if (!moduleName.equals(Module.LANGUAGE_MODULE_NAME) &&
!JDKUtils.isJDKModule(moduleName)
&& !JDKUtils.isOracleJDKModule(moduleName)) {
Unit currentUnit = phasedUnit.getUnit();
String currentUnitPath = phasedUnit.getUnitFile().getPath();
String currentUnitName = currentUnit.getFilename();
String dependedOnUnitName = declarationUnit.getFilename();
String currentUnitPackage = currentUnit.getPackage().getNameAsString();
String dependedOnPackage = declarationUnit.getPackage().getNameAsString();
if (!dependedOnUnitName.equals(currentUnitName) ||
!dependedOnPackage.equals(currentUnitPackage)) {
// WOW : Ceylon Abstract Data types and switch case would be cool here ;)
if (declarationUnit instanceof ProjectSourceFile) {
declarationUnit.getDependentsOf().add(currentUnitPath);
}
else if (declarationUnit instanceof ICrossProjectReference) {
ProjectSourceFile originalProjectSourceFile = ((ICrossProjectReference) declarationUnit).getOriginalSourceFile();
if (originalProjectSourceFile != null) {
originalProjectSourceFile.getDependentsOf().add(currentUnitPath);
}
}
else if (declarationUnit instanceof ExternalSourceFile) {
// Don't manage them : they cannot change ... Well they might if we were using these dependencies to manage module
// removal. But since module removal triggers a classpath container update and so a full build, it's not necessary.
// Might change in the future
}
else if (declarationUnit instanceof CeylonBinaryUnit) {
declarationUnit.getDependentsOf().add(currentUnitPath);
}
else if (declarationUnit instanceof JavaCompilationUnit) {
//TODO: this does not seem to work for cross-project deps
// We should introduce a CrossProjectJavaUnit that can return
// the original JavaCompilationUnit from the original project
declarationUnit.getDependentsOf().add(currentUnitPath);
}
else if (declarationUnit instanceof JavaClassFile) {
//TODO: All the dependencies to class files are also added... Is it really useful?
// I assume that for classes in the classes or exploded dirs it might be,
// but it's not clear it is also useful in the case of jar-located classes
declarationUnit.getDependentsOf().add(currentUnitPath);
}
else {
assert(false);
}
}
}
}
}
}
@Override
public void visit(Tree.MemberOrTypeExpression that) {
storeDependency(that.getDeclaration());
super.visit(that);
}
@Override
public void visit(Tree.NamedArgument that) {
//TODO: is this really necessary?
storeDependency(that.getParameter());
super.visit(that);
}
@Override
public void visit(Tree.SequencedArgument that) {
//TODO: is this really necessary?
storeDependency(that.getParameter());
super.visit(that);
}
@Override
public void visit(Tree.PositionalArgument that) {
//TODO: is this really necessary?
storeDependency(that.getParameter());
super.visit(that);
}
void storeDependency(Parameter p) {
if (p!=null) {
storeDependency(p.getModel());
}
}
@Override
public void visit(Tree.Type that) {
ProducedType tm = that.getTypeModel();
if (tm!=null) {
storeDependency(tm.getDeclaration());
}
super.visit(that);
}
@Override
public void visit(Tree.ImportMemberOrType that) {
storeDependency(that.getDeclarationModel());
super.visit(that);
}
@Override
public void visit(Tree.TypeArguments that) {
//TODO: is this really necessary?
List<ProducedType> tms = that.getTypeModels();
if (tms!=null) {
for (ProducedType pt: tms) {
if (pt!=null) {
storeDependency(pt.getDeclaration());
}
}
}
super.visit(that);
}
@Override
public void visit(Tree.Term that) {
//TODO: is this really necessary?
ProducedType tm = that.getTypeModel();
if (tm!=null) {
storeDependency(tm.getDeclaration());
}
super.visit(that);
}
} | 1no label
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_builder_UnitDependencyVisitor.java |
1,650 | public class LateStageAddMetadataRequest {
private final String fieldName;
private final Class<?> parentClass;
private final Class<?> targetClass;
private final DynamicEntityDao dynamicEntityDao;
private final String prefix;
public LateStageAddMetadataRequest(String fieldName, Class<?> parentClass, Class<?> targetClass, DynamicEntityDao dynamicEntityDao, String prefix) {
this.fieldName = fieldName;
this.parentClass = parentClass;
this.targetClass = targetClass;
this.dynamicEntityDao = dynamicEntityDao;
this.prefix = prefix;
}
public String getFieldName() {
return fieldName;
}
public Class<?> getParentClass() {
return parentClass;
}
public Class<?> getTargetClass() {
return targetClass;
}
public DynamicEntityDao getDynamicEntityDao() {
return dynamicEntityDao;
}
public String getPrefix() {
return prefix;
}
} | 0true
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_dao_provider_metadata_request_LateStageAddMetadataRequest.java |
1,304 | public class DataUpdateTask implements Callable<Void> {
private ODatabaseDocumentTx baseDB;
private ODatabaseDocumentTx testDB;
public DataUpdateTask(ODatabaseDocumentTx baseDB, ODatabaseDocumentTx testDocumentTx) {
this.baseDB = new ODatabaseDocumentTx(baseDB.getURL());
this.testDB = new ODatabaseDocumentTx(testDocumentTx.getURL());
}
@Override
public Void call() throws Exception {
Random random = new Random();
baseDB.open("admin", "admin");
testDB.open("admin", "admin");
int counter = 0;
try {
while (true) {
final int idToUpdate = random.nextInt(idGen);
idLockManager.acquireLock(Thread.currentThread(), idToUpdate, OLockManager.LOCK.EXCLUSIVE);
try {
OSQLSynchQuery<ODocument> query = new OSQLSynchQuery<ODocument>("select from TestClass where id = " + idToUpdate);
final List<ODocument> result = baseDB.query(query);
Assert.assertTrue(!result.isEmpty());
final ODocument document = result.get(0);
document.field("timestamp", System.currentTimeMillis());
document.field("stringValue", "vde" + random.nextLong());
saveDoc(document, baseDB, testDB);
counter++;
if (counter % 50000 == 0)
System.out.println(counter + " records were updated.");
} finally {
idLockManager.releaseLock(Thread.currentThread(), idToUpdate, OLockManager.LOCK.EXCLUSIVE);
}
}
} finally {
baseDB.close();
testDB.close();
}
}
} | 0true
| server_src_test_java_com_orientechnologies_orient_core_storage_impl_local_paginated_LocalPaginatedStorageUpdateCrashRestore.java |
1,353 | completableFuture.andThen(new ExecutionCallback() {
@Override
public void onResponse(Object response) {
reference2.set(response);
latch2.countDown();
}
@Override
public void onFailure(Throwable t) {
reference2.set(t);
latch2.countDown();
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java |
1,252 | public class NoNodeAvailableException extends ElasticsearchException {
public NoNodeAvailableException() {
super("No node available");
}
@Override
public RestStatus status() {
return RestStatus.SERVICE_UNAVAILABLE;
}
} | 0true
| src_main_java_org_elasticsearch_client_transport_NoNodeAvailableException.java |
919 | public class OrderItemAdjustmentAnswer implements IAnswer<OrderItemAdjustment> {
@Override
public OrderItemAdjustment answer() throws Throwable {
return new OrderItemAdjustmentImpl();
}
} | 0true
| core_broadleaf-framework_src_test_java_org_broadleafcommerce_core_offer_service_processor_FulfillmentGroupOfferProcessorTest.java |
185 | {
@Override
public boolean accept( XaDataSource item )
{
return item.getName().equals( NeoStoreXaDataSource.DEFAULT_DATA_SOURCE_NAME );
}
} ); | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_XaDataSourceManager.java |
1,127 | public class ODefaultSQLMethodFactory implements OSQLMethodFactory {
private final Map<String, Object> methods = new HashMap<String, Object>();
public ODefaultSQLMethodFactory() {
methods.put(OSQLMethodAppend.NAME, new OSQLMethodAppend());
methods.put(OSQLMethodAsBoolean.NAME, new OSQLMethodAsBoolean());
methods.put(OSQLMethodAsDate.NAME, new OSQLMethodAsDate());
methods.put(OSQLMethodAsDateTime.NAME, new OSQLMethodAsDateTime());
methods.put(OSQLMethodAsDecimal.NAME, new OSQLMethodAsDecimal());
methods.put(OSQLMethodAsFloat.NAME, new OSQLMethodAsFloat());
methods.put(OSQLMethodAsInteger.NAME, new OSQLMethodAsInteger());
methods.put(OSQLMethodAsList.NAME, new OSQLMethodAsList());
methods.put(OSQLMethodAsLong.NAME, new OSQLMethodAsLong());
methods.put(OSQLMethodAsSet.NAME, new OSQLMethodAsSet());
methods.put(OSQLMethodAsString.NAME, new OSQLMethodAsString());
methods.put(OSQLMethodCharAt.NAME, new OSQLMethodCharAt());
methods.put(OSQLMethodField.NAME, new OSQLMethodField());
methods.put(OSQLMethodFormat.NAME, new OSQLMethodFormat());
methods.put(OSQLMethodFunctionDelegate.NAME, OSQLMethodFunctionDelegate.class);
methods.put(OSQLMethodIndexOf.NAME, new OSQLMethodIndexOf());
methods.put(OSQLMethodKeys.NAME, new OSQLMethodKeys());
methods.put(OSQLMethodLeft.NAME, new OSQLMethodLeft());
methods.put(OSQLMethodLength.NAME, new OSQLMethodLength());
methods.put(OSQLMethodNormalize.NAME, new OSQLMethodNormalize());
methods.put(OSQLMethodPrefix.NAME, new OSQLMethodPrefix());
methods.put(OSQLMethodReplace.NAME, new OSQLMethodReplace());
methods.put(OSQLMethodRemove.NAME, new OSQLMethodRemove());
methods.put(OSQLMethodRemoveAll.NAME, new OSQLMethodRemoveAll());
methods.put(OSQLMethodRight.NAME, new OSQLMethodRight());
methods.put(OSQLMethodSize.NAME, new OSQLMethodSize());
methods.put(OSQLMethodSubString.NAME, new OSQLMethodSubString());
methods.put(OSQLMethodToJSON.NAME, new OSQLMethodToJSON());
methods.put(OSQLMethodToLowerCase.NAME, new OSQLMethodToLowerCase());
methods.put(OSQLMethodToUpperCase.NAME, new OSQLMethodToUpperCase());
methods.put(OSQLMethodTrim.NAME, new OSQLMethodTrim());
methods.put(OSQLMethodValues.NAME, new OSQLMethodValues());
}
@Override
public boolean hasMethod(final String iName) {
return methods.containsKey(iName);
}
@Override
public Set<String> getMethodNames() {
return methods.keySet();
}
@Override
public OSQLMethod createMethod(final String name) throws OCommandExecutionException {
final Object m = methods.get(name);
final OSQLMethod method;
if (m instanceof Class<?>)
try {
method = (OSQLMethod) ((Class<?>) m).newInstance();
} catch (Exception e) {
throw new OCommandExecutionException("Cannot create SQL method: " + m);
}
else
method = (OSQLMethod) m;
if (method == null)
throw new OCommandExecutionException("Unknown method name: " + name);
return method;
}
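// A minimal usage sketch (names come from the registration map above):
// OSQLMethodFactory factory = new ODefaultSQLMethodFactory();
// OSQLMethod upper = factory.createMethod(OSQLMethodToUpperCase.NAME); // shared singleton instance
// Entries registered as a Class (only OSQLMethodFunctionDelegate here) are instantiated per call.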
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_sql_method_ODefaultSQLMethodFactory.java |
541 | public class PrepareTransactionRequest extends BaseTransactionRequest {
public PrepareTransactionRequest() {
}
@Override
protected Object innerCall() throws Exception {
ClientEndpoint endpoint = getEndpoint();
TransactionContext transactionContext = endpoint.getTransactionContext(txnId);
Transaction transaction = TransactionAccessor.getTransaction(transactionContext);
transaction.prepare();
return null;
}
@Override
public String getServiceName() {
return ClientEngineImpl.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return ClientTxnPortableHook.F_ID;
}
@Override
public int getClassId() {
return ClientTxnPortableHook.PREPARE;
}
@Override
public Permission getRequiredPermission() {
return new TransactionPermission();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_client_txn_PrepareTransactionRequest.java |
1,906 | convertToClass(Character.class, new TypeConverter() {
public Object convert(String value, TypeLiteral<?> toType) {
value = value.trim();
if (value.length() != 1) {
throw new RuntimeException("Length != 1.");
}
return value.charAt(0);
}
@Override
public String toString() {
return "TypeConverter<Character>";
}
}); | 0true
| src_main_java_org_elasticsearch_common_inject_TypeConverterBindingProcessor.java |
77 | private class State
{
private final GraphDatabaseService graphDb;
private Transaction tx;
public State( GraphDatabaseService graphDb )
{
this.graphDb = graphDb;
}
} | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestManualAcquireLock.java |
268 | public class ElasticsearchIllegalArgumentException extends ElasticsearchException {
public ElasticsearchIllegalArgumentException() {
super(null);
}
public ElasticsearchIllegalArgumentException(String msg) {
super(msg);
}
public ElasticsearchIllegalArgumentException(String msg, Throwable cause) {
super(msg, cause);
}
@Override
public RestStatus status() {
return RestStatus.BAD_REQUEST;
}
} | 0true
| src_main_java_org_elasticsearch_ElasticsearchIllegalArgumentException.java |
953 | class ConsumerThread extends TestThread {
private final ILock lock;
private final ICondition condition;
ConsumerThread(int id, ILock lock, ICondition condition) {
super("ConsumerThread-" + id);
this.lock = lock;
this.condition = condition;
}
void runSingleIteration() throws InterruptedException {
lock.lock();
try {
while (object == null) {
condition.await();
}
object = null;
condition.signalAll();
} finally {
lock.unlock();
}
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_concurrent_lock_ProducerConsumerConditionStressTest.java |
730 | loadEntriesBetween(keyFrom, fromInclusive, keyTo, toInclusive, new RangeResultListener<K, V>() {
@Override
public boolean addResult(Map.Entry<K, V> entry) {
result.add(entry.getValue());
if (maxValuesToFetch > 0 && result.size() >= maxValuesToFetch)
return false;
return true;
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_sbtree_local_OSBTree.java |
2,390 | public enum BigArrays {
;
/** Page size in bytes: 16KB */
public static final int PAGE_SIZE_IN_BYTES = 1 << 14;
public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_BYTE;
public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_INT;
public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_LONG;
public static final int DOUBLE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_DOUBLE;
public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF;
/** Returns the next size to grow when working with parallel arrays that may have different page sizes or number of bytes per element. */
public static long overSize(long minTargetSize) {
return overSize(minTargetSize, PAGE_SIZE_IN_BYTES / 8, 1);
}
/** Return the next size to grow to that is >= <code>minTargetSize</code>.
* Inspired from {@link ArrayUtil#oversize(int, int)} and adapted to play nicely with paging. */
public static long overSize(long minTargetSize, int pageSize, int bytesPerElement) {
Preconditions.checkArgument(minTargetSize >= 0, "minTargetSize must be >= 0");
Preconditions.checkArgument(pageSize > 0, "pageSize must be > 0");
Preconditions.checkArgument(bytesPerElement > 0, "bytesPerElement must be > 0");
long newSize;
if (minTargetSize < pageSize) {
newSize = ArrayUtil.oversize((int)minTargetSize, bytesPerElement);
} else {
newSize = minTargetSize + (minTargetSize >>> 3);
}
if (newSize > pageSize) {
// round to a multiple of pageSize
newSize = newSize - (newSize % pageSize) + pageSize;
assert newSize % pageSize == 0;
}
return newSize;
}
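// A worked example of the sizing math above (inputs chosen for illustration):
// overSize(100, 1024, 8) delegates to ArrayUtil.oversize(100, 8), since 100 < pageSize;
// overSize(5000, 1024, 8) grows by ~1/8th to 5000 + 625 = 5625, then rounds up to the
// next multiple of pageSize: 5625 - (5625 % 1024) + 1024 = 6144.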
static boolean indexIsInt(long index) {
return index == (int) index;
}
private static class ByteArrayWrapper extends AbstractArray implements ByteArray {
private final byte[] array;
ByteArrayWrapper(byte[] array, PageCacheRecycler recycler, boolean clearOnResize) {
super(recycler, clearOnResize);
this.array = array;
}
@Override
public long size() {
return array.length;
}
@Override
public byte get(long index) {
assert indexIsInt(index);
return array[(int) index];
}
@Override
public byte set(long index, byte value) {
assert indexIsInt(index);
final byte ret = array[(int) index];
array[(int) index] = value;
return ret;
}
@Override
public void get(long index, int len, BytesRef ref) {
assert indexIsInt(index);
ref.bytes = array;
ref.offset = (int) index;
ref.length = len;
}
@Override
public void set(long index, byte[] buf, int offset, int len) {
assert indexIsInt(index);
System.arraycopy(buf, offset, array, (int) index, len);
}
}
private static class IntArrayWrapper extends AbstractArray implements IntArray {
private final int[] array;
IntArrayWrapper(int[] array, PageCacheRecycler recycler, boolean clearOnResize) {
super(recycler, clearOnResize);
this.array = array;
}
@Override
public long size() {
return array.length;
}
@Override
public int get(long index) {
assert indexIsInt(index);
return array[(int) index];
}
@Override
public int set(long index, int value) {
assert indexIsInt(index);
final int ret = array[(int) index];
array[(int) index] = value;
return ret;
}
@Override
public int increment(long index, int inc) {
assert indexIsInt(index);
return array[(int) index] += inc;
}
}
private static class LongArrayWrapper extends AbstractArray implements LongArray {
private final long[] array;
LongArrayWrapper(long[] array, PageCacheRecycler recycler, boolean clearOnResize) {
super(recycler, clearOnResize);
this.array = array;
}
@Override
public long size() {
return array.length;
}
@Override
public long get(long index) {
assert indexIsInt(index);
return array[(int) index];
}
@Override
public long set(long index, long value) {
assert indexIsInt(index);
final long ret = array[(int) index];
array[(int) index] = value;
return ret;
}
@Override
public long increment(long index, long inc) {
assert indexIsInt(index);
return array[(int) index] += inc;
}
@Override
public void fill(long fromIndex, long toIndex, long value) {
assert indexIsInt(fromIndex);
assert indexIsInt(toIndex);
Arrays.fill(array, (int) fromIndex, (int) toIndex, value);
}
}
private static class DoubleArrayWrapper extends AbstractArray implements DoubleArray {
private final double[] array;
DoubleArrayWrapper(double[] array, PageCacheRecycler recycler, boolean clearOnResize) {
super(recycler, clearOnResize);
this.array = array;
}
@Override
public long size() {
return array.length;
}
@Override
public double get(long index) {
assert indexIsInt(index);
return array[(int) index];
}
@Override
public double set(long index, double value) {
assert indexIsInt(index);
double ret = array[(int) index];
array[(int) index] = value;
return ret;
}
@Override
public double increment(long index, double inc) {
assert indexIsInt(index);
return array[(int) index] += inc;
}
@Override
public void fill(long fromIndex, long toIndex, double value) {
assert indexIsInt(fromIndex);
assert indexIsInt(toIndex);
Arrays.fill(array, (int) fromIndex, (int) toIndex, value);
}
}
private static class ObjectArrayWrapper<T> extends AbstractArray implements ObjectArray<T> {
private final Object[] array;
ObjectArrayWrapper(Object[] array, PageCacheRecycler recycler) {
super(recycler, true);
this.array = array;
}
@Override
public long size() {
return array.length;
}
@SuppressWarnings("unchecked")
@Override
public T get(long index) {
assert indexIsInt(index);
return (T) array[(int) index];
}
@Override
public T set(long index, T value) {
assert indexIsInt(index);
@SuppressWarnings("unchecked")
T ret = (T) array[(int) index];
array[(int) index] = value;
return ret;
}
}
/** Allocate a new {@link ByteArray} of the given capacity. */
public static ByteArray newByteArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
if (size <= BYTE_PAGE_SIZE) {
return new ByteArrayWrapper(new byte[(int) size], recycler, clearOnResize);
} else {
return new BigByteArray(size, recycler, clearOnResize);
}
}
/** Allocate a new {@link ByteArray} of the given capacity. */
public static ByteArray newByteArray(long size) {
return newByteArray(size, null, true);
}
/** Resize the array to the exact provided size. */
public static ByteArray resize(ByteArray array, long size) {
if (array instanceof BigByteArray) {
((BigByteArray) array).resize(size);
return array;
} else {
AbstractArray arr = (AbstractArray) array;
final ByteArray newArray = newByteArray(size, arr.recycler, arr.clearOnResize);
final byte[] rawArray = ((ByteArrayWrapper) array).array;
newArray.set(0, rawArray, 0, (int) Math.min(rawArray.length, newArray.size()));
return newArray;
}
}
/** Grow an array to a size that is larger than <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
public static ByteArray grow(ByteArray array, long minSize) {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, BYTE_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_BYTE);
return resize(array, newSize);
}
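// A small sketch of the resize/grow contract (sizes are illustrative):
// ByteArray arr = BigArrays.newByteArray(10); // small -> backed by a plain byte[]
// arr = BigArrays.grow(arr, 100); // returns an oversized array, copying the old bytes
// grow() returns the same instance unchanged when minSize already fits.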
/** Allocate a new {@link IntArray} of the given capacity. */
public static IntArray newIntArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
if (size <= INT_PAGE_SIZE) {
return new IntArrayWrapper(new int[(int) size], recycler, clearOnResize);
} else {
return new BigIntArray(size, recycler, clearOnResize);
}
}
/** Allocate a new {@link IntArray} of the given capacity. */
public static IntArray newIntArray(long size) {
return newIntArray(size, null, true);
}
/** Resize the array to the exact provided size. */
public static IntArray resize(IntArray array, long size) {
if (array instanceof BigIntArray) {
((BigIntArray) array).resize(size);
return array;
} else {
AbstractArray arr = (AbstractArray) array;
final IntArray newArray = newIntArray(size, arr.recycler, arr.clearOnResize);
for (long i = 0, end = Math.min(size, array.size()); i < end; ++i) {
newArray.set(i, array.get(i));
}
return newArray;
}
}
/** Grow an array to a size that is at least <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
public static IntArray grow(IntArray array, long minSize) {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_INT);
return resize(array, newSize);
}
/** Allocate a new {@link LongArray} of the given capacity. */
public static LongArray newLongArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
if (size <= LONG_PAGE_SIZE) {
return new LongArrayWrapper(new long[(int) size], recycler, clearOnResize);
} else {
return new BigLongArray(size, recycler, clearOnResize);
}
}
/** Allocate a new {@link LongArray} of the given capacity. */
public static LongArray newLongArray(long size) {
return newLongArray(size, null, true);
}
/** Resize the array to the exact provided size. */
public static LongArray resize(LongArray array, long size) {
if (array instanceof BigLongArray) {
((BigLongArray) array).resize(size);
return array;
} else {
AbstractArray arr = (AbstractArray) array;
final LongArray newArray = newLongArray(size, arr.recycler, arr.clearOnResize);
for (long i = 0, end = Math.min(size, array.size()); i < end; ++i) {
newArray.set(i, array.get(i));
}
return newArray;
}
}
/** Grow an array to a size that is at least <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
public static LongArray grow(LongArray array, long minSize) {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG);
return resize(array, newSize);
}
/** Allocate a new {@link DoubleArray} of the given capacity. */
public static DoubleArray newDoubleArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
if (size <= LONG_PAGE_SIZE) {
return new DoubleArrayWrapper(new double[(int) size], recycler, clearOnResize);
} else {
return new BigDoubleArray(size, recycler, clearOnResize);
}
}
/** Allocate a new {@link DoubleArray} of the given capacity. */
public static DoubleArray newDoubleArray(long size) {
return newDoubleArray(size, null, true);
}
/** Resize the array to the exact provided size. */
public static DoubleArray resize(DoubleArray array, long size) {
if (array instanceof BigDoubleArray) {
((BigDoubleArray) array).resize(size);
return array;
} else {
AbstractArray arr = (AbstractArray) array;
final DoubleArray newArray = newDoubleArray(size, arr.recycler, arr.clearOnResize);
for (long i = 0, end = Math.min(size, array.size()); i < end; ++i) {
newArray.set(i, array.get(i));
}
return newArray;
}
}
/** Grow an array to a size that is at least <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
public static DoubleArray grow(DoubleArray array, long minSize) {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, DOUBLE_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_DOUBLE);
return resize(array, newSize);
}
/** Allocate a new {@link ObjectArray} of the given capacity. */
public static <T> ObjectArray<T> newObjectArray(long size, PageCacheRecycler recycler) {
if (size <= OBJECT_PAGE_SIZE) {
return new ObjectArrayWrapper<T>(new Object[(int) size], recycler);
} else {
return new BigObjectArray<T>(size, recycler);
}
}
/** Allocate a new {@link ObjectArray} of the given capacity. */
public static <T> ObjectArray<T> newObjectArray(long size) {
return newObjectArray(size, null);
}
/** Resize the array to the exact provided size. */
public static <T> ObjectArray<T> resize(ObjectArray<T> array, long size) {
if (array instanceof BigObjectArray) {
((BigObjectArray<?>) array).resize(size);
return array;
} else {
final ObjectArray<T> newArray = newObjectArray(size, ((AbstractArray) array).recycler);
for (long i = 0, end = Math.min(size, array.size()); i < end; ++i) {
newArray.set(i, array.get(i));
}
return newArray;
}
}
/** Grow an array to a size that is at least <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
public static <T> ObjectArray<T> grow(ObjectArray<T> array, long minSize) {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, OBJECT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
return resize(array, newSize);
}
} | 0true
| src_main_java_org_elasticsearch_common_util_BigArrays.java |
1,543 | static final class NodeSorter extends IntroSorter {
final ModelNode[] modelNodes;
/* the nodes weights with respect to the current weight function / index */
final float[] weights;
private final WeightFunction function;
private String index;
private final Balancer balancer;
private float pivotWeight;
public NodeSorter(ModelNode[] modelNodes, WeightFunction function, Balancer balancer) {
this.function = function;
this.balancer = balancer;
this.modelNodes = modelNodes;
weights = new float[modelNodes.length];
}
/**
* Resets the sorter, recalculates the weights per node and sorts the
* nodes by weight, with minimal weight first.
*/
public void reset(Operation operation, String index) {
this.index = index;
for (int i = 0; i < weights.length; i++) {
weights[i] = weight(operation, modelNodes[i]);
}
sort(0, modelNodes.length);
}
public float weight(Operation operation, ModelNode node) {
return function.weight(operation, balancer, node, index);
}
@Override
protected void swap(int i, int j) {
final ModelNode tmpNode = modelNodes[i];
modelNodes[i] = modelNodes[j];
modelNodes[j] = tmpNode;
final float tmpWeight = weights[i];
weights[i] = weights[j];
weights[j] = tmpWeight;
}
@Override
protected int compare(int i, int j) {
return Float.compare(weights[i], weights[j]);
}
@Override
protected void setPivot(int i) {
pivotWeight = weights[i];
}
@Override
protected int comparePivot(int j) {
return Float.compare(pivotWeight, weights[j]);
}
public float delta() {
return weights[weights.length - 1] - weights[0];
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_allocation_allocator_BalancedShardsAllocator.java |
1,627 | public class ScriptExecutorOperation extends Operation {
private String engineName;
private String script;
private Map<String, Object> bindings;
private Object result;
public ScriptExecutorOperation() {
}
public ScriptExecutorOperation(String engineName, String script, Map<String, Object> bindings) {
this.engineName = engineName;
this.script = script;
this.bindings = bindings;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
ScriptEngineManager scriptEngineManager = ScriptEngineManagerContext.getScriptEngineManager();
ScriptEngine engine = scriptEngineManager.getEngineByName(engineName);
if (engine == null) {
throw new IllegalArgumentException("Could not find ScriptEngine named '" + engineName + "'.");
}
engine.put("hazelcast", getNodeEngine().getHazelcastInstance());
if (bindings != null) {
Set<Map.Entry<String, Object>> entries = bindings.entrySet();
for (Map.Entry<String, Object> entry : entries) {
engine.put(entry.getKey(), entry.getValue());
}
}
try {
this.result = engine.eval(script);
} catch (ScriptException e) {
this.result = e.getMessage();
}
}
@Override
public void afterRun() throws Exception {
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
public Object getResponse() {
return result;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
out.writeUTF(engineName);
out.writeUTF(script);
if (bindings != null) {
out.writeInt(bindings.size());
Set<Map.Entry<String, Object>> entries = bindings.entrySet();
for (Map.Entry<String, Object> entry : entries) {
out.writeUTF(entry.getKey());
out.writeObject(entry.getValue());
}
} else {
out.writeInt(0);
}
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
engineName = in.readUTF();
script = in.readUTF();
int size = in.readInt();
if (size > 0) {
bindings = new HashMap<String, Object>(size);
for (int i = 0; i < size; i++) {
String key = in.readUTF();
Object value = in.readObject();
bindings.put(key, value);
}
}
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_management_operation_ScriptExecutorOperation.java |
458 | executor.execute(new Runnable() {
@Override
public void run() {
for (int i = 0; i < operations; i++) {
map2.remove("foo-" + i);
}
}
}, 60, EntryEventType.REMOVED, operations, 0.75, map1, map2); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_replicatedmap_ClientReplicatedMapTest.java |
1,841 | public interface MapMaxSizePolicy {
boolean overCapacity();
MaxSizeConfig getMaxSizeConfig();
} | 0true
| hazelcast_src_main_java_com_hazelcast_map_MapMaxSizePolicy.java |
981 | public static class Presentation {
public static class Tab {
public static class Name {
public static final String OrderItems = "OrderImpl_Order_Items_Tab";
}
public static class Order {
public static final int OrderItems = 2000;
}
}
public static class Group {
public static class Name {
}
public static class Order {
}
}
public static class FieldOrder {
public static final int PRODUCT = 2000;
public static final int SKU = 3000;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_DiscreteOrderItemImpl.java |
3,045 | class WrappedTermsConsumer extends TermsConsumer {
private TermsConsumer delegateTermsConsumer;
private BloomFilter bloomFilter;
public WrappedTermsConsumer(TermsConsumer termsConsumer, BloomFilter bloomFilter) {
this.delegateTermsConsumer = termsConsumer;
this.bloomFilter = bloomFilter;
}
@Override
public PostingsConsumer startTerm(BytesRef text) throws IOException {
return delegateTermsConsumer.startTerm(text);
}
@Override
public void finishTerm(BytesRef text, TermStats stats) throws IOException {
// Record this term in our BloomFilter
if (stats.docFreq > 0) {
bloomFilter.put(text);
}
delegateTermsConsumer.finishTerm(text, stats);
}
@Override
public void finish(long sumTotalTermFreq, long sumDocFreq, int docCount)
throws IOException {
delegateTermsConsumer.finish(sumTotalTermFreq, sumDocFreq, docCount);
}
@Override
public Comparator<BytesRef> getComparator() throws IOException {
return delegateTermsConsumer.getComparator();
}
} | 0true
| src_main_java_org_elasticsearch_index_codec_postingsformat_BloomFilterPostingsFormat.java |
289 | public class CassandraHelper {
public static List<ByteBuffer> convert(List<StaticBuffer> keys) {
List<ByteBuffer> requestKeys = new ArrayList<ByteBuffer>(keys.size());
for (int i = 0; i < keys.size(); i++) {
requestKeys.add(keys.get(i).asByteBuffer());
}
return requestKeys;
}
/**
* Constructs an {@link EntryList} from the Iterable of entries while excluding the end slice
* (since the method contract states that the end slice is exclusive, yet Cassandra treats it as
* inclusive) and respecting the limit.
*
* @param entries
* @param getter
* @param lastColumn TODO: make this StaticBuffer so we can avoid the conversion and provide equals method
* @param limit
* @param <E>
* @return
*/
public static<E> EntryList makeEntryList(final Iterable<E> entries,
final StaticArrayEntry.GetColVal<E,ByteBuffer> getter,
final StaticBuffer lastColumn, final int limit) {
return StaticArrayEntryList.ofByteBuffer(new Iterable<E>() {
@Override
public Iterator<E> iterator() {
return Iterators.filter(entries.iterator(),new FilterResultColumns<E>(lastColumn,limit,getter));
}
},getter);
}
private static class FilterResultColumns<E> implements Predicate<E> {
private int count = 0;
private final int limit;
private final StaticBuffer lastColumn;
private final StaticArrayEntry.GetColVal<E,ByteBuffer> getter;
private FilterResultColumns(StaticBuffer lastColumn, int limit, StaticArrayEntry.GetColVal<E, ByteBuffer> getter) {
this.limit = limit;
this.lastColumn = lastColumn;
this.getter = getter;
}
@Override
public boolean apply(@Nullable E e) {
assert e!=null;
if (count>=limit || BufferUtil.equals(lastColumn, getter.getColumn(e))) return false;
count++;
return true;
}
}
public static<E> Iterator<Entry> makeEntryIterator(final Iterable<E> entries,
final StaticArrayEntry.GetColVal<E,ByteBuffer> getter,
final StaticBuffer lastColumn, final int limit) {
return Iterators.transform(Iterators.filter(entries.iterator(),
new FilterResultColumns<E>(lastColumn, limit, getter)), new Function<E, Entry>() {
@Nullable
@Override
public Entry apply(@Nullable E e) {
return StaticArrayEntry.ofByteBuffer(e,getter);
}
});
}
public static KeyRange transformRange(Range<Token> range) {
return transformRange(range.left, range.right);
}
public static KeyRange transformRange(Token<?> leftKeyExclusive, Token<?> rightKeyInclusive) {
if (!(leftKeyExclusive instanceof BytesToken))
throw new UnsupportedOperationException();
// if the left part is a BytesToken, the right part should be too; otherwise the ring makes no sense
assert rightKeyInclusive instanceof BytesToken;
// l is exclusive, r is inclusive
BytesToken l = (BytesToken) leftKeyExclusive;
BytesToken r = (BytesToken) rightKeyInclusive;
Preconditions.checkArgument(l.token.length == r.token.length, "Tokens have unequal length");
int tokenLength = l.token.length;
byte[][] tokens = new byte[][]{l.token, r.token};
byte[][] plusOne = new byte[2][tokenLength];
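// add one to both tokens (big-endian, with carry): the exclusive left bound becomes inclusive
// and the inclusive right bound becomes exclusive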
for (int j = 0; j < 2; j++) {
boolean carry = true;
for (int i = tokenLength - 1; i >= 0; i--) {
byte b = tokens[j][i];
if (carry) {
b++;
carry = false;
}
if (b == 0) carry = true;
plusOne[j][i] = b;
}
}
StaticBuffer lb = StaticArrayBuffer.of(plusOne[0]);
StaticBuffer rb = StaticArrayBuffer.of(plusOne[1]);
Preconditions.checkArgument(lb.length() == tokenLength, lb.length());
Preconditions.checkArgument(rb.length() == tokenLength, rb.length());
return new KeyRange(lb, rb);
}
} | 0true
| titan-cassandra_src_main_java_com_thinkaurelius_titan_diskstorage_cassandra_utils_CassandraHelper.java |
309 | static final class Fields {
static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
static final XContentBuilderString STATUS = new XContentBuilderString("status");
static final XContentBuilderString TIMED_OUT = new XContentBuilderString("timed_out");
static final XContentBuilderString NUMBER_OF_NODES = new XContentBuilderString("number_of_nodes");
static final XContentBuilderString NUMBER_OF_DATA_NODES = new XContentBuilderString("number_of_data_nodes");
static final XContentBuilderString ACTIVE_PRIMARY_SHARDS = new XContentBuilderString("active_primary_shards");
static final XContentBuilderString ACTIVE_SHARDS = new XContentBuilderString("active_shards");
static final XContentBuilderString RELOCATING_SHARDS = new XContentBuilderString("relocating_shards");
static final XContentBuilderString INITIALIZING_SHARDS = new XContentBuilderString("initializing_shards");
static final XContentBuilderString UNASSIGNED_SHARDS = new XContentBuilderString("unassigned_shards");
static final XContentBuilderString VALIDATION_FAILURES = new XContentBuilderString("validation_failures");
static final XContentBuilderString INDICES = new XContentBuilderString("indices");
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_health_ClusterHealthResponse.java |
1,569 | class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
int clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance);
if (clusterConcurrentRebalance != ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance) {
logger.info("updating [cluster.routing.allocation.cluster_concurrent_rebalance] from [{}], to [{}]", ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance, clusterConcurrentRebalance);
ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance = clusterConcurrentRebalance;
}
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_allocation_decider_ConcurrentRebalanceAllocationDecider.java |
3,592 | public static class CustomLongNumericField extends CustomNumericField {
private final long number;
private final NumberFieldMapper mapper;
public CustomLongNumericField(NumberFieldMapper mapper, long number, FieldType fieldType) {
super(mapper, number, fieldType);
this.mapper = mapper;
this.number = number;
}
@Override
public TokenStream tokenStream(Analyzer analyzer) throws IOException {
if (fieldType().indexed()) {
return mapper.popCachedStream().setLongValue(number);
}
return null;
}
@Override
public String numericAsString() {
return Long.toString(number);
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_core_LongFieldMapper.java |
48 | {
@Override
public void masterIsElected( HighAvailabilityMemberChangeEvent event )
{
}
@Override
public void masterIsAvailable( HighAvailabilityMemberChangeEvent event )
{
if ( event.getOldState().equals( HighAvailabilityMemberState.TO_MASTER ) && event.getNewState().equals(
HighAvailabilityMemberState.MASTER ) )
{
doAfterRecoveryAndStartup( true );
}
}
@Override
public void slaveIsAvailable( HighAvailabilityMemberChangeEvent event )
{
if ( event.getOldState().equals( HighAvailabilityMemberState.TO_SLAVE ) && event.getNewState().equals(
HighAvailabilityMemberState.SLAVE ) )
{
doAfterRecoveryAndStartup( false );
}
}
@Override
public void instanceStops( HighAvailabilityMemberChangeEvent event )
{
}
private void doAfterRecoveryAndStartup( boolean isMaster )
{
try
{
synchronized ( xaDataSourceManager )
{
HighlyAvailableGraphDatabase.this.doAfterRecoveryAndStartup( isMaster );
}
}
catch ( Throwable throwable )
{
msgLog.error( "Post recovery error", throwable );
try
{
memberStateMachine.stop();
}
catch ( Throwable throwable1 )
{
msgLog.warn( "Could not stop", throwable1 );
}
try
{
memberStateMachine.start();
}
catch ( Throwable throwable1 )
{
msgLog.warn( "Could not start", throwable1 );
}
}
}
} ); | 1no label
| enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java |
2,146 | public static class Entry {
private final String name;
private final FastStringReader reader;
private final int startOffset;
private final float boost;
public Entry(String name, FastStringReader reader, int startOffset, float boost) {
this.name = name;
this.reader = reader;
this.startOffset = startOffset;
this.boost = boost;
}
public int startOffset() {
return startOffset;
}
public String name() {
return this.name;
}
public float boost() {
return this.boost;
}
public FastStringReader reader() {
return this.reader;
}
} | 0true
| src_main_java_org_elasticsearch_common_lucene_all_AllEntries.java |
116 | public static interface ForkJoinWorkerThreadFactory {
/**
* Returns a new worker thread operating in the given pool.
*
* @param pool the pool this thread works in
* @return the new worker thread
* @throws NullPointerException if the pool is null
*/
public ForkJoinWorkerThread newThread(ForkJoinPool pool);
} | 0true
| src_main_java_jsr166e_ForkJoinPool.java |
232 | public interface ModuleConfiguration extends Serializable {
public Long getId();
public void setId(Long id);
public String getModuleName();
public void setModuleName(String name);
public void setActiveStartDate(Date startDate);
public Date getActiveStartDate();
public void setActiveEndDate(Date endDate);
public Date getActiveEndDate();
public void setIsDefault(Boolean isDefault);
public Boolean getIsDefault();
public void setPriority(Integer priority);
public Integer getPriority();
public ModuleConfigurationType getModuleConfigurationType();
public void setAuditable(Auditable auditable);
public Auditable getAuditable();
} | 0true
| common_src_main_java_org_broadleafcommerce_common_config_domain_ModuleConfiguration.java |
1,007 | public class OStreamSerializerRecord implements OStreamSerializer {
public static final String NAME = "l";
public static final OStreamSerializerRecord INSTANCE = new OStreamSerializerRecord();
public String getName() {
return NAME;
}
/**
* Re-create any object if the class has a public constructor that accepts a String as its unique parameter.
*/
public Object fromStream(final byte[] iStream) throws IOException {
if (iStream == null || iStream.length == 0)
// NULL VALUE
return null;
final ORecordInternal<?> obj = Orient.instance().getRecordFactoryManager().newInstance();
final ORID rid = new ORecordId().fromStream(iStream);
obj.setIdentity(rid.getClusterId(), rid.getClusterPosition());
return obj;
}
public byte[] toStream(final Object iObject) throws IOException {
if (iObject == null)
return null;
if (((ORecord<?>) iObject).getIdentity() == null)
throw new OSerializationException("Cannot serialize record without identity. Store it before to serialize.");
return ((ORecord<?>) iObject).getIdentity().toStream();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_serialization_serializer_stream_OStreamSerializerRecord.java |
1,508 | @Component("blCartStateFilter")
/**
* <p>
* This filter should be configured after the BroadleafCommerce CustomerStateFilter listener from Spring Security.
* Retrieves the cart for the current BroadleafCommerce Customer based on the authenticated user, OR creates an empty non-modifiable cart and
* stores it in the request.
* </p>
*
* @author bpolster
*/
public class CartStateFilter extends GenericFilterBean implements Ordered {
/** Logger for this class and subclasses */
protected final Log LOG = LogFactory.getLog(getClass());
@Resource(name = "blCartStateRequestProcessor")
protected CartStateRequestProcessor cartStateProcessor;
@Override
@SuppressWarnings("unchecked")
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
cartStateProcessor.process(new ServletWebRequest((HttpServletRequest) request, (HttpServletResponse)response));
chain.doFilter(request, response);
}
@Override
public int getOrder() {
//FilterChainOrder has been dropped from Spring Security 3
//return FilterChainOrder.REMEMBER_ME_FILTER+1;
return 1502;
}
} | 1no label
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_order_security_CartStateFilter.java |
719 | public class DeleteRequest extends ShardReplicationOperationRequest<DeleteRequest> {
private String type;
private String id;
@Nullable
private String routing;
private boolean refresh;
private long version;
private VersionType versionType = VersionType.INTERNAL;
/**
* Constructs a new delete request against the specified index. The {@link #type(String)} and {@link #id(String)}
* must be set.
*/
public DeleteRequest(String index) {
this.index = index;
}
/**
* Constructs a new delete request against the specified index with the type and id.
*
* @param index The index to get the document from
* @param type The type of the document
* @param id The id of the document
*/
public DeleteRequest(String index, String type, String id) {
this.index = index;
this.type = type;
this.id = id;
}
public DeleteRequest(DeleteRequest request) {
super(request);
this.type = request.type();
this.id = request.id();
this.routing = request.routing();
this.refresh = request.refresh();
this.version = request.version();
this.versionType = request.versionType();
}
public DeleteRequest() {
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (type == null) {
validationException = addValidationError("type is missing", validationException);
}
if (id == null) {
validationException = addValidationError("id is missing", validationException);
}
return validationException;
}
/**
* The type of the document to delete.
*/
public String type() {
return type;
}
/**
* Sets the type of the document to delete.
*/
public DeleteRequest type(String type) {
this.type = type;
return this;
}
/**
* The id of the document to delete.
*/
public String id() {
return id;
}
/**
* Sets the id of the document to delete.
*/
public DeleteRequest id(String id) {
this.id = id;
return this;
}
/**
* Sets the parent id of this document. Will simply set the routing to this value, as it is only
* used for routing with delete requests.
*/
public DeleteRequest parent(String parent) {
if (routing == null) {
routing = parent;
}
return this;
}
/**
* Controls the shard routing of the request. This value is used to hash the shard,
* rather than the id.
*/
public DeleteRequest routing(String routing) {
if (routing != null && routing.length() == 0) {
this.routing = null;
} else {
this.routing = routing;
}
return this;
}
/**
* Controls the shard routing of the delete request. This value is used to hash the shard,
* rather than the id.
*/
public String routing() {
return this.routing;
}
/**
* Should a refresh be executed after this delete operation, making the deletion
* visible to search. Note, heavy indexing should not set this to <tt>true</tt>.
* Defaults to <tt>false</tt>.
*/
public DeleteRequest refresh(boolean refresh) {
this.refresh = refresh;
return this;
}
public boolean refresh() {
return this.refresh;
}
/**
* Sets the version, which will cause the delete operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
public DeleteRequest version(long version) {
this.version = version;
return this;
}
public long version() {
return this.version;
}
public DeleteRequest versionType(VersionType versionType) {
this.versionType = versionType;
return this;
}
public VersionType versionType() {
return this.versionType;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
type = in.readSharedString();
id = in.readString();
routing = in.readOptionalString();
refresh = in.readBoolean();
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeSharedString(type);
out.writeString(id);
out.writeOptionalString(routing());
out.writeBoolean(refresh);
out.writeLong(version);
out.writeByte(versionType.getValue());
}
@Override
public String toString() {
return "delete {[" + index + "][" + type + "][" + id + "]}";
}
} | 1no label
| src_main_java_org_elasticsearch_action_delete_DeleteRequest.java |
155 | archiveStructuredContentTemplate.send(archiveStructuredContentDestination, new MessageCreator() {
public Message createMessage(Session session) throws JMSException {
HashMap<String, String> objectMap = new HashMap<String,String>(2);
objectMap.put("nameKey", baseNameKey);
objectMap.put("typeKey", baseTypeKey);
return session.createObjectMessage(objectMap);
}
}); | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_message_jms_JMSArchivedStructuredContentPublisher.java |
1,269 | @SuppressWarnings("serial")
public class OStorageConfigurationSegment extends OStorageConfiguration {
private static final int START_SIZE = 10000;
private OSingleFileSegment segment;
public OStorageConfigurationSegment(final OStorageLocalAbstract iStorage) throws IOException {
super(iStorage);
segment = new OSingleFileSegment((OStorageLocalAbstract) storage, new OStorageFileConfiguration(null, getDirectory()
+ "/database.ocf", "classic", fileTemplate.maxSize, fileTemplate.fileIncrementSize));
}
public void close() throws IOException {
segment.close();
}
public void create() throws IOException {
segment.create(START_SIZE);
super.create();
}
@Override
public OStorageConfiguration load() throws OSerializationException {
try {
if (segment.getFile().exists())
segment.open();
else {
segment.create(START_SIZE);
// @COMPATIBILITY0.9.25
// CHECK FOR OLD VERSION OF DATABASE
final ORawBuffer rawRecord = storage.readRecord(CONFIG_RID, null, false, null, false).getResult();
if (rawRecord != null)
fromStream(rawRecord.buffer);
update();
return this;
}
final int size = segment.getFile().readInt(0);
byte[] buffer = new byte[size];
segment.getFile().read(OBinaryProtocol.SIZE_INT, buffer, size);
fromStream(buffer);
} catch (Exception e) {
throw new OSerializationException("Cannot load database's configuration. The database seems to be corrupted.", e);
}
return this;
}
@Override
public void update() throws OSerializationException {
try {
final OFile f = segment.getFile();
if (!f.isOpen())
return;
final byte[] buffer = toStream();
final int len = buffer.length + OBinaryProtocol.SIZE_INT;
if (len > f.getFilledUpTo())
f.allocateSpace(len - f.getFilledUpTo());
f.writeInt(0, buffer.length);
f.write(OBinaryProtocol.SIZE_INT, buffer);
} catch (Exception e) {
throw new OSerializationException("Error on update storage configuration", e);
}
}
public void synch() throws IOException {
segment.getFile().synch();
}
@Override
public void setSoftlyClosed(boolean softlyClosed) throws IOException {
segment.getFile().setSoftlyClosed(softlyClosed);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OStorageConfigurationSegment.java |
898 | public interface ORecordListener {
public enum EVENT {
CLEAR, RESET, MARSHALL, UNMARSHALL, UNLOAD, IDENTITY_CHANGED
}
public void onEvent(ORecord<?> iDocument, EVENT iEvent);
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_record_ORecordListener.java |
1,454 | public static enum OpType {
AND,
OR
} | 0true
| src_main_java_org_elasticsearch_cluster_node_DiscoveryNodeFilters.java |
499 | public interface Site extends Serializable {
/**
* Unique/internal id for a site.
* @return
*/
public Long getId();
/**
* Sets the internal id for a site.
* @param id
*/
public void setId(Long id);
/**
* The display name for a site.
* @return
*/
public String getName();
/**
* Sets the displayName for a site.
* @param name
*/
public void setName(String name);
/**
* @deprecated use {@link #getSiteResolutionType()}
* Intended to be used along with the #getSiteIdentifierValue()
* by the SiteResolver to determine if this is the current site.
*
* @return
*/
public String getSiteIdentifierType();
/**
* @deprecated Use {@link #setSiteResolutionType(SiteResolutionType)}
* Sets the site identifier type.
* @see #getSiteIdentifierType()
* @param siteIdentifierType
*/
public void setSiteIdentifierType(String siteIdentifierType);
/**
* Used along with {@link #getSiteResolutionType()} to determine the current
* Site for a given request.
*
* @return
*/
public String getSiteIdentifierValue();
/**
*
* @param siteIdentifierValue
*/
public void setSiteIdentifierValue(String siteIdentifierValue);
/**
* If null, then this is a single-site installation. Otherwise,
* each site must define its production sandbox so that data can
* be properly segmented.
*
* @return
*/
public SandBox getProductionSandbox();
/**
* Sets the production sandbox. Typically configured via the
* database.
*
* @see #getProductionSandbox();
* @param sandbox
*/
public void setProductionSandbox(SandBox sandbox);
/**
* Intended to be used along with the #getSiteIdentifierValue()
* by an implementation of SiteResolver to determine
* if this is the current site.
*
* @return
*/
public SiteResolutionType getSiteResolutionType();
/**
* Sets the site resolution type.
* @see #getSiteResolutionType()
* @param siteResolutionType
*/
public void setSiteResolutionType(SiteResolutionType siteResolutionType);
/**
* Retrieve a list of product, category and offer groupings that
* this site has access to
*
* @return a list of catalog groupings
*/
public List<Catalog> getCatalogs();
/**
* Set the list of product, category and offer groupings that
* this site has access to
*
* @param catalogs a list of catalog groupings
*/
public void setCatalogs(List<Catalog> catalogs);
/**
* Retrieve a deep copy of this site. Not bound by
* entity manager scope.
*
* @return a deep copy of this site
*/
public Site clone();
public boolean isDeactivated();
public void setDeactivated(boolean deactivated);
} | 0true
| common_src_main_java_org_broadleafcommerce_common_site_domain_Site.java |
1,390 | public abstract class HibernateTestSupport {
private final ILogger logger = Logger.getLogger(getClass());
@BeforeClass
@AfterClass
public static void tearUpAndDown() {
Hazelcast.shutdownAll();
}
@After
public final void cleanup() {
Hazelcast.shutdownAll();
}
protected void sleep(int seconds) {
try {
Thread.sleep(1000 * seconds);
} catch (InterruptedException e) {
logger.warning("", e);
}
}
protected static SessionFactory createSessionFactory(Properties props) {
Configuration conf = new Configuration();
URL xml = HibernateTestSupport.class.getClassLoader().getResource("test-hibernate.cfg.xml");
props.put(CacheEnvironment.EXPLICIT_VERSION_CHECK, "true");
conf.addProperties(props);
conf.configure(xml);
final SessionFactory sf = conf.buildSessionFactory();
sf.getStatistics().setStatisticsEnabled(true);
return sf;
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate4_src_test_java_com_hazelcast_hibernate_HibernateTestSupport.java |
2,823 | public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<BulgarianAnalyzer> {
private final BulgarianAnalyzer analyzer;
@Inject
public BulgarianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
analyzer = new BulgarianAnalyzer(version,
Analysis.parseStopWords(env, settings, BulgarianAnalyzer.getDefaultStopSet(), version),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
}
@Override
public BulgarianAnalyzer get() {
return this.analyzer;
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_BulgarianAnalyzerProvider.java |
2,959 | public class SortedIndexStore implements IndexStore {
private static final float LOAD_FACTOR = 0.75f;
private final ConcurrentMap<Comparable, ConcurrentMap<Data, QueryableEntry>> mapRecords
= new ConcurrentHashMap<Comparable, ConcurrentMap<Data, QueryableEntry>>(1000);
private final NavigableSet<Comparable> sortedSet = new ConcurrentSkipListSet<Comparable>();
@Override
public void getSubRecordsBetween(MultiResultSet results, Comparable from, Comparable to) {
Set<Comparable> values = sortedSet.subSet(from, to);
for (Comparable value : values) {
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(value);
if (records != null) {
results.addResultSet(records);
}
}
// to wasn't included so include now
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(to);
if (records != null) {
results.addResultSet(records);
}
}
@Override
public void getSubRecords(MultiResultSet results, ComparisonType comparisonType, Comparable searchedValue) {
Set<Comparable> values;
boolean notEqual = false;
switch (comparisonType) {
case LESSER:
values = sortedSet.headSet(searchedValue, false);
break;
case LESSER_EQUAL:
values = sortedSet.headSet(searchedValue, true);
break;
case GREATER:
values = sortedSet.tailSet(searchedValue, false);
break;
case GREATER_EQUAL:
values = sortedSet.tailSet(searchedValue, true);
break;
case NOT_EQUAL:
values = sortedSet;
notEqual = true;
break;
default:
throw new IllegalArgumentException("Unrecognized comparisonType:" + comparisonType);
}
for (Comparable value : values) {
if (notEqual && searchedValue.equals(value)) {
// skip this value if comparisonType is NOT_EQUAL
continue;
}
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(value);
if (records != null) {
results.addResultSet(records);
}
}
}
@Override
public void newIndex(Comparable newValue, QueryableEntry record) {
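// lazily create the per-value record map; NullObject markers are kept out of the sorted value set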
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(newValue);
if (records == null) {
records = new ConcurrentHashMap<Data, QueryableEntry>(1, LOAD_FACTOR, 1);
mapRecords.put(newValue, records);
if (!(newValue instanceof IndexImpl.NullObject)) {
sortedSet.add(newValue);
}
}
records.put(record.getIndexKey(), record);
}
@Override
public ConcurrentMap<Data, QueryableEntry> getRecordMap(Comparable indexValue) {
return mapRecords.get(indexValue);
}
@Override
public void clear() {
mapRecords.clear();
sortedSet.clear();
}
@Override
public void removeIndex(Comparable oldValue, Data indexKey) {
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(oldValue);
if (records != null) {
records.remove(indexKey);
if (records.size() == 0) {
mapRecords.remove(oldValue);
sortedSet.remove(oldValue);
}
}
}
@Override
public Set<QueryableEntry> getRecords(Comparable value) {
return new SingleResultSet(mapRecords.get(value));
}
@Override
public void getRecords(MultiResultSet results, Set<Comparable> values) {
for (Comparable value : values) {
ConcurrentMap<Data, QueryableEntry> records = mapRecords.get(value);
if (records != null) {
results.addResultSet(records);
}
}
}
@Override
public String toString() {
return "SortedIndexStore{"
+ "mapRecords=" + mapRecords.size()
+ '}';
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_query_impl_SortedIndexStore.java |
1,068 | public class TransportSingleShardMultiTermsVectorAction extends TransportShardSingleOperationAction<MultiTermVectorsShardRequest, MultiTermVectorsShardResponse> {
private final IndicesService indicesService;
@Inject
public TransportSingleShardMultiTermsVectorAction(Settings settings, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, ThreadPool threadPool) {
super(settings, threadPool, clusterService, transportService);
this.indicesService = indicesService;
}
@Override
protected String executor() {
return ThreadPool.Names.GET;
}
@Override
protected String transportAction() {
return MultiTermVectorsAction.NAME + "/shard";
}
@Override
protected MultiTermVectorsShardRequest newRequest() {
return new MultiTermVectorsShardRequest();
}
@Override
protected MultiTermVectorsShardResponse newResponse() {
return new MultiTermVectorsShardResponse();
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, MultiTermVectorsShardRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, MultiTermVectorsShardRequest request) {
return state.blocks().indexBlockedException(ClusterBlockLevel.READ, request.index());
}
@Override
protected ShardIterator shards(ClusterState state, MultiTermVectorsShardRequest request) {
return clusterService.operationRouting()
.getShards(clusterService.state(), request.index(), request.shardId(), request.preference());
}
@Override
protected void resolveRequest(ClusterState state, MultiTermVectorsShardRequest request) {
// no need to set concrete index and routing here, it has already been set by the multi term vectors action on the item
// request.index(state.metaData().concreteIndex(request.index()));
}
@Override
protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, int shardId) throws ElasticsearchException {
MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
for (int i = 0; i < request.locations.size(); i++) {
TermVectorRequest termVectorRequest = request.requests.get(i);
try {
IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.shardSafe(shardId);
TermVectorResponse termVectorResponse = indexShard.termVectorService().getTermVector(termVectorRequest);
response.add(request.locations.get(i), termVectorResponse);
} catch (Throwable t) {
if (TransportActions.isShardNotAvailableException(t)) {
throw (ElasticsearchException) t;
} else {
logger.debug("[{}][{}] failed to execute multi term vectors for [{}]/[{}]", t, request.index(), shardId, termVectorRequest.type(), termVectorRequest.id());
response.add(request.locations.get(i),
new MultiTermVectorsResponse.Failure(request.index(), termVectorRequest.type(), termVectorRequest.id(), ExceptionsHelper.detailedMessage(t)));
}
}
}
return response;
}
} | 0true
| src_main_java_org_elasticsearch_action_termvector_TransportSingleShardMultiTermsVectorAction.java |
912 | threadPool.generic().execute(new Runnable() {
@Override
public void run() {
ActionListener<T> lst = (ActionListener<T>) listener;
try {
lst.onResponse(actionGet());
} catch (ElasticsearchException e) {
lst.onFailure(e);
}
}
}); | 0true
| src_main_java_org_elasticsearch_action_support_AbstractListenableActionFuture.java |
2,924 | public class PreBuiltCharFilterFactoryFactoryTests {
@Test
public void testThatDifferentVersionsCanBeLoaded() {
PreBuiltCharFilterFactoryFactory factory = new PreBuiltCharFilterFactoryFactory(PreBuiltCharFilters.HTML_STRIP.getCharFilterFactory(Version.CURRENT));
CharFilterFactory emptySettingsCharFilterFactory = factory.create("html_strip", ImmutableSettings.EMPTY);
CharFilterFactory former090CharFilterFactory = factory.create("html_strip", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build());
CharFilterFactory former090CharFilterFactoryCopy = factory.create("html_strip", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build());
CharFilterFactory currentCharFilterFactory = factory.create("html_strip", ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
assertThat(emptySettingsCharFilterFactory, is(currentCharFilterFactory));
assertThat(emptySettingsCharFilterFactory, is(former090CharFilterFactory));
assertThat(emptySettingsCharFilterFactory, is(former090CharFilterFactoryCopy));
}
} | 0true
| src_test_java_org_elasticsearch_index_analysis_PreBuiltCharFilterFactoryFactoryTests.java |
1,346 | public class OWriteAheadLog {
private static final long ONE_KB = 1024L;
public static final String MASTER_RECORD_EXTENSION = ".wmr";
public static final String WAL_SEGMENT_EXTENSION = ".wal";
private OLogSequenceNumber lastCheckpoint;
private final Object syncObject = new Object();
private final List<LogSegment> logSegments = new ArrayList<LogSegment>();
private boolean useFirstMasterRecord = true;
private final int maxPagesCacheSize;
private final int commitDelay;
private final long maxSegmentSize;
private final long maxLogSize;
private long logSize;
private final File walLocation;
private File masterRecordFile;
private final RandomAccessFile masterRecordLSNHolder;
private OLogSequenceNumber firstMasterRecord;
private OLogSequenceNumber secondMasterRecord;
private volatile OLogSequenceNumber flushedLsn;
private final OStorageLocalAbstract storage;
private boolean closed;
private static String calculateWalPath(OStorageLocalAbstract storage) {
String walPath = OGlobalConfiguration.WAL_LOCATION.getValueAsString();
if (walPath == null)
walPath = storage.getStoragePath();
return walPath;
}
public OWriteAheadLog(OStorageLocalAbstract storage) throws IOException {
this(OGlobalConfiguration.WAL_CACHE_SIZE.getValueAsInteger(), OGlobalConfiguration.WAL_COMMIT_TIMEOUT.getValueAsInteger(),
OGlobalConfiguration.WAL_MAX_SEGMENT_SIZE.getValueAsInteger() * ONE_KB * ONE_KB, OGlobalConfiguration.WAL_MAX_SIZE
.getValueAsInteger() * ONE_KB * ONE_KB, storage);
}
public OWriteAheadLog(int maxPagesCacheSize, int commitDelay, long maxSegmentSize, long maxLogSize, OStorageLocalAbstract storage)
throws IOException {
this.maxPagesCacheSize = maxPagesCacheSize;
this.commitDelay = commitDelay;
this.maxSegmentSize = maxSegmentSize;
this.maxLogSize = maxLogSize;
this.storage = storage;
try {
this.walLocation = new File(calculateWalPath(this.storage));
File[] walFiles = this.walLocation.listFiles(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return validateName(name);
}
});
if (walFiles == null)
throw new IllegalStateException(
"Location passed in WAL does not exist, or IO error was happened. DB can not work in durable mode in such case.");
if (walFiles.length == 0) {
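// no existing WAL segments: start a fresh log with a single empty segment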
LogSegment logSegment = new LogSegment(new File(this.walLocation, getSegmentName(0)), maxPagesCacheSize);
logSegment.init();
logSegment.startFlush();
logSegments.add(logSegment);
logSize = 0;
flushedLsn = null;
} else {
for (File walFile : walFiles) {
LogSegment logSegment = new LogSegment(walFile, maxPagesCacheSize);
logSegment.init();
logSegments.add(logSegment);
logSize += logSegment.filledUpTo();
}
Collections.sort(logSegments);
logSegments.get(logSegments.size() - 1).startFlush();
flushedLsn = readFlushedLSN();
}
masterRecordFile = new File(walLocation, this.storage.getName() + MASTER_RECORD_EXTENSION);
masterRecordLSNHolder = new RandomAccessFile(masterRecordFile, "rws");
if (masterRecordLSNHolder.length() > 0) {
firstMasterRecord = readMasterRecord(this.storage.getName(), 0);
secondMasterRecord = readMasterRecord(this.storage.getName(), 1);
if (firstMasterRecord == null) {
useFirstMasterRecord = true;
lastCheckpoint = secondMasterRecord;
} else if (secondMasterRecord == null) {
useFirstMasterRecord = false;
lastCheckpoint = firstMasterRecord;
} else {
if (firstMasterRecord.compareTo(secondMasterRecord) >= 0) {
lastCheckpoint = firstMasterRecord;
useFirstMasterRecord = false;
} else {
lastCheckpoint = secondMasterRecord;
useFirstMasterRecord = true;
}
}
}
fixMasterRecords();
} catch (FileNotFoundException e) {
// never happens
OLogManager.instance().error(this, "Error during file initialization for storage %s", e, this.storage.getName());
throw new IllegalStateException("Error during file initialization for storage " + this.storage.getName(), e);
}
}
public File getWalLocation() {
return walLocation;
}
public OLogSequenceNumber begin() throws IOException {
synchronized (syncObject) {
checkForClose();
LogSegment first = logSegments.get(0);
if (first.filledUpTo() == 0)
return null;
return first.begin();
}
}
public OLogSequenceNumber end() throws IOException {
synchronized (syncObject) {
checkForClose();
int lastIndex = logSegments.size() - 1;
LogSegment last = logSegments.get(lastIndex);
while (last.filledUpTo == 0) {
lastIndex--;
if (lastIndex >= 0)
last = logSegments.get(lastIndex);
else
return null;
}
return last.end();
}
}
public void flush() {
synchronized (syncObject) {
checkForClose();
LogSegment last = logSegments.get(logSegments.size() - 1);
last.flush();
}
}
private void fixMasterRecords() throws IOException {
if (firstMasterRecord != null) {
int index = (int) (firstMasterRecord.getSegment() - logSegments.get(0).getOrder());
if (logSegments.size() <= index || index < 0) {
firstMasterRecord = null;
} else {
LogSegment firstMasterRecordSegment = logSegments.get(index);
if (firstMasterRecordSegment.filledUpTo() <= firstMasterRecord.getPosition())
firstMasterRecord = null;
}
}
if (secondMasterRecord != null) {
int index = (int) (secondMasterRecord.getSegment() - logSegments.get(0).getOrder());
if (logSegments.size() <= index || index < 0) {
secondMasterRecord = null;
} else {
LogSegment secondMasterRecordSegment = logSegments.get(index);
if (secondMasterRecordSegment.filledUpTo() <= secondMasterRecord.getPosition())
secondMasterRecord = null;
}
}
if (firstMasterRecord != null && secondMasterRecord != null)
return;
if (firstMasterRecord == null && secondMasterRecord == null) {
masterRecordLSNHolder.setLength(0);
masterRecordLSNHolder.getFD().sync();
lastCheckpoint = null;
} else {
if (secondMasterRecord == null)
secondMasterRecord = firstMasterRecord;
else
firstMasterRecord = secondMasterRecord;
lastCheckpoint = firstMasterRecord;
writeMasterRecord(0, firstMasterRecord);
writeMasterRecord(1, secondMasterRecord);
}
}
private OLogSequenceNumber readMasterRecord(String storageName, int index) throws IOException {
CRC32 crc32 = new CRC32();
try {
masterRecordLSNHolder.seek(index * (OIntegerSerializer.INT_SIZE + 2 * OLongSerializer.LONG_SIZE));
int firstCRC = masterRecordLSNHolder.readInt();
long segment = masterRecordLSNHolder.readLong();
long position = masterRecordLSNHolder.readLong();
byte[] serializedLSN = new byte[2 * OLongSerializer.LONG_SIZE];
OLongSerializer.INSTANCE.serialize(segment, serializedLSN, 0);
OLongSerializer.INSTANCE.serialize(position, serializedLSN, OLongSerializer.LONG_SIZE);
crc32.update(serializedLSN);
if (firstCRC != ((int) crc32.getValue())) {
OLogManager.instance().error(this, "Can not restore %d WAL master record for storage %s crc check is failed", index,
storageName);
return null;
}
return new OLogSequenceNumber(segment, position);
} catch (EOFException eofException) {
OLogManager.instance().warn(this, "Can not restore %d WAL master record for storage %s", index, storageName);
return null;
}
}
private void writeMasterRecord(int index, OLogSequenceNumber masterRecord) throws IOException {
masterRecordLSNHolder.seek(index * (OIntegerSerializer.INT_SIZE + 2 * OLongSerializer.LONG_SIZE));
CRC32 crc32 = new CRC32();
byte[] serializedLSN = new byte[2 * OLongSerializer.LONG_SIZE];
OLongSerializer.INSTANCE.serialize(masterRecord.getSegment(), serializedLSN, 0);
OLongSerializer.INSTANCE.serialize(masterRecord.getPosition(), serializedLSN, OLongSerializer.LONG_SIZE);
crc32.update(serializedLSN);
masterRecordLSNHolder.writeInt((int) crc32.getValue());
masterRecordLSNHolder.writeLong(masterRecord.getSegment());
masterRecordLSNHolder.writeLong(masterRecord.getPosition());
}
private String getSegmentName(long order) {
return storage.getName() + "." + order + WAL_SEGMENT_EXTENSION;
}
public OLogSequenceNumber logFuzzyCheckPointStart() throws IOException {
synchronized (syncObject) {
checkForClose();
OFuzzyCheckpointStartRecord record = new OFuzzyCheckpointStartRecord(lastCheckpoint);
log(record);
return record.getLsn();
}
}
public OLogSequenceNumber logFuzzyCheckPointEnd() throws IOException {
synchronized (syncObject) {
checkForClose();
OFuzzyCheckpointEndRecord record = new OFuzzyCheckpointEndRecord();
log(record);
return record.getLsn();
}
}
public OLogSequenceNumber log(OWALRecord record) throws IOException {
synchronized (syncObject) {
checkForClose();
final byte[] serializedForm = OWALRecordsFactory.INSTANCE.toStream(record);
LogSegment last = logSegments.get(logSegments.size() - 1);
long lastSize = last.filledUpTo();
final OLogSequenceNumber lsn = last.logRecord(serializedForm);
record.setLsn(lsn);
if (record.isUpdateMasterRecord()) {
lastCheckpoint = lsn;
if (useFirstMasterRecord) {
firstMasterRecord = lsn;
writeMasterRecord(0, firstMasterRecord);
useFirstMasterRecord = false;
} else {
secondMasterRecord = lsn;
writeMasterRecord(1, secondMasterRecord);
useFirstMasterRecord = true;
}
}
final long sizeDiff = last.filledUpTo() - lastSize;
logSize += sizeDiff;
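// if the total WAL size now exceeds the limit, drop the oldest segment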
if (logSize >= maxLogSize) {
LogSegment first = logSegments.get(0);
first.stopFlush(false);
logSize -= first.filledUpTo();
first.delete(false);
logSegments.remove(0);
fixMasterRecords();
}
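// if the active segment is full, switch flushing over to a brand new segment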
if (last.filledUpTo() >= maxSegmentSize) {
last.stopFlush(true);
last = new LogSegment(new File(walLocation, getSegmentName(last.getOrder() + 1)), maxPagesCacheSize);
last.init();
last.startFlush();
logSegments.add(last);
}
return lsn;
}
}
public long size() {
synchronized (syncObject) {
return logSize;
}
}
public void shrinkTill(OLogSequenceNumber lsn) throws IOException {
if (lsn == null)
return;
synchronized (syncObject) {
ListIterator<LogSegment> iterator = logSegments.listIterator(logSegments.size());
while (iterator.hasPrevious()) {
final LogSegment logSegment = iterator.previous();
if (logSegment.end() == null || logSegment.end().compareTo(lsn) >= 0)
continue;
logSegment.delete(false);
iterator.remove();
}
}
}
public void close() throws IOException {
close(true);
}
public void close(boolean flush) throws IOException {
synchronized (syncObject) {
if (closed)
return;
closed = true;
for (LogSegment logSegment : logSegments)
logSegment.close(flush);
masterRecordLSNHolder.close();
}
}
private void checkForClose() {
if (closed)
throw new OStorageException("WAL log " + walLocation + " has been closed");
}
public void delete() throws IOException {
delete(false);
}
public void delete(boolean flush) throws IOException {
synchronized (syncObject) {
close(flush);
for (LogSegment logSegment : logSegments)
logSegment.delete(false);
boolean deleted = masterRecordFile.delete();
while (!deleted) {
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
deleted = !masterRecordFile.exists() || masterRecordFile.delete();
}
}
}
public void logDirtyPages(Set<ODirtyPage> dirtyPages) throws IOException {
synchronized (syncObject) {
checkForClose();
log(new ODirtyPagesRecord(dirtyPages));
}
}
public OLogSequenceNumber getLastCheckpoint() {
synchronized (syncObject) {
checkForClose();
return lastCheckpoint;
}
}
public OWALRecord read(OLogSequenceNumber lsn) throws IOException {
synchronized (syncObject) {
checkForClose();
long segment = lsn.getSegment();
int index = (int) (segment - logSegments.get(0).getOrder());
if (index < 0 || index >= logSegments.size())
return null;
LogSegment logSegment = logSegments.get(index);
byte[] recordEntry = logSegment.readRecord(lsn);
if (recordEntry == null)
return null;
final OWALRecord record = OWALRecordsFactory.INSTANCE.fromStream(recordEntry);
record.setLsn(lsn);
return record;
}
}
public OLogSequenceNumber next(OLogSequenceNumber lsn) throws IOException {
synchronized (syncObject) {
checkForClose();
long order = lsn.getSegment();
int index = (int) (order - logSegments.get(0).getOrder());
if (index < 0 || index >= logSegments.size())
return null;
LogSegment logSegment = logSegments.get(index);
OLogSequenceNumber nextLSN = logSegment.getNextLSN(lsn);
if (nextLSN == null) {
index++;
if (index >= logSegments.size())
return null;
LogSegment nextSegment = logSegments.get(index);
if (nextSegment.filledUpTo() == 0)
return null;
nextLSN = nextSegment.begin();
}
return nextLSN;
}
}
public OLogSequenceNumber getFlushedLSN() {
synchronized (syncObject) {
checkForClose();
return flushedLsn;
}
}
private OLogSequenceNumber readFlushedLSN() throws IOException {
int segment = logSegments.size() - 1;
while (segment >= 0) {
LogSegment logSegment = logSegments.get(segment);
OLogSequenceNumber flushedLSN = logSegment.readFlushedLSN();
if (flushedLSN == null)
segment--;
else
return flushedLSN;
}
return null;
}
public static boolean validateName(String name) {
if (!name.toLowerCase().endsWith(".wal"))
return false;
int walOrderStartIndex = name.indexOf('.');
if (walOrderStartIndex == name.length() - 4)
return false;
int walOrderEndIndex = name.indexOf('.', walOrderStartIndex + 1);
String walOrder = name.substring(walOrderStartIndex + 1, walOrderEndIndex);
try {
Integer.parseInt(walOrder);
} catch (NumberFormatException e) {
return false;
}
return true;
}
public OLogSequenceNumber logFullCheckpointStart() throws IOException {
return log(new OFullCheckpointStartRecord(lastCheckpoint));
}
public void logFullCheckpointEnd() throws IOException {
synchronized (syncObject) {
checkForClose();
log(new OCheckpointEndRecord());
}
}
private final class LogSegment implements Comparable<LogSegment> {
private final RandomAccessFile rndFile;
private final File file;
private long filledUpTo;
private final long order;
private final int maxPagesCacheSize;
private boolean closed;
private OWALPage currentPage;
private final ConcurrentLinkedQueue<OWALPage> pagesCache = new ConcurrentLinkedQueue<OWALPage>();
private long nextPositionToFlush;
private long flushId;
private final ScheduledExecutorService commitExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r);
thread.setDaemon(true);
thread.setName("WAL Flush Task");
return thread;
}
});
private OLogSequenceNumber last = null;
private volatile boolean flushNewData = true;
private LogSegment(File file, int maxPagesCacheSize) throws IOException {
this.file = file;
this.maxPagesCacheSize = maxPagesCacheSize;
order = extractOrder(file.getName());
closed = false;
rndFile = new RandomAccessFile(file, "rw");
}
public void startFlush() {
if (commitDelay > 0)
commitExecutor.scheduleAtFixedRate(new FlushTask(), commitDelay, commitDelay, TimeUnit.MILLISECONDS);
}
public void stopFlush(boolean flush) {
if (flush)
flush();
if (!commitExecutor.isShutdown()) {
commitExecutor.shutdown();
try {
if (!commitExecutor
.awaitTermination(OGlobalConfiguration.WAL_SHUTDOWN_TIMEOUT.getValueAsInteger(), TimeUnit.MILLISECONDS))
throw new OStorageException("WAL flush task for " + getPath() + " segment can not be stopped.");
} catch (InterruptedException e) {
OLogManager.instance().error(this, "Can not shutdown background WAL commit thread.");
}
}
}
public long getOrder() {
return order;
}
public void init() throws IOException {
selfCheck();
initPageCache();
initLastPage();
}
private void initLastPage() throws IOException {
synchronized (rndFile) {
long pagesCount = rndFile.length() / OWALPage.PAGE_SIZE;
long currentPage = pagesCount - 1;
if (currentPage < 0)
return;
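// scan pages from the end of the file until one containing a real (non-tail) record is found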
do {
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.readFully(content);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(content);
try {
final OWALPage page = new OWALPage(pointer, false);
int lastPosition = findLastRecord(page, true);
if (lastPosition > -1) {
last = new OLogSequenceNumber(order, currentPage * OWALPage.PAGE_SIZE + lastPosition);
return;
}
currentPage--;
} finally {
pointer.free();
}
} while (currentPage >= 0);
}
}
private void initPageCache() throws IOException {
synchronized (rndFile) {
long pagesCount = rndFile.length() / OWALPage.PAGE_SIZE;
if (pagesCount == 0)
return;
rndFile.seek((pagesCount - 1) * OWALPage.PAGE_SIZE);
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.readFully(content);
flushId = OLongSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_ID_OFFSET);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(content);
currentPage = new OWALPage(pointer, false);
filledUpTo = (pagesCount - 1) * OWALPage.PAGE_SIZE + currentPage.getFilledUpTo();
nextPositionToFlush = (pagesCount - 1) * OWALPage.PAGE_SIZE;
pagesCache.add(currentPage);
}
}
private long extractOrder(String name) {
int walOrderStartIndex = name.indexOf('.') + 1;
int walOrderEndIndex = name.indexOf('.', walOrderStartIndex);
String walOrder = name.substring(walOrderStartIndex, walOrderEndIndex);
try {
return Long.parseLong(walOrder);
} catch (NumberFormatException e) {
// should never happen: segment names are validated before segments are loaded
throw new IllegalStateException(e);
}
}
@Override
public int compareTo(LogSegment other) {
final long otherOrder = other.order;
if (order > otherOrder)
return 1;
else if (order < otherOrder)
return -1;
return 0;
}
public long filledUpTo() throws IOException {
return filledUpTo;
}
public OLogSequenceNumber begin() throws IOException {
if (!pagesCache.isEmpty())
return new OLogSequenceNumber(order, OWALPage.RECORDS_OFFSET);
if (rndFile.length() > 0)
return new OLogSequenceNumber(order, OWALPage.RECORDS_OFFSET);
return null;
}
public OLogSequenceNumber end() {
return last;
}
private int findLastRecord(OWALPage page, boolean skipTailRecords) {
int prevOffset = OWALPage.RECORDS_OFFSET;
int pageOffset = OWALPage.RECORDS_OFFSET;
int maxOffset = page.getFilledUpTo();
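// Walk the page's entries sequentially so prevOffset ends up at the last one;
// a "tail" entry is the continuation of a record started on a previous page.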
while (pageOffset < maxOffset) {
prevOffset = pageOffset;
pageOffset += page.getSerializedRecordSize(pageOffset);
}
if (skipTailRecords && page.recordTail(prevOffset))
return -1;
return prevOffset;
}
public void delete(boolean flush) throws IOException {
close(flush);
boolean deleted = file.delete();
while (!deleted) {
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
deleted = !file.exists() || file.delete();
}
}
public String getPath() {
return file.getAbsolutePath();
}
public OLogSequenceNumber logRecord(byte[] record) throws IOException {
flushNewData = true;
int pageOffset = (int) (filledUpTo % OWALPage.PAGE_SIZE);
long pageIndex = filledUpTo / OWALPage.PAGE_SIZE;
if (pageOffset == 0 && pageIndex > 0)
pageIndex--;
int pos = 0;
boolean firstChunk = true;
OLogSequenceNumber lsn = null;
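// Append the record chunk by chunk: a fresh page is allocated whenever the current
// one cannot hold even a minimal record, so a single record may span several pages.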
while (pos < record.length) {
if (currentPage == null) {
ODirectMemoryPointer pointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
currentPage = new OWALPage(pointer, true);
pagesCache.add(currentPage);
filledUpTo += OWALPage.RECORDS_OFFSET;
}
int freeSpace = currentPage.getFreeSpace();
if (freeSpace < OWALPage.MIN_RECORD_SIZE) {
filledUpTo += freeSpace + OWALPage.RECORDS_OFFSET;
ODirectMemoryPointer pointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
currentPage = new OWALPage(pointer, true);
pagesCache.add(currentPage);
pageIndex++;
freeSpace = currentPage.getFreeSpace();
}
final OWALPage walPage = currentPage;
synchronized (walPage) {
final int entrySize = OWALPage.calculateSerializedSize(record.length - pos);
int addedChunkOffset;
if (entrySize <= freeSpace) {
if (pos == 0)
addedChunkOffset = walPage.appendRecord(record, false, !firstChunk);
else
addedChunkOffset = walPage.appendRecord(Arrays.copyOfRange(record, pos, record.length), false, !firstChunk);
pos = record.length;
} else {
int chunkSize = OWALPage.calculateRecordSize(freeSpace);
if (chunkSize > record.length - pos)
chunkSize = record.length - pos;
addedChunkOffset = walPage.appendRecord(Arrays.copyOfRange(record, pos, pos + chunkSize), true, !firstChunk);
pos += chunkSize;
}
if (firstChunk) {
lsn = new OLogSequenceNumber(order, pageIndex * OWALPage.PAGE_SIZE + addedChunkOffset);
}
int spaceDiff = freeSpace - walPage.getFreeSpace();
filledUpTo += spaceDiff;
firstChunk = false;
}
}
if (pagesCache.size() > maxPagesCacheSize) {
OLogManager.instance().info(this, "Max cache limit is reached (%d vs. %d), sync flush is performed.", maxPagesCacheSize,
pagesCache.size());
flush();
}
last = lsn;
return last;
}
public byte[] readRecord(OLogSequenceNumber lsn) throws IOException {
assert lsn.getSegment() == order;
if (lsn.getPosition() >= filledUpTo)
return null;
if (flushedLsn == null || flushedLsn.compareTo(lsn) < 0)
flush();
byte[] record = null;
long pageIndex = lsn.getPosition() / OWALPage.PAGE_SIZE;
int pageOffset = (int) (lsn.getPosition() % OWALPage.PAGE_SIZE);
long pageCount = (filledUpTo + OWALPage.PAGE_SIZE - 1) / OWALPage.PAGE_SIZE;
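// A record may span several pages; keep appending chunks while the current page
// reports that the record continues on the next one.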
while (pageIndex < pageCount) {
synchronized (rndFile) {
byte[] pageContent = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(pageIndex * OWALPage.PAGE_SIZE);
rndFile.readFully(pageContent);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(pageContent);
try {
OWALPage page = new OWALPage(pointer, false);
byte[] content = page.getRecord(pageOffset);
if (record == null)
record = content;
else {
byte[] oldRecord = record;
record = new byte[record.length + content.length];
System.arraycopy(oldRecord, 0, record, 0, oldRecord.length);
System.arraycopy(content, 0, record, oldRecord.length, record.length - oldRecord.length);
}
if (page.mergeWithNextPage(pageOffset)) {
pageOffset = OWALPage.RECORDS_OFFSET;
pageIndex++;
} else
break;
} finally {
pointer.free();
}
}
}
return record;
}
public OLogSequenceNumber getNextLSN(OLogSequenceNumber lsn) throws IOException {
final byte[] record = readRecord(lsn);
if (record == null)
return null;
long pos = lsn.getPosition();
long pageIndex = pos / OWALPage.PAGE_SIZE;
int pageOffset = (int) (pos - pageIndex * OWALPage.PAGE_SIZE);
int restOfRecord = record.length;
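// Advance 'pos' past every chunk of the record just read, adding the page header
// (RECORDS_OFFSET) each time a page boundary is crossed.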
while (restOfRecord > 0) {
int entrySize = OWALPage.calculateSerializedSize(restOfRecord);
if (entrySize + pageOffset < OWALPage.PAGE_SIZE) {
if (entrySize + pageOffset <= OWALPage.PAGE_SIZE - OWALPage.MIN_RECORD_SIZE)
pos += entrySize;
else
pos += OWALPage.PAGE_SIZE - pageOffset + OWALPage.RECORDS_OFFSET;
break;
} else if (entrySize + pageOffset == OWALPage.PAGE_SIZE) {
pos += entrySize + OWALPage.RECORDS_OFFSET;
break;
} else {
int chunkSize = OWALPage.calculateRecordSize(OWALPage.PAGE_SIZE - pageOffset);
restOfRecord -= chunkSize;
pos += OWALPage.PAGE_SIZE - pageOffset + OWALPage.RECORDS_OFFSET;
pageOffset = OWALPage.RECORDS_OFFSET;
}
}
if (pos >= filledUpTo)
return null;
return new OLogSequenceNumber(order, pos);
}
public void close(boolean flush) throws IOException {
if (!closed) {
stopFlush(flush);
rndFile.close();
closed = true;
if (!pagesCache.isEmpty()) {
for (OWALPage page : pagesCache)
page.getPagePointer().free();
}
currentPage = null;
}
}
private void selfCheck() throws IOException {
if (!pagesCache.isEmpty())
throw new IllegalStateException("WAL cache is not empty, we can not verify WAL after it was started to be used");
synchronized (rndFile) {
long pagesCount = rndFile.length() / OWALPage.PAGE_SIZE;
if (rndFile.length() % OWALPage.PAGE_SIZE > 0) {
OLogManager.instance().error(this, "Last WAL page was written partially, auto fix.");
rndFile.setLength(OWALPage.PAGE_SIZE * pagesCount);
}
long currentPage = pagesCount - 1;
CRC32 crc32 = new CRC32();
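// First pass: truncate trailing pages whose stored CRC does not match their
// content, i.e. pages left behind by a partial flush.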
while (currentPage >= 0) {
crc32.reset();
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
int pageCRC = OIntegerSerializer.INSTANCE.deserializeNative(content, 0);
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
int calculatedCRC = (int) crc32.getValue();
if (pageCRC != calculatedCRC) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage);
currentPage--;
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
} else
break;
}
if (currentPage < 0)
return;
byte[] content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
currentPage--;
long initialFlushId = OLongSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_ID_OFFSET);
long loadedFlushId = initialFlushId;
int flushedPagesCount = 1;
while (currentPage >= 0) {
content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
crc32.reset();
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
int calculatedCRC = (int) crc32.getValue();
int pageCRC = OIntegerSerializer.INSTANCE.deserializeNative(content, 0);
if (pageCRC != calculatedCRC) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage);
currentPage--;
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
flushedPagesCount = 0;
} else {
loadedFlushId = OLongSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_ID_OFFSET);
if (loadedFlushId == initialFlushId) {
flushedPagesCount++;
currentPage--;
} else
break;
}
}
if (flushedPagesCount != 0) {
content = new byte[OWALPage.PAGE_SIZE];
rndFile.seek((currentPage + 1) * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
final int firstFlushIndex = OIntegerSerializer.INSTANCE.deserializeNative(content, OWALPage.FLUSH_INDEX_OFFSET);
if (firstFlushIndex != 0) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage + 1);
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
flushedPagesCount = 0;
}
}
currentPage += flushedPagesCount;
while (currentPage >= 0) {
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
rndFile.readFully(content);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(content);
try {
OWALPage page = new OWALPage(pointer, false);
int pageOffset = findLastRecord(page, false);
if (pageOffset >= 0) {
if (page.mergeWithNextPage(pageOffset)) {
page.truncateTill(pageOffset);
rndFile.seek(currentPage * OWALPage.PAGE_SIZE);
content = pointer.get(0, OWALPage.PAGE_SIZE);
rndFile.write(content);
if (page.isEmpty()) {
OLogManager.instance().error(this, "%d WAL page has been broken and will be truncated.", currentPage);
currentPage--;
pagesCount = currentPage + 1;
rndFile.setLength(pagesCount * OWALPage.PAGE_SIZE);
} else
break;
} else
break;
} else
break;
} finally {
pointer.free();
}
}
rndFile.getFD().sync();
}
}
public OLogSequenceNumber readFlushedLSN() throws IOException {
long pages = rndFile.length() / OWALPage.PAGE_SIZE;
if (pages == 0)
return null;
long pageIndex = pages - 1;
while (true) {
rndFile.seek(pageIndex * OWALPage.PAGE_SIZE);
byte[] pageContent = new byte[OWALPage.PAGE_SIZE];
rndFile.readFully(pageContent);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(pageContent);
try {
OWALPage page = new OWALPage(pointer, false);
int pageOffset = findLastRecord(page, true);
if (pageOffset < 0) {
pageIndex--;
if (pageIndex < 0)
return null;
continue;
}
return new OLogSequenceNumber(order, pageIndex * OWALPage.PAGE_SIZE + pageOffset);
} finally {
pointer.free();
}
}
}
public void flush() {
if (!commitExecutor.isShutdown()) {
try {
commitExecutor.submit(new FlushTask()).get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new OStorageException("Thread was interrupted during flush", e);
} catch (ExecutionException e) {
throw new OStorageException("Error during WAL segment " + getPath() + " flush.");
}
} else {
new FlushTask().run();
}
}
private final class FlushTask implements Runnable {
private FlushTask() {
}
@Override
public void run() {
try {
commit();
} catch (Throwable e) {
OLogManager.instance().error(this, "Error during WAL background flush", e);
}
}
private void commit() throws IOException {
if (pagesCache.isEmpty())
return;
if (!flushNewData)
return;
flushNewData = false;
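// Snapshot the cached pages (only the still-mutable last page is copied), stamp
// each with the current flush id and index, write them sequentially and fsync,
// then advance flushedLsn and evict the pages that are fully on disk.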
final int maxSize = pagesCache.size();
ODirectMemoryPointer[] pagesToFlush = new ODirectMemoryPointer[maxSize];
long filePointer = nextPositionToFlush;
int lastRecordOffset = -1;
long lastPageIndex = -1;
int flushedPages = 0;
Iterator<OWALPage> pageIterator = pagesCache.iterator();
while (flushedPages < maxSize) {
final OWALPage page = pageIterator.next();
synchronized (page) {
ODirectMemoryPointer dataPointer;
if (flushedPages == maxSize - 1) {
dataPointer = new ODirectMemoryPointer(OWALPage.PAGE_SIZE);
page.getPagePointer().moveData(0, dataPointer, 0, OWALPage.PAGE_SIZE);
} else {
dataPointer = page.getPagePointer();
}
pagesToFlush[flushedPages] = dataPointer;
int recordOffset = findLastRecord(page, true);
if (recordOffset >= 0) {
lastRecordOffset = recordOffset;
lastPageIndex = flushedPages;
}
}
flushedPages++;
}
flushId++;
synchronized (rndFile) {
rndFile.seek(filePointer);
for (int i = 0; i < pagesToFlush.length; i++) {
ODirectMemoryPointer dataPointer = pagesToFlush[i];
byte[] pageContent = dataPointer.get(0, OWALPage.PAGE_SIZE);
if (i == pagesToFlush.length - 1)
dataPointer.free();
OLongSerializer.INSTANCE.serializeNative(flushId, pageContent, OWALPage.FLUSH_ID_OFFSET);
OIntegerSerializer.INSTANCE.serializeNative(i, pageContent, OWALPage.FLUSH_INDEX_OFFSET);
flushPage(pageContent);
filePointer += OWALPage.PAGE_SIZE;
}
rndFile.getFD().sync();
}
long oldPositionToFlush = nextPositionToFlush;
nextPositionToFlush = filePointer - OWALPage.PAGE_SIZE;
if (lastRecordOffset >= 0)
flushedLsn = new OLogSequenceNumber(order, oldPositionToFlush + lastPageIndex * OWALPage.PAGE_SIZE + lastRecordOffset);
for (int i = 0; i < flushedPages - 1; i++) {
OWALPage page = pagesCache.poll();
page.getPagePointer().free();
}
assert !pagesCache.isEmpty();
}
private void flushPage(byte[] content) throws IOException {
CRC32 crc32 = new CRC32();
crc32.update(content, OIntegerSerializer.INT_SIZE, OWALPage.PAGE_SIZE - OIntegerSerializer.INT_SIZE);
OIntegerSerializer.INSTANCE.serializeNative((int) crc32.getValue(), content, 0);
rndFile.write(content);
}
}
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_wal_OWriteAheadLog.java |
8 | setInput(new BrowserInput(null) {
@Override
public String getHtml() {
return content;
}
@Override
public String getInputName() {
return "";
}
}); | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_browser_BrowserInformationControl.java |
2,084 | public class PartitionWideEntryWithPredicateOperationFactory implements OperationFactory {
private String name;
private EntryProcessor entryProcessor;
private Predicate predicate;
public PartitionWideEntryWithPredicateOperationFactory() {
}
public PartitionWideEntryWithPredicateOperationFactory(String name, EntryProcessor entryProcessor, Predicate predicate) {
this.name = name;
this.entryProcessor = entryProcessor;
this.predicate = predicate;
}
@Override
public Operation createOperation() {
return new PartitionWideEntryWithPredicateOperation(name, entryProcessor, predicate);
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(name);
out.writeObject(entryProcessor);
out.writeObject(predicate);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
name = in.readUTF();
entryProcessor = in.readObject();
predicate = in.readObject();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_map_operation_PartitionWideEntryWithPredicateOperationFactory.java |
130 | @Entity
@Table(name = "BLC_QUAL_CRIT_SC_XREF")
@Inheritance(strategy=InheritanceType.JOINED)
public class CriteriaStructuredContentXref {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
/** The embedded composite key. */
@EmbeddedId
CriteriaStructuredContentXrefPK criteriaStructuredContentXrefPK = new CriteriaStructuredContentXrefPK();
public CriteriaStructuredContentXrefPK getCriteriaStructuredContentXrefPK() {
return criteriaStructuredContentXrefPK;
}
public void setCriteriaStructuredContentXrefPK(final CriteriaStructuredContentXrefPK criteriaStructuredContentXrefPK) {
this.criteriaStructuredContentXrefPK = criteriaStructuredContentXrefPK;
}
public static class CriteriaStructuredContentXrefPK implements Serializable {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
@ManyToOne(targetEntity = StructuredContentImpl.class, optional=false)
@JoinColumn(name = "SC_ID")
protected StructuredContent structuredContent = new StructuredContentImpl();
@ManyToOne(targetEntity = StructuredContentItemCriteriaImpl.class, optional=false)
@JoinColumn(name = "SC_ITEM_CRITERIA_ID")
protected StructuredContentItemCriteria structuredContentItemCriteria = new StructuredContentItemCriteriaImpl();
public StructuredContent getStructuredContent() {
return structuredContent;
}
public void setStructuredContent(StructuredContent structuredContent) {
this.structuredContent = structuredContent;
}
public StructuredContentItemCriteria getStructuredContentItemCriteria() {
return structuredContentItemCriteria;
}
public void setStructuredContentItemCriteria(StructuredContentItemCriteria structuredContentItemCriteria) {
this.structuredContentItemCriteria = structuredContentItemCriteria;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((structuredContent == null) ? 0 : structuredContent.hashCode());
result = prime * result + ((structuredContentItemCriteria == null) ? 0 : structuredContentItemCriteria.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
CriteriaStructuredContentXrefPK other = (CriteriaStructuredContentXrefPK) obj;
if (structuredContent == null) {
if (other.structuredContent != null)
return false;
} else if (!structuredContent.equals(other.structuredContent))
return false;
if (structuredContentItemCriteria == null) {
if (other.structuredContentItemCriteria != null)
return false;
} else if (!structuredContentItemCriteria.equals(other.structuredContentItemCriteria))
return false;
return true;
}
}
} | 1no label
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_CriteriaStructuredContentXref.java |
2,752 | public class HttpException extends ElasticsearchException {
public HttpException(String message) {
super(message);
}
public HttpException(String message, Throwable cause) {
super(message, cause);
}
} | 0true
| src_main_java_org_elasticsearch_http_HttpException.java |
951 | ois = new java.io.ObjectInputStream(bais) {
@Override
public Class<?> resolveClass(java.io.ObjectStreamClass streamClass) throws java.io.IOException, ClassNotFoundException {
Class<?> c = Class.forName(streamClass.getName(), false, loader);
if (c == null) {
return super.resolveClass(streamClass);
} else {
return c; // Class loader knows of this class.
} // end else: not null
} // end resolveClass
}; // end ois | 0true
| core_src_main_java_com_orientechnologies_orient_core_serialization_OBase64Utils.java |
729 | loadEntriesMajor(key, inclusive, new RangeResultListener<K, V>() {
@Override
public boolean addResult(Map.Entry<K, V> entry) {
result.add(entry.getValue());
if (maxValuesToFetch > -1 && result.size() >= maxValuesToFetch)
return false;
return true;
}
}); | 1no label
| core_src_main_java_com_orientechnologies_orient_core_index_sbtree_local_OSBTree.java |
3,027 | public interface Factory {
DocValuesFormatProvider create(String name, Settings settings);
} | 0true
| src_main_java_org_elasticsearch_index_codec_docvaluesformat_DocValuesFormatProvider.java |
61 | public class AWSClient {
private String endpoint;
private final AwsConfig awsConfig;
public AWSClient(AwsConfig awsConfig) {
if (awsConfig == null) {
throw new IllegalArgumentException("AwsConfig is required!");
}
if (awsConfig.getAccessKey() == null) {
throw new IllegalArgumentException("AWS access key is required!");
}
if (awsConfig.getSecretKey() == null) {
throw new IllegalArgumentException("AWS secret key is required!");
}
this.awsConfig = awsConfig;
endpoint = awsConfig.getHostHeader();
}
public List<String> getPrivateIpAddresses() throws Exception {
return new DescribeInstances(awsConfig).execute(endpoint);
}
public void setEndpoint(String s) {
this.endpoint = s;
}
} | 0true
| hazelcast-cloud_src_main_java_com_hazelcast_aws_AWSClient.java |
1,516 | @Component("blHeadProcessor")
public class HeadProcessor extends AbstractFragmentHandlingElementProcessor {
@Resource(name = "blHeadProcessorExtensionManager")
protected HeadProcessorExtensionListener extensionManager;
public static final String FRAGMENT_ATTR_NAME = StandardFragmentAttrProcessor.ATTR_NAME;
protected String HEAD_PARTIAL_PATH = "layout/partials/head";
/**
* Sets the name of this processor to be used in the Thymeleaf template.
*/
public HeadProcessor() {
super("head");
}
@Override
public int getPrecedence() {
return 10000;
}
@Override
protected boolean getSubstituteInclusionNode(Arguments arguments, Element element) {
return true;
}
@Override
@SuppressWarnings("unchecked")
protected FragmentAndTarget getFragmentAndTarget(Arguments arguments, Element element, boolean substituteInclusionNode) {
// The pageTitle attribute could be an expression that needs to be evaluated. Try to evaluate, but fall back
// to its text value if the expression could not be processed. This will allow things like
// pageTitle="Hello this is a string"
// as well as expressions like
// pageTitle="${'Hello this is a ' + product.name}"
String pageTitle = element.getAttributeValue("pageTitle");
try {
pageTitle = (String) StandardExpressionProcessor.processExpression(arguments, pageTitle);
} catch (TemplateProcessingException e) {
// Do nothing.
}
((Map<String, Object>) arguments.getExpressionEvaluationRoot()).put("pageTitle", pageTitle);
((Map<String, Object>) arguments.getExpressionEvaluationRoot()).put("additionalCss", element.getAttributeValue("additionalCss"));
extensionManager.processAttributeValues(arguments, element);
return new FragmentAndTarget(HEAD_PARTIAL_PATH, WholeFragmentSpec.INSTANCE);
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_processor_HeadProcessor.java |
1,111 | public class OSQLFunctionDistance extends OSQLFunctionAbstract {
public static final String NAME = "distance";
private final static double EARTH_RADIUS = 6371;
public OSQLFunctionDistance() {
super(NAME, 4, 5);
}
public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters, OCommandContext iContext) {
try {
double distance;
final double[] values = new double[4];
for (int i = 0; i < iParameters.length; ++i) {
if (iParameters[i] == null)
return null;
values[i] = ((Double) OType.convert(iParameters[i], Double.class)).doubleValue();
}
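// Haversine formula: great-circle distance, in kilometers, between two
// latitude/longitude pairs on a sphere of radius EARTH_RADIUS.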
final double deltaLat = Math.toRadians(values[2] - values[0]);
final double deltaLon = Math.toRadians(values[3] - values[1]);
final double a = Math.pow(Math.sin(deltaLat / 2), 2) + Math.cos(Math.toRadians(values[0]))
* Math.cos(Math.toRadians(values[2])) * Math.pow(Math.sin(deltaLon / 2), 2);
distance = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a)) * EARTH_RADIUS;
return distance;
} catch (Exception e) {
return null;
}
}
public String getSyntax() {
return "Syntax error: distance(<field-x>,<field-y>,<x-value>,<y-value>[,<unit>])";
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_sql_functions_geo_OSQLFunctionDistance.java |
3,626 | public class SimpleDateMappingTests extends ElasticsearchTestCase {
@Test
public void testAutomaticDateParser() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = mapper(mapping);
defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("date_field1", "2011/01/22")
.field("date_field2", "2011/01/22 00:00:00")
.field("wrong_date1", "-4")
.field("wrong_date2", "2012/2")
.field("wrong_date3", "2012/test")
.endObject()
.bytes());
FieldMapper<?> fieldMapper = defaultMapper.mappers().smartNameFieldMapper("date_field1");
assertThat(fieldMapper, instanceOf(DateFieldMapper.class));
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("date_field2");
assertThat(fieldMapper, instanceOf(DateFieldMapper.class));
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date1");
assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date2");
assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date3");
assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
}
@Test
public void testParseLocal() {
assertThat(Locale.GERMAN, equalTo(DateFieldMapper.parseLocale("de")));
assertThat(Locale.GERMANY, equalTo(DateFieldMapper.parseLocale("de_DE")));
assertThat(new Locale("de","DE","DE"), equalTo(DateFieldMapper.parseLocale("de_DE_DE")));
try {
DateFieldMapper.parseLocale("de_DE_DE_DE");
fail();
} catch(ElasticsearchIllegalArgumentException ex) {
// expected
}
assertThat(Locale.ROOT, equalTo(DateFieldMapper.parseLocale("")));
assertThat(Locale.ROOT, equalTo(DateFieldMapper.parseLocale("ROOT")));
}
@Test
public void testLocale() throws IOException {
String mapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.startObject("properties")
.startObject("date_field_default")
.field("type", "date")
.field("format", "E, d MMM yyyy HH:mm:ss Z")
.endObject()
.startObject("date_field_en")
.field("type", "date")
.field("format", "E, d MMM yyyy HH:mm:ss Z")
.field("locale", "EN")
.endObject()
.startObject("date_field_de")
.field("type", "date")
.field("format", "E, d MMM yyyy HH:mm:ss Z")
.field("locale", "DE_de")
.endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = mapper(mapping);
ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("date_field_en", "Wed, 06 Dec 2000 02:55:00 -0800")
.field("date_field_de", "Mi, 06 Dez 2000 02:55:00 -0800")
.field("date_field_default", "Wed, 06 Dec 2000 02:55:00 -0800") // check default - no exception is a successs!
.endObject()
.bytes());
assertNumericTokensEqual(doc, defaultMapper, "date_field_en", "date_field_de");
assertNumericTokensEqual(doc, defaultMapper, "date_field_en", "date_field_default");
}
private DocumentMapper mapper(String mapping) throws IOException {
// we serialize and deserialize the mapping to make sure serialization works just fine
DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.startObject();
defaultMapper.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
String rebuildMapping = builder.string();
return MapperTestUtils.newParser().parse(rebuildMapping);
}
private void assertNumericTokensEqual(ParsedDocument doc, DocumentMapper defaultMapper, String fieldA, String fieldB) throws IOException {
assertThat(doc.rootDoc().getField(fieldA).tokenStream(defaultMapper.indexAnalyzer()), notNullValue());
assertThat(doc.rootDoc().getField(fieldB).tokenStream(defaultMapper.indexAnalyzer()), notNullValue());
TokenStream tokenStream = doc.rootDoc().getField(fieldA).tokenStream(defaultMapper.indexAnalyzer());
tokenStream.reset();
NumericTermAttribute nta = tokenStream.addAttribute(NumericTermAttribute.class);
List<Long> values = new ArrayList<Long>();
while(tokenStream.incrementToken()) {
values.add(nta.getRawValue());
}
tokenStream = doc.rootDoc().getField(fieldB).tokenStream(defaultMapper.indexAnalyzer());
tokenStream.reset();
nta = tokenStream.addAttribute(NumericTermAttribute.class);
int pos = 0;
while(tokenStream.incrementToken()) {
assertThat(values.get(pos++), equalTo(nta.getRawValue()));
}
assertThat(pos, equalTo(values.size()));
}
@Test
public void testTimestampAsDate() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("date_field").field("type", "date").endObject().endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = mapper(mapping);
long value = System.currentTimeMillis();
ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("date_field", value)
.endObject()
.bytes());
assertThat(doc.rootDoc().getField("date_field").tokenStream(defaultMapper.indexAnalyzer()), notNullValue());
}
@Test
public void testDateDetection() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.field("date_detection", false)
.startObject("properties").startObject("date_field").field("type", "date").endObject().endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = mapper(mapping);
ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("date_field", "2010-01-01")
.field("date_field_x", "2010-01-01")
.endObject()
.bytes());
assertThat(doc.rootDoc().get("date_field"), nullValue());
assertThat(doc.rootDoc().get("date_field_x"), equalTo("2010-01-01"));
}
@Test
public void testHourFormat() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.field("date_detection", false)
.startObject("properties").startObject("date_field").field("type", "date").field("format", "HH:mm:ss").endObject().endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = mapper(mapping);
ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("date_field", "10:00:00")
.endObject()
.bytes());
assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(10).millis(), DateTimeZone.UTC).getMillis())));
Filter filter = defaultMapper.mappers().smartNameFieldMapper("date_field").rangeFilter("10:00:00", "11:00:00", true, true, null);
assertThat(filter, instanceOf(NumericRangeFilter.class));
NumericRangeFilter<Long> rangeFilter = (NumericRangeFilter<Long>) filter;
assertThat(rangeFilter.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(11).millis() + 999).getMillis())); // +999 to include the 00-01 minute
assertThat(rangeFilter.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(10).millis()).getMillis()));
}
@Test
public void testDayWithoutYearFormat() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.field("date_detection", false)
.startObject("properties").startObject("date_field").field("type", "date").field("format", "MMM dd HH:mm:ss").endObject().endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = mapper(mapping);
ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("date_field", "Jan 02 10:00:00")
.endObject()
.bytes());
assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(34).millis(), DateTimeZone.UTC).getMillis())));
Filter filter = defaultMapper.mappers().smartNameFieldMapper("date_field").rangeFilter("Jan 02 10:00:00", "Jan 02 11:00:00", true, true, null);
assertThat(filter, instanceOf(NumericRangeFilter.class));
NumericRangeFilter<Long> rangeFilter = (NumericRangeFilter<Long>) filter;
assertThat(rangeFilter.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(35).millis() + 999).getMillis())); // +999 to include the 00-01 minute
assertThat(rangeFilter.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(34).millis()).getMillis()));
}
@Test
public void testIgnoreMalformedOption() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties")
.startObject("field1").field("type", "date").field("ignore_malformed", true).endObject()
.startObject("field2").field("type", "date").field("ignore_malformed", false).endObject()
.startObject("field3").field("type", "date").endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = mapper(mapping);
ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("field1", "a")
.field("field2", "2010-01-01")
.endObject()
.bytes());
assertThat(doc.rootDoc().getField("field1"), nullValue());
assertThat(doc.rootDoc().getField("field2"), notNullValue());
try {
defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("field2", "a")
.endObject()
.bytes());
} catch (MapperParsingException e) {
assertThat(e.getCause(), instanceOf(MapperParsingException.class));
}
// Verify that the default is false
try {
defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("field3", "a")
.endObject()
.bytes());
} catch (MapperParsingException e) {
assertThat(e.getCause(), instanceOf(MapperParsingException.class));
}
// Unless the global ignore_malformed option is set to true
Settings indexSettings = settingsBuilder().put("index.mapping.ignore_malformed", true).build();
defaultMapper = MapperTestUtils.newParser(indexSettings).parse(mapping);
doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("field3", "a")
.endObject()
.bytes());
assertThat(doc.rootDoc().getField("field3"), nullValue());
// This should still throw an exception, since field2 is specifically set to ignore_malformed=false
try {
defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("field2", "a")
.endObject()
.bytes());
} catch (MapperParsingException e) {
assertThat(e.getCause(), instanceOf(MapperParsingException.class));
}
}
@Test
public void testThatMergingWorks() throws Exception {
String initialMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties")
.startObject("field").field("type", "date")
.field("format", "EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy")
.endObject()
.endObject()
.endObject().endObject().string();
String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties")
.startObject("field")
.field("type", "date")
.field("format", "EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy||yyyy-MM-dd'T'HH:mm:ss.SSSZZ")
.endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = mapper(initialMapping);
DocumentMapper mergeMapper = mapper(updatedMapping);
assertThat(defaultMapper.mappers().name("field").mapper(), is(instanceOf(DateFieldMapper.class)));
DateFieldMapper initialDateFieldMapper = (DateFieldMapper) defaultMapper.mappers().name("field").mapper();
Map<String, String> config = getConfigurationViaXContent(initialDateFieldMapper);
assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy"));
DocumentMapper.MergeResult mergeResult = defaultMapper.merge(mergeMapper, DocumentMapper.MergeFlags.mergeFlags().simulate(false));
assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.conflicts()), mergeResult.hasConflicts(), is(false));
assertThat(defaultMapper.mappers().name("field").mapper(), is(instanceOf(DateFieldMapper.class)));
DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().name("field").mapper();
Map<String, String> mergedConfig = getConfigurationViaXContent(mergedFieldMapper);
assertThat(mergedConfig.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy||yyyy-MM-dd'T'HH:mm:ss.SSSZZ"));
}
private Map<String, String> getConfigurationViaXContent(DateFieldMapper dateFieldMapper) throws IOException {
XContentBuilder builder = JsonXContent.contentBuilder().startObject();
dateFieldMapper.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
Map<String, Object> dateFieldMapperMap = JsonXContent.jsonXContent.createParser(builder.string()).mapAndClose();
assertThat(dateFieldMapperMap, hasKey("field"));
assertThat(dateFieldMapperMap.get("field"), is(instanceOf(Map.class)));
return (Map<String, String>) dateFieldMapperMap.get("field");
}
} | 0true
| src_test_java_org_elasticsearch_index_mapper_date_SimpleDateMappingTests.java |
4,417 | public class IndicesFilterCache extends AbstractComponent implements RemovalListener<WeightedFilterCache.FilterCacheKey, DocIdSet> {
private final ThreadPool threadPool;
private final CacheRecycler cacheRecycler;
private Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> cache;
private volatile String size;
private volatile long sizeInBytes;
private volatile TimeValue expire;
private final TimeValue cleanInterval;
private final Set<Object> readersKeysToClean = ConcurrentCollections.newConcurrentSet();
private volatile boolean closed;
public static final String INDICES_CACHE_FILTER_SIZE = "indices.cache.filter.size";
public static final String INDICES_CACHE_FILTER_EXPIRE = "indices.cache.filter.expire";
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
boolean replace = false;
String size = settings.get(INDICES_CACHE_FILTER_SIZE, IndicesFilterCache.this.size);
if (!size.equals(IndicesFilterCache.this.size)) {
logger.info("updating [indices.cache.filter.size] from [{}] to [{}]", IndicesFilterCache.this.size, size);
IndicesFilterCache.this.size = size;
replace = true;
}
TimeValue expire = settings.getAsTime(INDICES_CACHE_FILTER_EXPIRE, IndicesFilterCache.this.expire);
if (!Objects.equal(expire, IndicesFilterCache.this.expire)) {
logger.info("updating [indices.cache.filter.expire] from [{}] to [{}]", IndicesFilterCache.this.expire, expire);
IndicesFilterCache.this.expire = expire;
replace = true;
}
if (replace) {
Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> oldCache = IndicesFilterCache.this.cache;
computeSizeInBytes();
buildCache();
oldCache.invalidateAll();
}
}
}
@Inject
public IndicesFilterCache(Settings settings, ThreadPool threadPool, CacheRecycler cacheRecycler, NodeSettingsService nodeSettingsService) {
super(settings);
this.threadPool = threadPool;
this.cacheRecycler = cacheRecycler;
this.size = componentSettings.get("size", "20%");
this.expire = componentSettings.getAsTime("expire", null);
this.cleanInterval = componentSettings.getAsTime("clean_interval", TimeValue.timeValueSeconds(60));
computeSizeInBytes();
buildCache();
logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], expire [{}], clean_interval [{}]",
size, new ByteSizeValue(sizeInBytes), expire, cleanInterval);
nodeSettingsService.addListener(new ApplySettings());
threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, new ReaderCleaner());
}
private void buildCache() {
CacheBuilder<WeightedFilterCache.FilterCacheKey, DocIdSet> cacheBuilder = CacheBuilder.newBuilder()
.removalListener(this)
.maximumWeight(sizeInBytes).weigher(new WeightedFilterCache.FilterCacheValueWeigher());
// defaults to 4, but this is a busy map for all indices, increase it a bit
cacheBuilder.concurrencyLevel(16);
if (expire != null) {
cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS);
}
cache = cacheBuilder.build();
}
private void computeSizeInBytes() {
this.sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size).bytes();
}
public void addReaderKeyToClean(Object readerKey) {
readersKeysToClean.add(readerKey);
}
public void close() {
closed = true;
cache.invalidateAll();
}
public Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> cache() {
return this.cache;
}
@Override
public void onRemoval(RemovalNotification<WeightedFilterCache.FilterCacheKey, DocIdSet> removalNotification) {
WeightedFilterCache.FilterCacheKey key = removalNotification.getKey();
if (key == null) {
return;
}
if (key.removalListener != null) {
key.removalListener.onRemoval(removalNotification);
}
}
/**
* We need this class because we must clean all the filters associated with a reader.
* We don't want to do it every time a reader closes, since iterating over the whole map
* is expensive. There doesn't seem to be a nicer way to do it (and maintaining a
* per-reader list of filters would cost more).
*/
class ReaderCleaner implements Runnable {
@Override
public void run() {
if (closed) {
return;
}
if (readersKeysToClean.isEmpty()) {
schedule();
return;
}
try {
threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
@Override
public void run() {
Recycler.V<ObjectOpenHashSet<Object>> keys = cacheRecycler.hashSet(-1);
try {
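// Drain the pending reader keys into a recycled set so the whole batch can be
// removed with a single sweep over the cache's key set.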
for (Iterator<Object> it = readersKeysToClean.iterator(); it.hasNext(); ) {
keys.v().add(it.next());
it.remove();
}
cache.cleanUp();
if (!keys.v().isEmpty()) {
for (Iterator<WeightedFilterCache.FilterCacheKey> it = cache.asMap().keySet().iterator(); it.hasNext(); ) {
WeightedFilterCache.FilterCacheKey filterCacheKey = it.next();
if (keys.v().contains(filterCacheKey.readerKey())) {
// same as invalidate
it.remove();
}
}
}
schedule();
} finally {
keys.release();
}
}
});
} catch (EsRejectedExecutionException ex) {
logger.debug("Can not run ReaderCleaner - execution rejected", ex);
}
}
private void schedule() {
try {
threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this);
} catch (EsRejectedExecutionException ex) {
logger.debug("Can not schedule ReaderCleaner - execution rejected", ex);
}
}
}
} | 1no label
| src_main_java_org_elasticsearch_indices_cache_filter_IndicesFilterCache.java |
2,833 | public final class MigrationRequestOperation extends BaseMigrationOperation {
public static final int TRY_PAUSE_MILLIS = 1000;
private boolean returnResponse = true;
public MigrationRequestOperation() {
}
public MigrationRequestOperation(MigrationInfo migrationInfo) {
super(migrationInfo);
}
public void run() {
NodeEngine nodeEngine = getNodeEngine();
verifyGoodMaster(nodeEngine);
Address source = migrationInfo.getSource();
Address destination = migrationInfo.getDestination();
verifyExistingTarget(nodeEngine, destination);
if (destination.equals(source)) {
getLogger().warning("Source and destination addresses are the same! => " + toString());
success = false;
return;
}
verifyNotThisNode(nodeEngine, source);
InternalPartitionServiceImpl partitionService = getService();
InternalPartition partition = partitionService.getPartition(migrationInfo.getPartitionId());
Address owner = partition.getOwnerOrNull();
verifyOwnerExists(owner);
if (!migrationInfo.startProcessing()) {
getLogger().warning("Migration is cancelled -> " + migrationInfo);
success = false;
return;
}
try {
verifyOwner(source, partition, owner);
partitionService.addActiveMigration(migrationInfo);
long[] replicaVersions = partitionService.getPartitionReplicaVersions(migrationInfo.getPartitionId());
Collection<Operation> tasks = prepareMigrationTasks();
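// If there is replica state to transfer, hand it to an async task and defer the
// response until the destination acknowledges the migration.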
if (tasks.size() > 0) {
returnResponse = false;
spawnMigrationRequestTask(destination, replicaVersions, tasks);
} else {
success = true;
}
} catch (Throwable e) {
getLogger().warning(e);
success = false;
} finally {
migrationInfo.doneProcessing();
}
}
private void verifyNotThisNode(NodeEngine nodeEngine, Address source) {
if (source == null || !source.equals(nodeEngine.getThisAddress())) {
throw new RetryableHazelcastException("Source of migration is not this node! => " + toString());
}
}
private void verifyOwnerExists(Address owner) {
if (owner == null) {
throw new RetryableHazelcastException("Cannot migrate at the moment! Owner of the partition is null => "
+ migrationInfo);
}
}
private void verifyOwner(Address source, InternalPartition partition, Address owner) {
if (!source.equals(owner)) {
throw new HazelcastException("Cannot migrate! This node is not owner of the partition => "
+ migrationInfo + " -> " + partition);
}
}
private void spawnMigrationRequestTask(Address destination, long[] replicaVersions, Collection<Operation> tasks)
throws IOException {
NodeEngine nodeEngine = getNodeEngine();
SerializationService serializationService = nodeEngine.getSerializationService();
BufferObjectDataOutput out = createDataOutput(serializationService);
out.writeInt(tasks.size());
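// NonThreadSafe operations must be serialized on the current thread; the remaining
// (thread-safe) ones are serialized later by the async MigrationRequestTask.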
Iterator<Operation> iter = tasks.iterator();
while (iter.hasNext()) {
Operation task = iter.next();
if (task instanceof NonThreadSafe) {
serializationService.writeObject(out, task);
iter.remove();
}
}
ManagedExecutorService executor = nodeEngine.getExecutionService().getExecutor(ExecutionService.ASYNC_EXECUTOR);
MigrationRequestTask task = new MigrationRequestTask(tasks, out, replicaVersions, destination);
executor.execute(task);
}
private BufferObjectDataOutput createDataOutput(SerializationService serializationService) {
return serializationService.createObjectDataOutput(1024 * 32);
}
private void verifyGoodMaster(NodeEngine nodeEngine) {
Address masterAddress = nodeEngine.getMasterAddress();
if (!masterAddress.equals(migrationInfo.getMaster())) {
throw new RetryableHazelcastException("Migration initiator is not master node! => " + toString());
}
if (!masterAddress.equals(getCallerAddress())) {
throw new RetryableHazelcastException("Caller is not master node! => " + toString());
}
}
private void verifyExistingTarget(NodeEngine nodeEngine, Address destination) {
Member target = nodeEngine.getClusterService().getMember(destination);
if (target == null) {
throw new TargetNotMemberException("Destination of migration could not be found! => " + toString());
}
}
@Override
public ExceptionAction onException(Throwable throwable) {
if (throwable instanceof TargetNotMemberException) {
boolean rethrowException = rethrowException();
if (rethrowException) {
return ExceptionAction.THROW_EXCEPTION;
}
}
return super.onException(throwable);
}
private boolean rethrowException() {
NodeEngine nodeEngine = getNodeEngine();
if (nodeEngine == null) {
return false;
}
MemberImpl destination = nodeEngine.getClusterService().getMember(migrationInfo.getDestination());
return destination == null;
}
@Override
public Object getResponse() {
return success;
}
@Override
public boolean returnsResponse() {
return returnResponse;
}
private Collection<Operation> prepareMigrationTasks() {
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
PartitionReplicationEvent replicationEvent = new PartitionReplicationEvent(migrationInfo.getPartitionId(), 0);
PartitionMigrationEvent migrationEvent
= new PartitionMigrationEvent(MigrationEndpoint.SOURCE, migrationInfo.getPartitionId());
Collection<Operation> tasks = new LinkedList<Operation>();
for (ServiceInfo serviceInfo : nodeEngine.getServiceInfos(MigrationAwareService.class)) {
MigrationAwareService service = (MigrationAwareService) serviceInfo.getService();
service.beforeMigration(migrationEvent);
Operation op = service.prepareReplicationOperation(replicationEvent);
if (op != null) {
op.setServiceName(serviceInfo.getName());
tasks.add(op);
}
}
return tasks;
}
private class MigrationRequestTask implements Runnable {
private final SerializationService serializationService;
private final Collection<Operation> tasks;
private final BufferObjectDataOutput out;
private final long[] replicaVersions;
private final Address destination;
private final long timeout;
private final ResponseHandler responseHandler;
private final boolean compress;
public MigrationRequestTask(Collection<Operation> tasks, BufferObjectDataOutput out,
long[] replicaVersions, Address destination) {
this.tasks = tasks;
this.out = out;
this.replicaVersions = replicaVersions;
this.destination = destination;
this.responseHandler = getResponseHandler();
NodeEngine nodeEngine = getNodeEngine();
this.serializationService = nodeEngine.getSerializationService();
this.compress = nodeEngine.getGroupProperties().PARTITION_MIGRATION_ZIP_ENABLED.getBoolean();
this.timeout = nodeEngine.getGroupProperties().PARTITION_MIGRATION_TIMEOUT.getLong();
}
@Override
public void run() {
NodeEngine nodeEngine = getNodeEngine();
try {
byte[] data = getTaskData();
MigrationOperation operation = new MigrationOperation(
migrationInfo, replicaVersions, data, compress);
Future future = nodeEngine.getOperationService()
.createInvocationBuilder(InternalPartitionService.SERVICE_NAME, operation, destination)
.setTryPauseMillis(TRY_PAUSE_MILLIS)
.setReplicaIndex(getReplicaIndex())
.invoke();
Object response = future.get(timeout, TimeUnit.SECONDS);
Boolean result = nodeEngine.toObject(response);
migrationInfo.doneProcessing();
responseHandler.sendResponse(result);
} catch (Throwable e) {
responseHandler.sendResponse(Boolean.FALSE);
logThrowable(e);
}
}
private void logThrowable(Throwable e) {
if (e instanceof ExecutionException) {
e = e.getCause() != null ? e.getCause() : e;
}
Level level = getLogLevel(e);
getLogger().log(level, e.getMessage(), e);
}
private Level getLogLevel(Throwable e) {
return (e instanceof MemberLeftException || e instanceof InterruptedException)
|| !getNodeEngine().isActive() ? Level.INFO : Level.WARNING;
}
private byte[] getTaskData() throws IOException {
try {
for (Operation task : tasks) {
serializationService.writeObject(out, task);
}
if (compress) {
return IOUtil.compress(out.toByteArray());
} else {
return out.toByteArray();
}
} finally {
closeResource(out);
}
}
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_partition_impl_MigrationRequestOperation.java |