Dataset columns:
- Unnamed: 0 (int64, values 0 to 6.45k)
- func (string, lengths 29 to 253k)
- target (class label, 2 classes)
- project (string, lengths 36 to 167)

Each record below lists, in order: its Unnamed: 0 index, the func source text, the target label, and the project file path.
1,410
public class MetaDataIndexStateService extends AbstractComponent { public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE); private final ClusterService clusterService; private final AllocationService allocationService; @Inject public MetaDataIndexStateService(Settings settings, ClusterService clusterService, AllocationService allocationService) { super(settings); this.clusterService = clusterService; this.allocationService = allocationService; } public void closeIndex(final CloseIndexClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) { if (request.indices() == null || request.indices().length == 0) { throw new ElasticsearchIllegalArgumentException("Index name is required"); } final String indicesAsString = Arrays.toString(request.indices()); clusterService.submitStateUpdateTask("close-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask() { @Override public boolean mustAck(DiscoveryNode discoveryNode) { return true; } @Override public void onAllNodesAcked(@Nullable Throwable t) { listener.onResponse(new ClusterStateUpdateResponse(true)); } @Override public void onAckTimeout() { listener.onResponse(new ClusterStateUpdateResponse(false)); } @Override public TimeValue ackTimeout() { return request.ackTimeout(); } @Override public TimeValue timeout() { return request.masterNodeTimeout(); } @Override public void onFailure(String source, Throwable t) { listener.onFailure(t); } @Override public ClusterState execute(ClusterState currentState) { List<String> indicesToClose = new ArrayList<String>(); for (String index : request.indices()) { IndexMetaData indexMetaData = currentState.metaData().index(index); if (indexMetaData == null) { throw new IndexMissingException(new Index(index)); } if (indexMetaData.state() != IndexMetaData.State.CLOSE) { IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index); for (IndexShardRoutingTable shard : indexRoutingTable) { if (!shard.primaryAllocatedPostApi()) { throw new IndexPrimaryShardNotAllocatedException(new Index(index)); } } indicesToClose.add(index); } } if (indicesToClose.isEmpty()) { return currentState; } logger.info("closing indices [{}]", indicesAsString); MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder() .blocks(currentState.blocks()); for (String index : indicesToClose) { mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.CLOSE)); blocksBuilder.addIndexBlock(index, INDEX_CLOSED_BLOCK); } ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build(); RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); for (String index : indicesToClose) { rtBuilder.remove(index); } RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder).build()); //no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask return ClusterState.builder(updatedState).routingResult(routingResult).build(); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { } }); } public void openIndex(final OpenIndexClusterStateUpdateRequest request, final ClusterStateUpdateListener listener) { if (request.indices() == null || request.indices().length == 0) { throw new 
ElasticsearchIllegalArgumentException("Index name is required"); } final String indicesAsString = Arrays.toString(request.indices()); clusterService.submitStateUpdateTask("open-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask() { @Override public boolean mustAck(DiscoveryNode discoveryNode) { return true; } @Override public void onAllNodesAcked(@Nullable Throwable t) { listener.onResponse(new ClusterStateUpdateResponse(true)); } @Override public void onAckTimeout() { listener.onResponse(new ClusterStateUpdateResponse(false)); } @Override public TimeValue ackTimeout() { return request.ackTimeout(); } @Override public TimeValue timeout() { return request.masterNodeTimeout(); } @Override public void onFailure(String source, Throwable t) { listener.onFailure(t); } @Override public ClusterState execute(ClusterState currentState) { List<String> indicesToOpen = new ArrayList<String>(); for (String index : request.indices()) { IndexMetaData indexMetaData = currentState.metaData().index(index); if (indexMetaData == null) { throw new IndexMissingException(new Index(index)); } if (indexMetaData.state() != IndexMetaData.State.OPEN) { indicesToOpen.add(index); } } if (indicesToOpen.isEmpty()) { return currentState; } logger.info("opening indices [{}]", indicesAsString); MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder() .blocks(currentState.blocks()); for (String index : indicesToOpen) { mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN)); blocksBuilder.removeIndexBlock(index, INDEX_CLOSED_BLOCK); } ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build(); RoutingTable.Builder rtBuilder = RoutingTable.builder(updatedState.routingTable()); for (String index : indicesToOpen) { rtBuilder.addAsRecovery(updatedState.metaData().index(index)); } RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(rtBuilder).build()); //no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask return ClusterState.builder(updatedState).routingResult(routingResult).build(); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { } }); } }
0true
src_main_java_org_elasticsearch_cluster_metadata_MetaDataIndexStateService.java
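For context, a minimal sketch of how the close and open operations handled by the service in the record above are typically triggered from the Java client of that Elasticsearch era. The client variable, the index name "my-index", and the builder methods are assumptions for illustration, not part of the record.

import org.elasticsearch.client.Client;

public class IndexStateExample {
    static void closeAndReopen(Client client) {
        // Close: the master node ends up in MetaDataIndexStateService.closeIndex(),
        // adding INDEX_CLOSED_BLOCK and dropping the index routing table.
        client.admin().indices().prepareClose("my-index").execute().actionGet();

        // Open: MetaDataIndexStateService.openIndex() clears the block and
        // re-adds the index routing table for recovery.
        client.admin().indices().prepareOpen("my-index").execute().actionGet();
    }
}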
1,724
public static class SampleIndexableObjectMapLoader implements MapLoader<Integer, SampleIndexableObject>, MapStoreFactory<Integer, SampleIndexableObject> { private SampleIndexableObject[] values = new SampleIndexableObject[10]; private Set<Integer> keys = new HashSet<Integer>(); boolean preloadValues = false; public SampleIndexableObjectMapLoader() { for (int i = 0; i < 10; i++) { keys.add(i); values[i] = new SampleIndexableObject("My-" + i, i); } } @Override public SampleIndexableObject load(Integer key) { if (!preloadValues) return null; return values[key]; } @Override public Map<Integer, SampleIndexableObject> loadAll(Collection<Integer> keys) { if (!preloadValues) return Collections.emptyMap(); Map<Integer, SampleIndexableObject> data = new HashMap<Integer, SampleIndexableObject>(); for (Integer key : keys) { data.put(key, values[key]); } return data; } @Override public Set<Integer> loadAllKeys() { if (!preloadValues) return Collections.emptySet(); return Collections.unmodifiableSet(keys); } @Override public MapLoader<Integer, SampleIndexableObject> newMapStore(String mapName, Properties properties) { return this; } }
0true
hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java
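As a hedged usage sketch, the loader in the record above could be plugged into a Hazelcast map configuration roughly as follows (approximate Hazelcast 3.x config API; the map name "default" and the instance bootstrap are illustrative, and the sketch assumes the nested test classes are visible from where it lives).

import com.hazelcast.config.Config;
import com.hazelcast.config.MapStoreConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;

public class MapLoaderWiringExample {
    public static void main(String[] args) {
        // Wire the loader from the record above as the map store for "default".
        MapStoreConfig mapStoreConfig = new MapStoreConfig();
        mapStoreConfig.setEnabled(true);
        mapStoreConfig.setImplementation(new SampleIndexableObjectMapLoader());

        Config config = new Config();
        config.getMapConfig("default").setMapStoreConfig(mapStoreConfig);

        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        IMap<Integer, SampleIndexableObject> map = hz.getMap("default");

        // Note: the loader's package-private preloadValues flag defaults to false,
        // so load()/loadAll() return nothing unless test code in the same package flips it.
        SampleIndexableObject value = map.get(0); // a miss triggers load(0)
        System.out.println(value);
    }
}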
592
public class IndicesSegmentResponse extends BroadcastOperationResponse implements ToXContent { private ShardSegments[] shards; private Map<String, IndexSegments> indicesSegments; IndicesSegmentResponse() { } IndicesSegmentResponse(ShardSegments[] shards, ClusterState clusterState, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shards = shards; } public Map<String, IndexSegments> getIndices() { if (indicesSegments != null) { return indicesSegments; } Map<String, IndexSegments> indicesSegments = Maps.newHashMap(); Set<String> indices = Sets.newHashSet(); for (ShardSegments shard : shards) { indices.add(shard.getIndex()); } for (String index : indices) { List<ShardSegments> shards = Lists.newArrayList(); for (ShardSegments shard : this.shards) { if (shard.getShardRouting().index().equals(index)) { shards.add(shard); } } indicesSegments.put(index, new IndexSegments(index, shards.toArray(new ShardSegments[shards.size()]))); } this.indicesSegments = indicesSegments; return indicesSegments; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); shards = new ShardSegments[in.readVInt()]; for (int i = 0; i < shards.length; i++) { shards[i] = ShardSegments.readShardSegments(in); } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(shards.length); for (ShardSegments shard : shards) { shard.writeTo(out); } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.INDICES); for (IndexSegments indexSegments : getIndices().values()) { builder.startObject(indexSegments.getIndex(), XContentBuilder.FieldCaseConversion.NONE); builder.startObject(Fields.SHARDS); for (IndexShardSegments indexSegment : indexSegments) { builder.startArray(Integer.toString(indexSegment.getShardId().id())); for (ShardSegments shardSegments : indexSegment) { builder.startObject(); builder.startObject(Fields.ROUTING); builder.field(Fields.STATE, shardSegments.getShardRouting().state()); builder.field(Fields.PRIMARY, shardSegments.getShardRouting().primary()); builder.field(Fields.NODE, shardSegments.getShardRouting().currentNodeId()); if (shardSegments.getShardRouting().relocatingNodeId() != null) { builder.field(Fields.RELOCATING_NODE, shardSegments.getShardRouting().relocatingNodeId()); } builder.endObject(); builder.field(Fields.NUM_COMMITTED_SEGMENTS, shardSegments.getNumberOfCommitted()); builder.field(Fields.NUM_SEARCH_SEGMENTS, shardSegments.getNumberOfSearch()); builder.startObject(Fields.SEGMENTS); for (Segment segment : shardSegments) { builder.startObject(segment.getName()); builder.field(Fields.GENERATION, segment.getGeneration()); builder.field(Fields.NUM_DOCS, segment.getNumDocs()); builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs()); builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSizeInBytes()); builder.byteSizeField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, segment.getMemoryInBytes()); builder.field(Fields.COMMITTED, segment.isCommitted()); builder.field(Fields.SEARCH, segment.isSearch()); if (segment.getVersion() != null) { builder.field(Fields.VERSION, segment.getVersion()); } if (segment.isCompound() != null) { builder.field(Fields.COMPOUND, segment.isCompound()); } if (segment.getMergeId() != null) { builder.field(Fields.MERGE_ID, segment.getMergeId()); } builder.endObject(); } 
builder.endObject(); builder.endObject(); } builder.endArray(); } builder.endObject(); builder.endObject(); } builder.endObject(); return builder; } static final class Fields { static final XContentBuilderString INDICES = new XContentBuilderString("indices"); static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); static final XContentBuilderString ROUTING = new XContentBuilderString("routing"); static final XContentBuilderString STATE = new XContentBuilderString("state"); static final XContentBuilderString PRIMARY = new XContentBuilderString("primary"); static final XContentBuilderString NODE = new XContentBuilderString("node"); static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node"); static final XContentBuilderString SEGMENTS = new XContentBuilderString("segments"); static final XContentBuilderString GENERATION = new XContentBuilderString("generation"); static final XContentBuilderString NUM_COMMITTED_SEGMENTS = new XContentBuilderString("num_committed_segments"); static final XContentBuilderString NUM_SEARCH_SEGMENTS = new XContentBuilderString("num_search_segments"); static final XContentBuilderString NUM_DOCS = new XContentBuilderString("num_docs"); static final XContentBuilderString DELETED_DOCS = new XContentBuilderString("deleted_docs"); static final XContentBuilderString SIZE = new XContentBuilderString("size"); static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes"); static final XContentBuilderString COMMITTED = new XContentBuilderString("committed"); static final XContentBuilderString SEARCH = new XContentBuilderString("search"); static final XContentBuilderString VERSION = new XContentBuilderString("version"); static final XContentBuilderString COMPOUND = new XContentBuilderString("compound"); static final XContentBuilderString MERGE_ID = new XContentBuilderString("merge_id"); static final XContentBuilderString MEMORY = new XContentBuilderString("memory"); static final XContentBuilderString MEMORY_IN_BYTES = new XContentBuilderString("memory_in_bytes"); } }
0true
src_main_java_org_elasticsearch_action_admin_indices_segments_IndicesSegmentResponse.java
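A hedged sketch of how such a response might be obtained and walked, using only the accessors visible in the record above; the prepareSegments call, the client setup, and the index name are assumptions.

import org.elasticsearch.action.admin.indices.segments.IndexSegments;
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
import org.elasticsearch.client.Client;

public class SegmentsReportExample {
    static void printShardCounts(Client client) {
        // Request segment details for a hypothetical index; prepareSegments is
        // assumed to be the request builder in this client version.
        IndicesSegmentResponse response =
                client.admin().indices().prepareSegments("my-index").execute().actionGet();

        // getIndices() (from the record above) lazily groups ShardSegments by index name.
        for (IndexSegments indexSegments : response.getIndices().values()) {
            int shardGroups = 0;
            for (IndexShardSegments shard : indexSegments) {
                shardGroups++;
            }
            System.out.println(indexSegments.getIndex() + ": " + shardGroups + " shard group(s)");
        }
    }
}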
446
public static class ProcessStats implements ToXContent, Streamable { int count; int cpuPercent; long totalOpenFileDescriptors; long minOpenFileDescriptors = Long.MAX_VALUE; long maxOpenFileDescriptors = Long.MIN_VALUE; public void addNodeStats(NodeStats nodeStats) { if (nodeStats.getProcess() == null) { return; } count++; if (nodeStats.getProcess().cpu() != null) { // with no sigar, this may not be available cpuPercent += nodeStats.getProcess().cpu().getPercent(); } long fd = nodeStats.getProcess().openFileDescriptors(); if (fd > 0) { // fd can be -1 if not supported on platform totalOpenFileDescriptors += fd; } // we still do min max calc on -1, so we'll have an indication of it not being supported on one of the nodes. minOpenFileDescriptors = Math.min(minOpenFileDescriptors, fd); maxOpenFileDescriptors = Math.max(maxOpenFileDescriptors, fd); } /** * Cpu usage in percentages - 100 is 1 core. */ public int getCpuPercent() { return cpuPercent; } public long getAvgOpenFileDescriptors() { if (count == 0) { return -1; } return totalOpenFileDescriptors / count; } public long getMaxOpenFileDescriptors() { if (count == 0) { return -1; } return maxOpenFileDescriptors; } public long getMinOpenFileDescriptors() { if (count == 0) { return -1; } return minOpenFileDescriptors; } @Override public void readFrom(StreamInput in) throws IOException { count = in.readVInt(); cpuPercent = in.readVInt(); totalOpenFileDescriptors = in.readVLong(); minOpenFileDescriptors = in.readLong(); maxOpenFileDescriptors = in.readLong(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(count); out.writeVInt(cpuPercent); out.writeVLong(totalOpenFileDescriptors); out.writeLong(minOpenFileDescriptors); out.writeLong(maxOpenFileDescriptors); } public static ProcessStats readStats(StreamInput in) throws IOException { ProcessStats cpu = new ProcessStats(); cpu.readFrom(in); return cpu; } static final class Fields { static final XContentBuilderString CPU = new XContentBuilderString("cpu"); static final XContentBuilderString PERCENT = new XContentBuilderString("percent"); static final XContentBuilderString OPEN_FILE_DESCRIPTORS = new XContentBuilderString("open_file_descriptors"); static final XContentBuilderString MIN = new XContentBuilderString("min"); static final XContentBuilderString MAX = new XContentBuilderString("max"); static final XContentBuilderString AVG = new XContentBuilderString("avg"); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.CPU).field(Fields.PERCENT, cpuPercent).endObject(); if (count > 0) { builder.startObject(Fields.OPEN_FILE_DESCRIPTORS); builder.field(Fields.MIN, getMinOpenFileDescriptors()); builder.field(Fields.MAX, getMaxOpenFileDescriptors()); builder.field(Fields.AVG, getAvgOpenFileDescriptors()); builder.endObject(); } return builder; } }
0true
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsNodes.java
956
public class OMemoryStream extends OutputStream { public static final int DEF_SIZE = 1024; private byte[] buffer; private int position; private static final int NATIVE_COPY_THRESHOLD = 9; private static long metricResize = 0; static { Orient .instance() .getProfiler() .registerHookValue("system.memory.stream.resize", "Number of resizes of memory stream buffer", METRIC_TYPE.COUNTER, new OProfilerHookValue() { public Object getValue() { return metricResize; } }); } public OMemoryStream() { this(DEF_SIZE); } /** * Callee takes ownership of 'buf'. */ public OMemoryStream(final int initialCapacity) { buffer = new byte[initialCapacity]; } public OMemoryStream(byte[] stream) { buffer = stream; } /** * Move bytes left or right of an offset. * * @param iFrom * Starting position * @param iPosition * Offset to the iFrom value: positive values mean move right, otherwise move left */ public void move(final int iFrom, final int iPosition) { if (iPosition == 0) return; final int to = iFrom + iPosition; final int size = iPosition > 0 ? buffer.length - to : buffer.length - iFrom; System.arraycopy(buffer, iFrom, buffer, to, size); } public void copyFrom(final OMemoryStream iSource, final int iSize) { if (iSize < 0) return; assureSpaceFor(position + iSize); System.arraycopy(iSource.buffer, iSource.position, buffer, position, iSize); } public final void writeTo(final OutputStream out) throws IOException { out.write(buffer, 0, position); } public final byte[] getInternalBuffer() { return buffer; } /** * Returns the used buffer as byte[]. * * @return [result.length = size()] */ public final byte[] toByteArray() { if (position == buffer.length - 1) // 100% USED, RETURN THE FULL BUFFER return buffer; final int pos = position; final byte[] destinBuffer = new byte[pos]; final byte[] sourceBuffer = buffer; if (pos < NATIVE_COPY_THRESHOLD) for (int i = 0; i < pos; ++i) destinBuffer[i] = sourceBuffer[i]; else System.arraycopy(sourceBuffer, 0, destinBuffer, 0, pos); return destinBuffer; } /** * Does not reduce the current capacity. */ public final void reset() { position = 0; } // OutputStream: @Override public final void write(final int b) { assureSpaceFor(OBinaryProtocol.SIZE_BYTE); buffer[position++] = (byte) b; } @Override public final void write(final byte[] iBuffer, final int iOffset, final int iLength) { final int pos = position; final int tot = pos + iLength; assureSpaceFor(iLength); final byte[] localBuffer = buffer; if (iLength < NATIVE_COPY_THRESHOLD) for (int i = 0; i < iLength; ++i) localBuffer[pos + i] = iBuffer[iOffset + i]; else System.arraycopy(iBuffer, iOffset, localBuffer, pos, iLength); position = tot; } /** * Equivalent to {@link #reset()}. */ @Override public final void close() { reset(); } public final void setAsFixed(final byte[] iContent) { if (iContent == null) return; write(iContent, 0, iContent.length); } /** * Append byte[] to the stream. 
* * @param iContent * @return The begin offset of the appended content * @throws IOException */ public int set(final byte[] iContent) { if (iContent == null) return -1; final int begin = position; assureSpaceFor(OBinaryProtocol.SIZE_INT + iContent.length); OBinaryProtocol.int2bytes(iContent.length, buffer, position); position += OBinaryProtocol.SIZE_INT; write(iContent, 0, iContent.length); return begin; } public void remove(final int iBegin, final int iEnd) { if (iBegin > iEnd) throw new IllegalArgumentException("Begin is bigger than end"); if (iEnd > buffer.length) throw new IndexOutOfBoundsException("Position " + iEnd + " is greater than the buffer length (" + buffer.length + ")"); System.arraycopy(buffer, iEnd, buffer, iBegin, buffer.length - iEnd); } public void set(final byte iContent) { write(iContent); } public final int set(final String iContent) { return set(OBinaryProtocol.string2bytes(iContent)); } public int set(final boolean iContent) { final int begin = position; write((byte) (iContent ? 1 : 0)); return begin; } public int set(final char iContent) { assureSpaceFor(OBinaryProtocol.SIZE_CHAR); final int begin = position; OBinaryProtocol.char2bytes(iContent, buffer, position); position += OBinaryProtocol.SIZE_CHAR; return begin; } public int set(final int iContent) { assureSpaceFor(OBinaryProtocol.SIZE_INT); final int begin = position; OBinaryProtocol.int2bytes(iContent, buffer, position); position += OBinaryProtocol.SIZE_INT; return begin; } public int set(final long iContent) { assureSpaceFor(OBinaryProtocol.SIZE_LONG); final int begin = position; OBinaryProtocol.long2bytes(iContent, buffer, position); position += OBinaryProtocol.SIZE_LONG; return begin; } public int set(final short iContent) { assureSpaceFor(OBinaryProtocol.SIZE_SHORT); final int begin = position; OBinaryProtocol.short2bytes(iContent, buffer, position); position += OBinaryProtocol.SIZE_SHORT; return begin; } public int getPosition() { return position; } private void assureSpaceFor(final int iLength) { final byte[] localBuffer = buffer; final int pos = position; final int capacity = position + iLength; final int bufferLength = localBuffer.length; if (bufferLength < capacity) { metricResize++; final byte[] newbuf = new byte[Math.max(bufferLength << 1, capacity)]; if (pos < NATIVE_COPY_THRESHOLD) for (int i = 0; i < pos; ++i) newbuf[i] = localBuffer[i]; else System.arraycopy(localBuffer, 0, newbuf, 0, pos); buffer = newbuf; } } /** * Jumps bytes positioning forward of passed bytes. * * @param iLength * Bytes to jump */ public void fill(final int iLength) { assureSpaceFor(iLength); position += iLength; } /** * Fills the stream from current position writing iLength times the iFiller byte * * @param iLength * Bytes to jump * @param iFiller * Byte to use to fill the space */ public void fill(final int iLength, final byte iFiller) { assureSpaceFor(iLength); Arrays.fill(buffer, position, position + iLength, iFiller); position += iLength; } public OMemoryStream jump(final int iOffset) { if (iOffset > buffer.length) throw new IndexOutOfBoundsException("Offset " + iOffset + " is greater than the buffer size " + buffer.length); position = iOffset; return this; } public byte[] getAsByteArrayFixed(final int iSize) { if (position >= buffer.length) return null; final byte[] portion = OArrays.copyOfRange(buffer, position, position + iSize); position += iSize; return portion; } /** * Browse the stream but just return the begin of the byte array. This is used to lazy load the information only when needed. 
* */ public int getAsByteArrayOffset() { if (position >= buffer.length) return -1; final int begin = position; final int size = OBinaryProtocol.bytes2int(buffer, position); position += OBinaryProtocol.SIZE_INT + size; return begin; } public int read() { return buffer[position++]; } public int read(final byte[] b) { return read(b, 0, b.length); } public int read(final byte[] b, final int off, final int len) { if (position >= buffer.length) return 0; System.arraycopy(buffer, position, b, off, len); position += len; return len; } public byte[] getAsByteArray(int iOffset) { if (buffer == null || iOffset >= buffer.length) return null; final int size = OBinaryProtocol.bytes2int(buffer, iOffset); if (size == 0) return null; iOffset += OBinaryProtocol.SIZE_INT; return OArrays.copyOfRange(buffer, iOffset, iOffset + size); } public byte[] getAsByteArray() { if (position >= buffer.length) return null; final int size = OBinaryProtocol.bytes2int(buffer, position); position += OBinaryProtocol.SIZE_INT; final byte[] portion = OArrays.copyOfRange(buffer, position, position + size); position += size; return portion; } public String getAsString() { final int size = getVariableSize(); if (size < 0) return null; return OBinaryProtocol.bytes2string(this, size); } public boolean getAsBoolean() { return buffer[position++] == 1; } public char getAsChar() { final char value = OBinaryProtocol.bytes2char(buffer, position); position += OBinaryProtocol.SIZE_CHAR; return value; } public byte getAsByte() { return buffer[position++]; } public long getAsLong() { final long value = OBinaryProtocol.bytes2long(buffer, position); position += OBinaryProtocol.SIZE_LONG; return value; } public int getAsInteger() { final int value = OBinaryProtocol.bytes2int(buffer, position); position += OBinaryProtocol.SIZE_INT; return value; } public short getAsShort() { final short value = OBinaryProtocol.bytes2short(buffer, position); position += OBinaryProtocol.SIZE_SHORT; return value; } public byte peek() { return buffer[position]; } public void setSource(final byte[] iBuffer) { buffer = iBuffer; position = 0; } public byte[] copy() { if (buffer == null) return null; final int size = position > 0 ? position : buffer.length; final byte[] copy = new byte[size]; System.arraycopy(buffer, 0, copy, 0, size); return copy; } public int getVariableSize() { if (position >= buffer.length) return -1; final int size = OBinaryProtocol.bytes2int(buffer, position); position += OBinaryProtocol.SIZE_INT; return size; } public int getSize() { return buffer.length; } public final int size() { return position; } }
0true
core_src_main_java_com_orientechnologies_orient_core_serialization_OMemoryStream.java
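A minimal write-then-read round trip using only methods shown in the record above; the values written are arbitrary examples.

import com.orientechnologies.orient.core.serialization.OMemoryStream;

public class MemoryStreamRoundTrip {
    public static void main(String[] args) {
        OMemoryStream stream = new OMemoryStream();

        // set(int) writes a fixed-size int; set(String) writes a length-prefixed
        // byte array. Both return the offset where the value begins.
        int intOffset = stream.set(42);
        int textOffset = stream.set("hello");

        // Rewind and read the values back in the same order.
        stream.jump(0);
        int number = stream.getAsInteger();  // 42
        String text = stream.getAsString();  // "hello"

        System.out.println(intOffset + " " + textOffset + " -> " + number + ", " + text);
    }
}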
1,162
transportServiceServer.registerHandler("benchmark", new BaseTransportRequestHandler<BenchmarkMessageRequest>() { @Override public BenchmarkMessageRequest newInstance() { return new BenchmarkMessageRequest(); } @Override public String executor() { return ThreadPool.Names.GENERIC; } @Override public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception { channel.sendResponse(new BenchmarkMessageResponse(request)); } });
0true
src_test_java_org_elasticsearch_benchmark_transport_BenchmarkNettyLargeMessages.java
46
public class OCaseInsentiveComparator implements Comparator<String> { public int compare(final String stringOne, final String stringTwo) { return stringOne.compareToIgnoreCase(stringTwo); } }
0true
commons_src_main_java_com_orientechnologies_common_comparator_OCaseInsentiveComparator.java
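A trivial usage sketch for the comparator in the record above; the list contents are made up for illustration.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import com.orientechnologies.common.comparator.OCaseInsentiveComparator;

public class CaseInsensitiveSortExample {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("delta", "Alpha", "charlie", "Bravo");

        // The comparator from the record above delegates to
        // String.compareToIgnoreCase, so case is ignored while sorting.
        Collections.sort(names, new OCaseInsentiveComparator());

        System.out.println(names); // [Alpha, Bravo, charlie, delta]
    }
}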
939
public class RecordOfferUsageActivity extends BaseActivity<CheckoutContext> { /** * Key to retrieve the audits that were persisted */ public static final String SAVED_AUDITS = "savedAudits"; protected static final Log LOG = LogFactory.getLog(RecordOfferUsageActivity.class); @Resource(name="blOfferAuditService") protected OfferAuditService offerAuditService; @Resource(name = "blOfferService") protected OfferService offerService; @Override public CheckoutContext execute(CheckoutContext context) throws Exception { Order order = context.getSeedData().getOrder(); Set<Offer> appliedOffers = offerService.getUniqueOffersFromOrder(order); Map<Offer, OfferCode> offerToCodeMapping = offerService.getOffersRetrievedFromCodes(order.getAddedOfferCodes(), appliedOffers); List<OfferAudit> audits = saveOfferIds(appliedOffers, offerToCodeMapping, order); Map<String, Object> state = new HashMap<String, Object>(); state.put(SAVED_AUDITS, audits); ActivityStateManagerImpl.getStateManager().registerState(this, context, getRollbackHandler(), state); return context; } /** * Persists each of the offers to the database as {@link OfferAudit}s. * * @return the {@link OfferAudit}s that were persisted */ protected List<OfferAudit> saveOfferIds(Set<Offer> offers, Map<Offer, OfferCode> offerToCodeMapping, Order order) { List<OfferAudit> audits = new ArrayList<OfferAudit>(offers.size()); for (Offer offer : offers) { OfferAudit audit = offerAuditService.create(); audit.setCustomerId(order.getCustomer().getId()); audit.setOfferId(offer.getId()); audit.setOrderId(order.getId()); //add the code that was used to obtain the offer to the audit context try { OfferCode codeUsedToRetrieveOffer = offerToCodeMapping.get(offer); if (codeUsedToRetrieveOffer != null) { audit.setOfferCodeId(codeUsedToRetrieveOffer.getId()); } } catch (UnsupportedOperationException e) { LOG.warn("Checking for offer code max usage has not been enabled in your Broadleaf installation. This warning" + " will only appear in the Broadleaf 3.0 line, versions 3.0.6-GA and above. In order to fix your" + " version of Broadleaf to enable this functionality, refer to the OfferAuditWeaveImpl or directly to" + " https://github.com/BroadleafCommerce/BroadleafCommerce/pull/195."); } audit.setRedeemedDate(SystemTime.asDate()); audit = offerAuditService.save(audit); audits.add(audit); } return audits; } }
0true
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_workflow_RecordOfferUsageActivity.java
1,879
boolean b = h1.executeTransaction(options, new TransactionalTask<Boolean>() { public Boolean execute(TransactionalTaskContext context) throws TransactionException { final TransactionalMap<Object, Object> txMap = context.getMap("default"); txMap.put("3", "3"); assertEquals(3, txMap.values().size()); map2.put("4", "4"); assertEquals(4, txMap.values().size()); txMap.remove("1"); assertEquals(3, txMap.values().size()); map2.remove("2"); assertEquals(2, txMap.values().size()); assertEquals(2, txMap.size()); txMap.put("12", "32"); assertEquals(2, map2.values().size()); return true; } });
0true
hazelcast_src_test_java_com_hazelcast_map_MapTransactionTest.java
38
public class ConcurrentHashMapV8<K,V> extends AbstractMap<K,V> implements ConcurrentMap<K,V>, Serializable { private static final long serialVersionUID = 7249069246763182397L; /** * An object for traversing and partitioning elements of a source. * This interface provides a subset of the functionality of JDK8 * java.util.Spliterator. */ public static interface ConcurrentHashMapSpliterator<T> { /** * If possible, returns a new spliterator covering * approximately one half of the elements, which will not be * covered by this spliterator. Returns null if cannot be * split. */ ConcurrentHashMapSpliterator<T> trySplit(); /** * Returns an estimate of the number of elements covered by * this Spliterator. */ long estimateSize(); /** Applies the action to each untraversed element */ void forEachRemaining(Action<? super T> action); /** If an element remains, applies the action and returns true. */ boolean tryAdvance(Action<? super T> action); } // Sams /** Interface describing a void action of one argument */ public interface Action<A> { void apply(A a); } /** Interface describing a void action of two arguments */ public interface BiAction<A,B> { void apply(A a, B b); } /** Interface describing a function of one argument */ public interface Fun<A,T> { T apply(A a); } /** Interface describing a function of two arguments */ public interface BiFun<A,B,T> { T apply(A a, B b); } /** Interface describing a function mapping its argument to a double */ public interface ObjectToDouble<A> { double apply(A a); } /** Interface describing a function mapping its argument to a long */ public interface ObjectToLong<A> { long apply(A a); } /** Interface describing a function mapping its argument to an int */ public interface ObjectToInt<A> {int apply(A a); } /** Interface describing a function mapping two arguments to a double */ public interface ObjectByObjectToDouble<A,B> { double apply(A a, B b); } /** Interface describing a function mapping two arguments to a long */ public interface ObjectByObjectToLong<A,B> { long apply(A a, B b); } /** Interface describing a function mapping two arguments to an int */ public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); } /** Interface describing a function mapping two doubles to a double */ public interface DoubleByDoubleToDouble { double apply(double a, double b); } /** Interface describing a function mapping two longs to a long */ public interface LongByLongToLong { long apply(long a, long b); } /** Interface describing a function mapping two ints to an int */ public interface IntByIntToInt { int apply(int a, int b); } /* * Overview: * * The primary design goal of this hash table is to maintain * concurrent readability (typically method get(), but also * iterators and related methods) while minimizing update * contention. Secondary goals are to keep space consumption about * the same or better than java.util.HashMap, and to support high * initial insertion rates on an empty table by many threads. * * This map usually acts as a binned (bucketed) hash table. Each * key-value mapping is held in a Node. Most nodes are instances * of the basic Node class with hash, key, value, and next * fields. However, various subclasses exist: TreeNodes are * arranged in balanced trees, not lists. TreeBins hold the roots * of sets of TreeNodes. ForwardingNodes are placed at the heads * of bins during resizing. ReservationNodes are used as * placeholders while establishing values in computeIfAbsent and * related methods. 
The types TreeBin, ForwardingNode, and * ReservationNode do not hold normal user keys, values, or * hashes, and are readily distinguishable during search etc * because they have negative hash fields and null key and value * fields. (These special nodes are either uncommon or transient, * so the impact of carrying around some unused fields is * insignificant.) * * The table is lazily initialized to a power-of-two size upon the * first insertion. Each bin in the table normally contains a * list of Nodes (most often, the list has only zero or one Node). * Table accesses require volatile/atomic reads, writes, and * CASes. Because there is no other way to arrange this without * adding further indirections, we use intrinsics * (sun.misc.Unsafe) operations. * * We use the top (sign) bit of Node hash fields for control * purposes -- it is available anyway because of addressing * constraints. Nodes with negative hash fields are specially * handled or ignored in map methods. * * Insertion (via put or its variants) of the first node in an * empty bin is performed by just CASing it to the bin. This is * by far the most common case for put operations under most * key/hash distributions. Other update operations (insert, * delete, and replace) require locks. We do not want to waste * the space required to associate a distinct lock object with * each bin, so instead use the first node of a bin list itself as * a lock. Locking support for these locks relies on builtin * "synchronized" monitors. * * Using the first node of a list as a lock does not by itself * suffice though: When a node is locked, any update must first * validate that it is still the first node after locking it, and * retry if not. Because new nodes are always appended to lists, * once a node is first in a bin, it remains first until deleted * or the bin becomes invalidated (upon resizing). * * The main disadvantage of per-bin locks is that other update * operations on other nodes in a bin list protected by the same * lock can stall, for example when user equals() or mapping * functions take a long time. However, statistically, under * random hash codes, this is not a common problem. Ideally, the * frequency of nodes in bins follows a Poisson distribution * (http://en.wikipedia.org/wiki/Poisson_distribution) with a * parameter of about 0.5 on average, given the resizing threshold * of 0.75, although with a large variance because of resizing * granularity. Ignoring variance, the expected occurrences of * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The * first values are: * * 0: 0.60653066 * 1: 0.30326533 * 2: 0.07581633 * 3: 0.01263606 * 4: 0.00157952 * 5: 0.00015795 * 6: 0.00001316 * 7: 0.00000094 * 8: 0.00000006 * more: less than 1 in ten million * * Lock contention probability for two threads accessing distinct * elements is roughly 1 / (8 * #elements) under random hashes. * * Actual hash code distributions encountered in practice * sometimes deviate significantly from uniform randomness. This * includes the case when N > (1<<30), so some keys MUST collide. * Similarly for dumb or hostile usages in which multiple keys are * designed to have identical hash codes or ones that differs only * in masked-out high bits. So we use a secondary strategy that * applies when the number of nodes in a bin exceeds a * threshold. These TreeBins use a balanced tree to hold nodes (a * specialized form of red-black trees), bounding search time to * O(log N). 
Each search step in a TreeBin is at least twice as * slow as in a regular list, but given that N cannot exceed * (1<<64) (before running out of addresses) this bounds search * steps, lock hold times, etc, to reasonable constants (roughly * 100 nodes inspected per operation worst case) so long as keys * are Comparable (which is very common -- String, Long, etc). * TreeBin nodes (TreeNodes) also maintain the same "next" * traversal pointers as regular nodes, so can be traversed in * iterators in the same way. * * The table is resized when occupancy exceeds a percentage * threshold (nominally, 0.75, but see below). Any thread * noticing an overfull bin may assist in resizing after the * initiating thread allocates and sets up the replacement array. * However, rather than stalling, these other threads may proceed * with insertions etc. The use of TreeBins shields us from the * worst case effects of overfilling while resizes are in * progress. Resizing proceeds by transferring bins, one by one, * from the table to the next table. However, threads claim small * blocks of indices to transfer (via field transferIndex) before * doing so, reducing contention. A generation stamp in field * sizeCtl ensures that resizings do not overlap. Because we are * using power-of-two expansion, the elements from each bin must * either stay at same index, or move with a power of two * offset. We eliminate unnecessary node creation by catching * cases where old nodes can be reused because their next fields * won't change. On average, only about one-sixth of them need * cloning when a table doubles. The nodes they replace will be * garbage collectable as soon as they are no longer referenced by * any reader thread that may be in the midst of concurrently * traversing table. Upon transfer, the old table bin contains * only a special forwarding node (with hash field "MOVED") that * contains the next table as its key. On encountering a * forwarding node, access and update operations restart, using * the new table. * * Each bin transfer requires its bin lock, which can stall * waiting for locks while resizing. However, because other * threads can join in and help resize rather than contend for * locks, average aggregate waits become shorter as resizing * progresses. The transfer operation must also ensure that all * accessible bins in both the old and new table are usable by any * traversal. This is arranged in part by proceeding from the * last bin (table.length - 1) up towards the first. Upon seeing * a forwarding node, traversals (see class Traverser) arrange to * move to the new table without revisiting nodes. To ensure that * no intervening nodes are skipped even when moved out of order, * a stack (see class TableStack) is created on first encounter of * a forwarding node during a traversal, to maintain its place if * later processing the current table. The need for these * save/restore mechanics is relatively rare, but when one * forwarding node is encountered, typically many more will be. * So Traversers use a simple caching scheme to avoid creating so * many new TableStack nodes. (Thanks to Peter Levart for * suggesting use of a stack here.) * * The traversal scheme also applies to partial traversals of * ranges of bins (via an alternate Traverser constructor) * to support partitioned aggregate operations. Also, read-only * operations give up if ever forwarded to a null table, which * provides support for shutdown-style clearing, which is also not * currently implemented. 
* * Lazy table initialization minimizes footprint until first use, * and also avoids resizings when the first operation is from a * putAll, constructor with map argument, or deserialization. * These cases attempt to override the initial capacity settings, * but harmlessly fail to take effect in cases of races. * * The element count is maintained using a specialization of * LongAdder. We need to incorporate a specialization rather than * just use a LongAdder in order to access implicit * contention-sensing that leads to creation of multiple * CounterCells. The counter mechanics avoid contention on * updates but can encounter cache thrashing if read too * frequently during concurrent access. To avoid reading so often, * resizing under contention is attempted only upon adding to a * bin already holding two or more nodes. Under uniform hash * distributions, the probability of this occurring at threshold * is around 13%, meaning that only about 1 in 8 puts check * threshold (and after resizing, many fewer do so). * * TreeBins use a special form of comparison for search and * related operations (which is the main reason we cannot use * existing collections such as TreeMaps). TreeBins contain * Comparable elements, but may contain others, as well as * elements that are Comparable but not necessarily Comparable for * the same T, so we cannot invoke compareTo among them. To handle * this, the tree is ordered primarily by hash value, then by * Comparable.compareTo order if applicable. On lookup at a node, * if elements are not comparable or compare as 0 then both left * and right children may need to be searched in the case of tied * hash values. (This corresponds to the full list search that * would be necessary if all elements were non-Comparable and had * tied hashes.) On insertion, to keep a total ordering (or as * close as is required here) across rebalancings, we compare * classes and identityHashCodes as tie-breakers. The red-black * balancing code is updated from pre-jdk-collections * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) * based in turn on Cormen, Leiserson, and Rivest "Introduction to * Algorithms" (CLR). * * TreeBins also require an additional locking mechanism. While * list traversal is always possible by readers even during * updates, tree traversal is not, mainly because of tree-rotations * that may change the root node and/or its linkages. TreeBins * include a simple read-write lock mechanism parasitic on the * main bin-synchronization strategy: Structural adjustments * associated with an insertion or removal are already bin-locked * (and so cannot conflict with other writers) but must wait for * ongoing readers to finish. Since there can be only one such * waiter, we use a simple scheme using a single "waiter" field to * block writers. However, readers need never block. If the root * lock is held, they proceed along the slow traversal path (via * next-pointers) until the lock becomes available or the list is * exhausted, whichever comes first. These cases are not fast, but * maximize aggregate expected throughput. * * Maintaining API and serialization compatibility with previous * versions of this class introduces several oddities. Mainly: We * leave untouched but unused constructor arguments refering to * concurrencyLevel. We accept a loadFactor constructor argument, * but apply it only to initial table capacity (which is the only * time that we can guarantee to honor it.) 
We also declare an * unused "Segment" class that is instantiated in minimal form * only when serializing. * * Also, solely for compatibility with previous versions of this * class, it extends AbstractMap, even though all of its methods * are overridden, so it is just useless baggage. * * This file is organized to make things a little easier to follow * while reading than they might otherwise: First the main static * declarations and utilities, then fields, then main public * methods (with a few factorings of multiple public methods into * internal ones), then sizing methods, trees, traversers, and * bulk operations. */ /* ---------------- Constants -------------- */ /** * The largest possible table capacity. This value must be * exactly 1<<30 to stay within Java array allocation and indexing * bounds for power of two table sizes, and is further required * because the top two bits of 32bit hash fields are used for * control purposes. */ private static final int MAXIMUM_CAPACITY = 1 << 30; /** * The default initial table capacity. Must be a power of 2 * (i.e., at least 1) and at most MAXIMUM_CAPACITY. */ private static final int DEFAULT_CAPACITY = 16; /** * The largest possible (non-power of two) array size. * Needed by toArray and related methods. */ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; /** * The default concurrency level for this table. Unused but * defined for compatibility with previous versions of this class. */ private static final int DEFAULT_CONCURRENCY_LEVEL = 16; /** * The load factor for this table. Overrides of this value in * constructors affect only the initial table capacity. The * actual floating point value isn't normally used -- it is * simpler to use expressions such as {@code n - (n >>> 2)} for * the associated resizing threshold. */ private static final float LOAD_FACTOR = 0.75f; /** * The bin count threshold for using a tree rather than list for a * bin. Bins are converted to trees when adding an element to a * bin with at least this many nodes. The value must be greater * than 2, and should be at least 8 to mesh with assumptions in * tree removal about conversion back to plain bins upon * shrinkage. */ static final int TREEIFY_THRESHOLD = 8; /** * The bin count threshold for untreeifying a (split) bin during a * resize operation. Should be less than TREEIFY_THRESHOLD, and at * most 6 to mesh with shrinkage detection under removal. */ static final int UNTREEIFY_THRESHOLD = 6; /** * The smallest table capacity for which bins may be treeified. * (Otherwise the table is resized if too many nodes in a bin.) * The value should be at least 4 * TREEIFY_THRESHOLD to avoid * conflicts between resizing and treeification thresholds. */ static final int MIN_TREEIFY_CAPACITY = 64; /** * Minimum number of rebinnings per transfer step. Ranges are * subdivided to allow multiple resizer threads. This value * serves as a lower bound to avoid resizers encountering * excessive memory contention. The value should be at least * DEFAULT_CAPACITY. */ private static final int MIN_TRANSFER_STRIDE = 16; /** * The number of bits used for generation stamp in sizeCtl. * Must be at least 6 for 32bit arrays. */ private static int RESIZE_STAMP_BITS = 16; /** * The maximum number of threads that can help resize. * Must fit in 32 - RESIZE_STAMP_BITS bits. */ private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1; /** * The bit shift for recording size stamp in sizeCtl. 
*/ private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS; /* * Encodings for Node hash fields. See above for explanation. */ static final int MOVED = -1; // hash for forwarding nodes static final int TREEBIN = -2; // hash for roots of trees static final int RESERVED = -3; // hash for transient reservations static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash /** Number of CPUS, to place bounds on some sizings */ static final int NCPU = Runtime.getRuntime().availableProcessors(); /** For serialization compatibility. */ private static final ObjectStreamField[] serialPersistentFields = { new ObjectStreamField("segments", Segment[].class), new ObjectStreamField("segmentMask", Integer.TYPE), new ObjectStreamField("segmentShift", Integer.TYPE) }; /* ---------------- Nodes -------------- */ /** * Key-value entry. This class is never exported out as a * user-mutable Map.Entry (i.e., one supporting setValue; see * MapEntry below), but can be used for read-only traversals used * in bulk tasks. Subclasses of Node with a negative hash field * are special, and contain null keys and values (but are never * exported). Otherwise, keys and vals are never null. */ static class Node<K,V> implements Map.Entry<K,V> { final int hash; final K key; volatile V val; volatile Node<K,V> next; Node(int hash, K key, V val, Node<K,V> next) { this.hash = hash; this.key = key; this.val = val; this.next = next; } public final K getKey() { return key; } public final V getValue() { return val; } public final int hashCode() { return key.hashCode() ^ val.hashCode(); } public final String toString(){ return key + "=" + val; } public final V setValue(V value) { throw new UnsupportedOperationException(); } public final boolean equals(Object o) { Object k, v, u; Map.Entry<?,?> e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry<?,?>)o).getKey()) != null && (v = e.getValue()) != null && (k == key || k.equals(key)) && (v == (u = val) || v.equals(u))); } /** * Virtualized support for map.get(); overridden in subclasses. */ Node<K,V> find(int h, Object k) { Node<K,V> e = this; if (k != null) { do { K ek; if (e.hash == h && ((ek = e.key) == k || (ek != null && k.equals(ek)))) return e; } while ((e = e.next) != null); } return null; } } /* ---------------- Static utilities -------------- */ /** * Spreads (XORs) higher bits of hash to lower and also forces top * bit to 0. Because the table uses power-of-two masking, sets of * hashes that vary only in bits above the current mask will * always collide. (Among known examples are sets of Float keys * holding consecutive whole numbers in small tables.) So we * apply a transform that spreads the impact of higher bits * downward. There is a tradeoff between speed, utility, and * quality of bit-spreading. Because many common sets of hashes * are already reasonably distributed (so don't benefit from * spreading), and because we use trees to handle large sets of * collisions in bins, we just XOR some shifted bits in the * cheapest possible way to reduce systematic lossage, as well as * to incorporate impact of the highest bits that would otherwise * never be used in index calculations because of table bounds. */ static final int spread(int h) { return (h ^ (h >>> 16)) & HASH_BITS; } /** * Returns a power of two table size for the given desired capacity. * See Hackers Delight, sec 3.2 */ private static final int tableSizeFor(int c) { int n = c - 1; n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16; return (n < 0) ? 
1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; } /** * Returns x's Class if it is of the form "class C implements * Comparable<C>", else null. */ static Class<?> comparableClassFor(Object x) { if (x instanceof Comparable) { Class<?> c; Type[] ts, as; Type t; ParameterizedType p; if ((c = x.getClass()) == String.class) // bypass checks return c; if ((ts = c.getGenericInterfaces()) != null) { for (int i = 0; i < ts.length; ++i) { if (((t = ts[i]) instanceof ParameterizedType) && ((p = (ParameterizedType)t).getRawType() == Comparable.class) && (as = p.getActualTypeArguments()) != null && as.length == 1 && as[0] == c) // type arg is c return c; } } } return null; } /** * Returns k.compareTo(x) if x matches kc (k's screened comparable * class), else 0. */ @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable static int compareComparables(Class<?> kc, Object k, Object x) { return (x == null || x.getClass() != kc ? 0 : ((Comparable)k).compareTo(x)); } /* ---------------- Table element access -------------- */ /* * Volatile access methods are used for table elements as well as * elements of in-progress next table while resizing. All uses of * the tab arguments must be null checked by callers. All callers * also paranoically precheck that tab's length is not zero (or an * equivalent check), thus ensuring that any index argument taking * the form of a hash value anded with (length - 1) is a valid * index. Note that, to be correct wrt arbitrary concurrency * errors by users, these checks must operate on local variables, * which accounts for some odd-looking inline assignments below. * Note that calls to setTabAt always occur within locked regions, * and so in principle require only release ordering, not * full volatile semantics, but are currently coded as volatile * writes to be conservative. */ @SuppressWarnings("unchecked") static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) { return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE); } static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i, Node<K,V> c, Node<K,V> v) { return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v); } static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) { U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); } /* ---------------- Fields -------------- */ /** * The array of bins. Lazily initialized upon first insertion. * Size is always a power of two. Accessed directly by iterators. */ transient volatile Node<K,V>[] table; /** * The next table to use; non-null only while resizing. */ private transient volatile Node<K,V>[] nextTable; /** * Base counter value, used mainly when there is no contention, * but also as a fallback during table initialization * races. Updated via CAS. */ private transient volatile long baseCount; /** * Table initialization and resizing control. When negative, the * table is being initialized or resized: -1 for initialization, * else -(1 + the number of active resizing threads). Otherwise, * when table is null, holds the initial table size to use upon * creation, or 0 for default. After initialization, holds the * next element count value upon which to resize the table. */ private transient volatile int sizeCtl; /** * The next table index (plus one) to split while resizing. */ private transient volatile int transferIndex; /** * Spinlock (locked via CAS) used when resizing and/or creating CounterCells. */ private transient volatile int cellsBusy; /** * Table of counter cells. When non-null, size is a power of 2. 
*/ private transient volatile CounterCell[] counterCells; // views private transient KeySetView<K,V> keySet; private transient ValuesView<K,V> values; private transient EntrySetView<K,V> entrySet; /* ---------------- Public operations -------------- */ /** * Creates a new, empty map with the default initial table size (16). */ public ConcurrentHashMapV8() { } /** * Creates a new, empty map with an initial table size * accommodating the specified number of elements without the need * to dynamically resize. * * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. * @throws IllegalArgumentException if the initial capacity of * elements is negative */ public ConcurrentHashMapV8(int initialCapacity) { if (initialCapacity < 0) throw new IllegalArgumentException(); int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); this.sizeCtl = cap; } /** * Creates a new map with the same mappings as the given map. * * @param m the map */ public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) { this.sizeCtl = DEFAULT_CAPACITY; putAll(m); } /** * Creates a new, empty map with an initial table size based on * the given number of elements ({@code initialCapacity}) and * initial table density ({@code loadFactor}). * * @param initialCapacity the initial capacity. The implementation * performs internal sizing to accommodate this many elements, * given the specified load factor. * @param loadFactor the load factor (table density) for * establishing the initial table size * @throws IllegalArgumentException if the initial capacity of * elements is negative or the load factor is nonpositive * * @since 1.6 */ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { this(initialCapacity, loadFactor, 1); } /** * Creates a new, empty map with an initial table size based on * the given number of elements ({@code initialCapacity}), table * density ({@code loadFactor}), and number of concurrently * updating threads ({@code concurrencyLevel}). * * @param initialCapacity the initial capacity. The implementation * performs internal sizing to accommodate this many elements, * given the specified load factor. * @param loadFactor the load factor (table density) for * establishing the initial table size * @param concurrencyLevel the estimated number of concurrently * updating threads. The implementation may use this value as * a sizing hint. * @throws IllegalArgumentException if the initial capacity is * negative or the load factor or concurrencyLevel are * nonpositive */ public ConcurrentHashMapV8(int initialCapacity, float loadFactor, int concurrencyLevel) { if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) throw new IllegalArgumentException(); if (initialCapacity < concurrencyLevel) // Use at least as many bins initialCapacity = concurrencyLevel; // as estimated threads long size = (long)(1.0 + (long)initialCapacity / loadFactor); int cap = (size >= (long)MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : tableSizeFor((int)size); this.sizeCtl = cap; } // Original (since JDK1.2) Map methods /** * {@inheritDoc} */ public int size() { long n = sumCount(); return ((n < 0L) ? 0 : (n > (long)Integer.MAX_VALUE) ? 
Integer.MAX_VALUE : (int)n); } /** * {@inheritDoc} */ public boolean isEmpty() { return sumCount() <= 0L; // ignore transient negative values } /** * Returns the value to which the specified key is mapped, * or {@code null} if this map contains no mapping for the key. * * <p>More formally, if this map contains a mapping from a key * {@code k} to a value {@code v} such that {@code key.equals(k)}, * then this method returns {@code v}; otherwise it returns * {@code null}. (There can be at most one such mapping.) * * @throws NullPointerException if the specified key is null */ public V get(Object key) { Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek; int h = spread(key.hashCode()); if ((tab = table) != null && (n = tab.length) > 0 && (e = tabAt(tab, (n - 1) & h)) != null) { if ((eh = e.hash) == h) { if ((ek = e.key) == key || (ek != null && key.equals(ek))) return e.val; } else if (eh < 0) return (p = e.find(h, key)) != null ? p.val : null; while ((e = e.next) != null) { if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek)))) return e.val; } } return null; } /** * Tests if the specified object is a key in this table. * * @param key possible key * @return {@code true} if and only if the specified object * is a key in this table, as determined by the * {@code equals} method; {@code false} otherwise * @throws NullPointerException if the specified key is null */ public boolean containsKey(Object key) { return get(key) != null; } /** * Returns {@code true} if this map maps one or more keys to the * specified value. Note: This method may require a full traversal * of the map, and is much slower than method {@code containsKey}. * * @param value value whose presence in this map is to be tested * @return {@code true} if this map maps one or more keys to the * specified value * @throws NullPointerException if the specified value is null */ public boolean containsValue(Object value) { if (value == null) throw new NullPointerException(); Node<K,V>[] t; if ((t = table) != null) { Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); for (Node<K,V> p; (p = it.advance()) != null; ) { V v; if ((v = p.val) == value || (v != null && value.equals(v))) return true; } } return false; } /** * Maps the specified key to the specified value in this table. * Neither the key nor the value can be null. * * <p>The value can be retrieved by calling the {@code get} method * with a key that is equal to the original key. 
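 *
 * <p>A minimal usage sketch (the identifiers below are illustrative,
 * not taken from this file):
 * <pre> {@code
 * ConcurrentHashMapV8<String, Integer> map =
 *     new ConcurrentHashMapV8<String, Integer>();
 * map.put("a", 1);             // insert or overwrite; returns previous value or null
 * map.putIfAbsent("a", 2);     // key already present, so no change; returns 1
 * Integer v = map.get("a");    // 1
 * }</pre>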
* * @param key key with which the specified value is to be associated * @param value value to be associated with the specified key * @return the previous value associated with {@code key}, or * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key or value is null */ public V put(K key, V value) { return putVal(key, value, false); } /** Implementation for put and putIfAbsent */ final V putVal(K key, V value, boolean onlyIfAbsent) { if (key == null || value == null) throw new NullPointerException(); int hash = spread(key.hashCode()); int binCount = 0; for (Node<K,V>[] tab = table;;) { Node<K,V> f; int n, i, fh; if (tab == null || (n = tab.length) == 0) tab = initTable(); else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) { if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value, null))) break; // no lock when adding to empty bin } else if ((fh = f.hash) == MOVED) tab = helpTransfer(tab, f); else { V oldVal = null; synchronized (f) { if (tabAt(tab, i) == f) { if (fh >= 0) { binCount = 1; for (Node<K,V> e = f;; ++binCount) { K ek; if (e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) { oldVal = e.val; if (!onlyIfAbsent) e.val = value; break; } Node<K,V> pred = e; if ((e = e.next) == null) { pred.next = new Node<K,V>(hash, key, value, null); break; } } } else if (f instanceof TreeBin) { Node<K,V> p; binCount = 2; if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key, value)) != null) { oldVal = p.val; if (!onlyIfAbsent) p.val = value; } } } } if (binCount != 0) { if (binCount >= TREEIFY_THRESHOLD) treeifyBin(tab, i); if (oldVal != null) return oldVal; break; } } } addCount(1L, binCount); return null; } /** * Copies all of the mappings from the specified map to this one. * These mappings replace any mappings that this map had for any of the * keys currently in the specified map. * * @param m mappings to be stored in this map */ public void putAll(Map<? extends K, ? extends V> m) { tryPresize(m.size()); for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) putVal(e.getKey(), e.getValue(), false); } /** * Removes the key (and its corresponding value) from this map. * This method does nothing if the key is not in the map. * * @param key the key that needs to be removed * @return the previous value associated with {@code key}, or * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key is null */ public V remove(Object key) { return replaceNode(key, null, null); } /** * Implementation for the four public remove/replace methods: * Replaces node value with v, conditional upon match of cv if * non-null. If resulting value is null, delete. 
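 * The four public callers map onto the (value, cv) arguments as follows:
 * <pre> {@code
 * remove(key)                      -> replaceNode(key, null,     null)
 * remove(key, value)               -> replaceNode(key, null,     value)
 * replace(key, value)              -> replaceNode(key, value,    null)
 * replace(key, oldValue, newValue) -> replaceNode(key, newValue, oldValue)
 * }</pre>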
*/ final V replaceNode(Object key, V value, Object cv) { int hash = spread(key.hashCode()); for (Node<K,V>[] tab = table;;) { Node<K,V> f; int n, i, fh; if (tab == null || (n = tab.length) == 0 || (f = tabAt(tab, i = (n - 1) & hash)) == null) break; else if ((fh = f.hash) == MOVED) tab = helpTransfer(tab, f); else { V oldVal = null; boolean validated = false; synchronized (f) { if (tabAt(tab, i) == f) { if (fh >= 0) { validated = true; for (Node<K,V> e = f, pred = null;;) { K ek; if (e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) { V ev = e.val; if (cv == null || cv == ev || (ev != null && cv.equals(ev))) { oldVal = ev; if (value != null) e.val = value; else if (pred != null) pred.next = e.next; else setTabAt(tab, i, e.next); } break; } pred = e; if ((e = e.next) == null) break; } } else if (f instanceof TreeBin) { validated = true; TreeBin<K,V> t = (TreeBin<K,V>)f; TreeNode<K,V> r, p; if ((r = t.root) != null && (p = r.findTreeNode(hash, key, null)) != null) { V pv = p.val; if (cv == null || cv == pv || (pv != null && cv.equals(pv))) { oldVal = pv; if (value != null) p.val = value; else if (t.removeTreeNode(p)) setTabAt(tab, i, untreeify(t.first)); } } } } } if (validated) { if (oldVal != null) { if (value == null) addCount(-1L, -1); return oldVal; } break; } } } return null; } /** * Removes all of the mappings from this map. */ public void clear() { long delta = 0L; // negative number of deletions int i = 0; Node<K,V>[] tab = table; while (tab != null && i < tab.length) { int fh; Node<K,V> f = tabAt(tab, i); if (f == null) ++i; else if ((fh = f.hash) == MOVED) { tab = helpTransfer(tab, f); i = 0; // restart } else { synchronized (f) { if (tabAt(tab, i) == f) { Node<K,V> p = (fh >= 0 ? f : (f instanceof TreeBin) ? ((TreeBin<K,V>)f).first : null); while (p != null) { --delta; p = p.next; } setTabAt(tab, i++, null); } } } } if (delta != 0L) addCount(delta, -1); } /** * Returns a {@link Set} view of the keys contained in this map. * The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. The set supports element * removal, which removes the corresponding mapping from this map, * via the {@code Iterator.remove}, {@code Set.remove}, * {@code removeAll}, {@code retainAll}, and {@code clear} * operations. It does not support the {@code add} or * {@code addAll} operations. * * <p>The view's {@code iterator} is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * @return the set view */ public KeySetView<K,V> keySet() { KeySetView<K,V> ks; return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null)); } /** * Returns a {@link Collection} view of the values contained in this map. * The collection is backed by the map, so changes to the map are * reflected in the collection, and vice-versa. The collection * supports element removal, which removes the corresponding * mapping from this map, via the {@code Iterator.remove}, * {@code Collection.remove}, {@code removeAll}, * {@code retainAll}, and {@code clear} operations. It does not * support the {@code add} or {@code addAll} operations. 
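 *
 * <p>For example (an illustrative sketch; the identifiers are not from
 * this file), removal through the view removes the mapping itself:
 * <pre> {@code
 * ConcurrentHashMapV8<String, Integer> map =
 *     new ConcurrentHashMapV8<String, Integer>();
 * map.put("a", 1);
 * map.values().remove(Integer.valueOf(1));   // also removes the mapping "a" -> 1
 * }</pre>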
* * <p>The view's {@code iterator} is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * @return the collection view */ public Collection<V> values() { ValuesView<K,V> vs; return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this)); } /** * Returns a {@link Set} view of the mappings contained in this map. * The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. The set supports element * removal, which removes the corresponding mapping from the map, * via the {@code Iterator.remove}, {@code Set.remove}, * {@code removeAll}, {@code retainAll}, and {@code clear} * operations. * * <p>The view's {@code iterator} is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. * * @return the set view */ public Set<Map.Entry<K,V>> entrySet() { EntrySetView<K,V> es; return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this)); } /** * Returns the hash code value for this {@link Map}, i.e., * the sum of, for each key-value pair in the map, * {@code key.hashCode() ^ value.hashCode()}. * * @return the hash code value for this map */ public int hashCode() { int h = 0; Node<K,V>[] t; if ((t = table) != null) { Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); for (Node<K,V> p; (p = it.advance()) != null; ) h += p.key.hashCode() ^ p.val.hashCode(); } return h; } /** * Returns a string representation of this map. The string * representation consists of a list of key-value mappings (in no * particular order) enclosed in braces ("{@code {}}"). Adjacent * mappings are separated by the characters {@code ", "} (comma * and space). Each key-value mapping is rendered as the key * followed by an equals sign ("{@code =}") followed by the * associated value. * * @return a string representation of this map */ public String toString() { Node<K,V>[] t; int f = (t = table) == null ? 0 : t.length; Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f); StringBuilder sb = new StringBuilder(); sb.append('{'); Node<K,V> p; if ((p = it.advance()) != null) { for (;;) { K k = p.key; V v = p.val; sb.append(k == this ? "(this Map)" : k); sb.append('='); sb.append(v == this ? "(this Map)" : v); if ((p = it.advance()) == null) break; sb.append(',').append(' '); } } return sb.append('}').toString(); } /** * Compares the specified object with this map for equality. * Returns {@code true} if the given object is a map with the same * mappings as this map. This operation may return misleading * results if either map is concurrently modified during execution * of this method. * * @param o object to be compared for equality with this map * @return {@code true} if the specified object is equal to this map */ public boolean equals(Object o) { if (o != this) { if (!(o instanceof Map)) return false; Map<?,?> m = (Map<?,?>) o; Node<K,V>[] t; int f = (t = table) == null ? 
0 : t.length; Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f); for (Node<K,V> p; (p = it.advance()) != null; ) { V val = p.val; Object v = m.get(p.key); if (v == null || (v != val && !v.equals(val))) return false; } for (Map.Entry<?,?> e : m.entrySet()) { Object mk, mv, v; if ((mk = e.getKey()) == null || (mv = e.getValue()) == null || (v = get(mk)) == null || (mv != v && !mv.equals(v))) return false; } } return true; } /** * Stripped-down version of helper class used in previous version, * declared for the sake of serialization compatibility */ static class Segment<K,V> extends ReentrantLock implements Serializable { private static final long serialVersionUID = 2249069246763182397L; final float loadFactor; Segment(float lf) { this.loadFactor = lf; } } /** * Saves the state of the {@code ConcurrentHashMapV8} instance to a * stream (i.e., serializes it). * @param s the stream * @throws java.io.IOException if an I/O error occurs * @serialData * the key (Object) and value (Object) * for each key-value mapping, followed by a null pair. * The key-value mappings are emitted in no particular order. */ private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException { // For serialization compatibility // Emulate segment calculation from previous version of this class int sshift = 0; int ssize = 1; while (ssize < DEFAULT_CONCURRENCY_LEVEL) { ++sshift; ssize <<= 1; } int segmentShift = 32 - sshift; int segmentMask = ssize - 1; @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[]) new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL]; for (int i = 0; i < segments.length; ++i) segments[i] = new Segment<K,V>(LOAD_FACTOR); s.putFields().put("segments", segments); s.putFields().put("segmentShift", segmentShift); s.putFields().put("segmentMask", segmentMask); s.writeFields(); Node<K,V>[] t; if ((t = table) != null) { Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); for (Node<K,V> p; (p = it.advance()) != null; ) { s.writeObject(p.key); s.writeObject(p.val); } } s.writeObject(null); s.writeObject(null); segments = null; // throw away } /** * Reconstitutes the instance from a stream (that is, deserializes it). * @param s the stream * @throws ClassNotFoundException if the class of a serialized object * could not be found * @throws java.io.IOException if an I/O error occurs */ private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { /* * To improve performance in typical cases, we create nodes * while reading, then place in table once size is known. * However, we must also validate uniqueness and deal with * overpopulated bins while doing so, which requires * specialized versions of putVal mechanics. 
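 * The stream itself (see the writeObject @serialData note) is just the
 * key/value pairs in no particular order, terminated by a null pair;
 * the loop below rebuilds a node list from it and only then sizes and
 * populates the table.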
*/ sizeCtl = -1; // force exclusion for table construction s.defaultReadObject(); long size = 0L; Node<K,V> p = null; for (;;) { @SuppressWarnings("unchecked") K k = (K) s.readObject(); @SuppressWarnings("unchecked") V v = (V) s.readObject(); if (k != null && v != null) { p = new Node<K,V>(spread(k.hashCode()), k, v, p); ++size; } else break; } if (size == 0L) sizeCtl = 0; else { int n; if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) n = MAXIMUM_CAPACITY; else { int sz = (int)size; n = tableSizeFor(sz + (sz >>> 1) + 1); } @SuppressWarnings("unchecked") Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n]; int mask = n - 1; long added = 0L; while (p != null) { boolean insertAtFront; Node<K,V> next = p.next, first; int h = p.hash, j = h & mask; if ((first = tabAt(tab, j)) == null) insertAtFront = true; else { K k = p.key; if (first.hash < 0) { TreeBin<K,V> t = (TreeBin<K,V>)first; if (t.putTreeVal(h, k, p.val) == null) ++added; insertAtFront = false; } else { int binCount = 0; insertAtFront = true; Node<K,V> q; K qk; for (q = first; q != null; q = q.next) { if (q.hash == h && ((qk = q.key) == k || (qk != null && k.equals(qk)))) { insertAtFront = false; break; } ++binCount; } if (insertAtFront && binCount >= TREEIFY_THRESHOLD) { insertAtFront = false; ++added; p.next = first; TreeNode<K,V> hd = null, tl = null; for (q = p; q != null; q = q.next) { TreeNode<K,V> t = new TreeNode<K,V> (q.hash, q.key, q.val, null, null); if ((t.prev = tl) == null) hd = t; else tl.next = t; tl = t; } setTabAt(tab, j, new TreeBin<K,V>(hd)); } } } if (insertAtFront) { ++added; p.next = first; setTabAt(tab, j, p); } p = next; } table = tab; sizeCtl = n - (n >>> 2); baseCount = added; } } // ConcurrentMap methods /** * {@inheritDoc} * * @return the previous value associated with the specified key, * or {@code null} if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ public V putIfAbsent(K key, V value) { return putVal(key, value, true); } /** * {@inheritDoc} * * @throws NullPointerException if the specified key is null */ public boolean remove(Object key, Object value) { if (key == null) throw new NullPointerException(); return value != null && replaceNode(key, null, value) != null; } /** * {@inheritDoc} * * @throws NullPointerException if any of the arguments are null */ public boolean replace(K key, V oldValue, V newValue) { if (key == null || oldValue == null || newValue == null) throw new NullPointerException(); return replaceNode(key, newValue, oldValue) != null; } /** * {@inheritDoc} * * @return the previous value associated with the specified key, * or {@code null} if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ public V replace(K key, V value) { if (key == null || value == null) throw new NullPointerException(); return replaceNode(key, value, null); } // Overrides of JDK8+ Map extension method defaults /** * Returns the value to which the specified key is mapped, or the * given default value if this map contains no mapping for the * key. * * @param key the key whose associated value is to be returned * @param defaultValue the value to return if this map contains * no mapping for the given key * @return the mapping for the key, if present; else the default value * @throws NullPointerException if the specified key is null */ public V getOrDefault(Object key, V defaultValue) { V v; return (v = get(key)) == null ? defaultValue : v; } public void forEach(BiAction<? super K, ? 
super V> action) { if (action == null) throw new NullPointerException(); Node<K,V>[] t; if ((t = table) != null) { Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); for (Node<K,V> p; (p = it.advance()) != null; ) { action.apply(p.key, p.val); } } } public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) { if (function == null) throw new NullPointerException(); Node<K,V>[] t; if ((t = table) != null) { Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); for (Node<K,V> p; (p = it.advance()) != null; ) { V oldValue = p.val; for (K key = p.key;;) { V newValue = function.apply(key, oldValue); if (newValue == null) throw new NullPointerException(); if (replaceNode(key, newValue, oldValue) != null || (oldValue = get(key)) == null) break; } } } } /** * If the specified key is not already associated with a value, * attempts to compute its value using the given mapping function * and enters it into this map unless {@code null}. The entire * method invocation is performed atomically, so the function is * applied at most once per key. Some attempted update operations * on this map by other threads may be blocked while computation * is in progress, so the computation should be short and simple, * and must not attempt to update any other mappings of this map. * * @param key key with which the specified value is to be associated * @param mappingFunction the function to compute a value * @return the current (existing or computed) value associated with * the specified key, or null if the computed value is null * @throws NullPointerException if the specified key or mappingFunction * is null * @throws IllegalStateException if the computation detectably * attempts a recursive update to this map that would * otherwise never complete * @throws RuntimeException or Error if the mappingFunction does so, * in which case the mapping is left unestablished */ public V computeIfAbsent(K key, Fun<? super K, ? 
extends V> mappingFunction) { if (key == null || mappingFunction == null) throw new NullPointerException(); int h = spread(key.hashCode()); V val = null; int binCount = 0; for (Node<K,V>[] tab = table;;) { Node<K,V> f; int n, i, fh; if (tab == null || (n = tab.length) == 0) tab = initTable(); else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { Node<K,V> r = new ReservationNode<K,V>(); synchronized (r) { if (casTabAt(tab, i, null, r)) { binCount = 1; Node<K,V> node = null; try { if ((val = mappingFunction.apply(key)) != null) node = new Node<K,V>(h, key, val, null); } finally { setTabAt(tab, i, node); } } } if (binCount != 0) break; } else if ((fh = f.hash) == MOVED) tab = helpTransfer(tab, f); else { boolean added = false; synchronized (f) { if (tabAt(tab, i) == f) { if (fh >= 0) { binCount = 1; for (Node<K,V> e = f;; ++binCount) { K ek; V ev; if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek)))) { val = e.val; break; } Node<K,V> pred = e; if ((e = e.next) == null) { if ((val = mappingFunction.apply(key)) != null) { added = true; pred.next = new Node<K,V>(h, key, val, null); } break; } } } else if (f instanceof TreeBin) { binCount = 2; TreeBin<K,V> t = (TreeBin<K,V>)f; TreeNode<K,V> r, p; if ((r = t.root) != null && (p = r.findTreeNode(h, key, null)) != null) val = p.val; else if ((val = mappingFunction.apply(key)) != null) { added = true; t.putTreeVal(h, key, val); } } } } if (binCount != 0) { if (binCount >= TREEIFY_THRESHOLD) treeifyBin(tab, i); if (!added) return val; break; } } } if (val != null) addCount(1L, binCount); return val; } /** * If the value for the specified key is present, attempts to * compute a new mapping given the key and its current mapped * value. The entire method invocation is performed atomically. * Some attempted update operations on this map by other threads * may be blocked while computation is in progress, so the * computation should be short and simple, and must not attempt to * update any other mappings of this map. * * @param key key with which a value may be associated * @param remappingFunction the function to compute a value * @return the new value associated with the specified key, or null if none * @throws NullPointerException if the specified key or remappingFunction * is null * @throws IllegalStateException if the computation detectably * attempts a recursive update to this map that would * otherwise never complete * @throws RuntimeException or Error if the remappingFunction does so, * in which case the mapping is unchanged */ public V computeIfPresent(K key, BiFun<? super K, ? super V, ? 
extends V> remappingFunction) { if (key == null || remappingFunction == null) throw new NullPointerException(); int h = spread(key.hashCode()); V val = null; int delta = 0; int binCount = 0; for (Node<K,V>[] tab = table;;) { Node<K,V> f; int n, i, fh; if (tab == null || (n = tab.length) == 0) tab = initTable(); else if ((f = tabAt(tab, i = (n - 1) & h)) == null) break; else if ((fh = f.hash) == MOVED) tab = helpTransfer(tab, f); else { synchronized (f) { if (tabAt(tab, i) == f) { if (fh >= 0) { binCount = 1; for (Node<K,V> e = f, pred = null;; ++binCount) { K ek; if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek)))) { val = remappingFunction.apply(key, e.val); if (val != null) e.val = val; else { delta = -1; Node<K,V> en = e.next; if (pred != null) pred.next = en; else setTabAt(tab, i, en); } break; } pred = e; if ((e = e.next) == null) break; } } else if (f instanceof TreeBin) { binCount = 2; TreeBin<K,V> t = (TreeBin<K,V>)f; TreeNode<K,V> r, p; if ((r = t.root) != null && (p = r.findTreeNode(h, key, null)) != null) { val = remappingFunction.apply(key, p.val); if (val != null) p.val = val; else { delta = -1; if (t.removeTreeNode(p)) setTabAt(tab, i, untreeify(t.first)); } } } } } if (binCount != 0) break; } } if (delta != 0) addCount((long)delta, binCount); return val; } /** * Attempts to compute a mapping for the specified key and its * current mapped value (or {@code null} if there is no current * mapping). The entire method invocation is performed atomically. * Some attempted update operations on this map by other threads * may be blocked while computation is in progress, so the * computation should be short and simple, and must not attempt to * update any other mappings of this Map. * * @param key key with which the specified value is to be associated * @param remappingFunction the function to compute a value * @return the new value associated with the specified key, or null if none * @throws NullPointerException if the specified key or remappingFunction * is null * @throws IllegalStateException if the computation detectably * attempts a recursive update to this map that would * otherwise never complete * @throws RuntimeException or Error if the remappingFunction does so, * in which case the mapping is unchanged */ public V compute(K key, BiFun<? super K, ? super V, ? 
extends V> remappingFunction) { if (key == null || remappingFunction == null) throw new NullPointerException(); int h = spread(key.hashCode()); V val = null; int delta = 0; int binCount = 0; for (Node<K,V>[] tab = table;;) { Node<K,V> f; int n, i, fh; if (tab == null || (n = tab.length) == 0) tab = initTable(); else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { Node<K,V> r = new ReservationNode<K,V>(); synchronized (r) { if (casTabAt(tab, i, null, r)) { binCount = 1; Node<K,V> node = null; try { if ((val = remappingFunction.apply(key, null)) != null) { delta = 1; node = new Node<K,V>(h, key, val, null); } } finally { setTabAt(tab, i, node); } } } if (binCount != 0) break; } else if ((fh = f.hash) == MOVED) tab = helpTransfer(tab, f); else { synchronized (f) { if (tabAt(tab, i) == f) { if (fh >= 0) { binCount = 1; for (Node<K,V> e = f, pred = null;; ++binCount) { K ek; if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek)))) { val = remappingFunction.apply(key, e.val); if (val != null) e.val = val; else { delta = -1; Node<K,V> en = e.next; if (pred != null) pred.next = en; else setTabAt(tab, i, en); } break; } pred = e; if ((e = e.next) == null) { val = remappingFunction.apply(key, null); if (val != null) { delta = 1; pred.next = new Node<K,V>(h, key, val, null); } break; } } } else if (f instanceof TreeBin) { binCount = 1; TreeBin<K,V> t = (TreeBin<K,V>)f; TreeNode<K,V> r, p; if ((r = t.root) != null) p = r.findTreeNode(h, key, null); else p = null; V pv = (p == null) ? null : p.val; val = remappingFunction.apply(key, pv); if (val != null) { if (p != null) p.val = val; else { delta = 1; t.putTreeVal(h, key, val); } } else if (p != null) { delta = -1; if (t.removeTreeNode(p)) setTabAt(tab, i, untreeify(t.first)); } } } } if (binCount != 0) { if (binCount >= TREEIFY_THRESHOLD) treeifyBin(tab, i); break; } } } if (delta != 0) addCount((long)delta, binCount); return val; } /** * If the specified key is not already associated with a * (non-null) value, associates it with the given value. * Otherwise, replaces the value with the results of the given * remapping function, or removes if {@code null}. The entire * method invocation is performed atomically. Some attempted * update operations on this map by other threads may be blocked * while computation is in progress, so the computation should be * short and simple, and must not attempt to update any other * mappings of this Map. * * @param key key with which the specified value is to be associated * @param value the value to use if absent * @param remappingFunction the function to recompute a value if present * @return the new value associated with the specified key, or null if none * @throws NullPointerException if the specified key or the * remappingFunction is null * @throws RuntimeException or Error if the remappingFunction does so, * in which case the mapping is unchanged */ public V merge(K key, V value, BiFun<? super V, ? super V, ? 
extends V> remappingFunction) { if (key == null || value == null || remappingFunction == null) throw new NullPointerException(); int h = spread(key.hashCode()); V val = null; int delta = 0; int binCount = 0; for (Node<K,V>[] tab = table;;) { Node<K,V> f; int n, i, fh; if (tab == null || (n = tab.length) == 0) tab = initTable(); else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) { delta = 1; val = value; break; } } else if ((fh = f.hash) == MOVED) tab = helpTransfer(tab, f); else { synchronized (f) { if (tabAt(tab, i) == f) { if (fh >= 0) { binCount = 1; for (Node<K,V> e = f, pred = null;; ++binCount) { K ek; if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek)))) { val = remappingFunction.apply(e.val, value); if (val != null) e.val = val; else { delta = -1; Node<K,V> en = e.next; if (pred != null) pred.next = en; else setTabAt(tab, i, en); } break; } pred = e; if ((e = e.next) == null) { delta = 1; val = value; pred.next = new Node<K,V>(h, key, val, null); break; } } } else if (f instanceof TreeBin) { binCount = 2; TreeBin<K,V> t = (TreeBin<K,V>)f; TreeNode<K,V> r = t.root; TreeNode<K,V> p = (r == null) ? null : r.findTreeNode(h, key, null); val = (p == null) ? value : remappingFunction.apply(p.val, value); if (val != null) { if (p != null) p.val = val; else { delta = 1; t.putTreeVal(h, key, val); } } else if (p != null) { delta = -1; if (t.removeTreeNode(p)) setTabAt(tab, i, untreeify(t.first)); } } } } if (binCount != 0) { if (binCount >= TREEIFY_THRESHOLD) treeifyBin(tab, i); break; } } } if (delta != 0) addCount((long)delta, binCount); return val; } // Hashtable legacy methods /** * Legacy method testing if some key maps into the specified value * in this table. This method is identical in functionality to * {@link #containsValue(Object)}, and exists solely to ensure * full compatibility with class {@link java.util.Hashtable}, * which supported this method prior to introduction of the * Java Collections framework. * * @param value a value to search for * @return {@code true} if and only if some key maps to the * {@code value} argument in this table as * determined by the {@code equals} method; * {@code false} otherwise * @throws NullPointerException if the specified value is null */ @Deprecated public boolean contains(Object value) { return containsValue(value); } /** * Returns an enumeration of the keys in this table. * * @return an enumeration of the keys in this table * @see #keySet() */ public Enumeration<K> keys() { Node<K,V>[] t; int f = (t = table) == null ? 0 : t.length; return new KeyIterator<K,V>(t, f, 0, f, this); } /** * Returns an enumeration of the values in this table. * * @return an enumeration of the values in this table * @see #values() */ public Enumeration<V> elements() { Node<K,V>[] t; int f = (t = table) == null ? 0 : t.length; return new ValueIterator<K,V>(t, f, 0, f, this); } // ConcurrentHashMapV8-only methods /** * Returns the number of mappings. This method should be used * instead of {@link #size} because a ConcurrentHashMapV8 may * contain more mappings than can be represented as an int. The * value returned is an estimate; the actual count may differ if * there are concurrent insertions or removals. * * @return the number of mappings * @since 1.8 */ public long mappingCount() { long n = sumCount(); return (n < 0L) ? 0L : n; // ignore transient negative values } /** * Creates a new {@link Set} backed by a ConcurrentHashMapV8 * from the given type to {@code Boolean.TRUE}. 
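 *
 * <p>A minimal sketch of set-style use (identifiers are illustrative,
 * not from this file):
 * <pre> {@code
 * KeySetView<String, Boolean> seen = ConcurrentHashMapV8.<String>newKeySet();
 * seen.add("first");                    // backed by a mapping "first" -> Boolean.TRUE
 * boolean duplicate = !seen.add("first");
 * }</pre>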
* * @return the new set * @since 1.8 */ public static <K> KeySetView<K,Boolean> newKeySet() { return new KeySetView<K,Boolean> (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE); } /** * Creates a new {@link Set} backed by a ConcurrentHashMapV8 * from the given type to {@code Boolean.TRUE}. * * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. * @return the new set * @throws IllegalArgumentException if the initial capacity of * elements is negative * @since 1.8 */ public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { return new KeySetView<K,Boolean> (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE); } /** * Returns a {@link Set} view of the keys in this map, using the * given common mapped value for any additions (i.e., {@link * Collection#add} and {@link Collection#addAll(Collection)}). * This is of course only appropriate if it is acceptable to use * the same value for all additions from this view. * * @param mappedValue the mapped value to use for any additions * @return the set view * @throws NullPointerException if the mappedValue is null */ public KeySetView<K,V> keySet(V mappedValue) { if (mappedValue == null) throw new NullPointerException(); return new KeySetView<K,V>(this, mappedValue); } /* ---------------- Special Nodes -------------- */ /** * A node inserted at head of bins during transfer operations. */ static final class ForwardingNode<K,V> extends Node<K,V> { final Node<K,V>[] nextTable; ForwardingNode(Node<K,V>[] tab) { super(MOVED, null, null, null); this.nextTable = tab; } Node<K,V> find(int h, Object k) { // loop to avoid arbitrarily deep recursion on forwarding nodes outer: for (Node<K,V>[] tab = nextTable;;) { Node<K,V> e; int n; if (k == null || tab == null || (n = tab.length) == 0 || (e = tabAt(tab, (n - 1) & h)) == null) return null; for (;;) { int eh; K ek; if ((eh = e.hash) == h && ((ek = e.key) == k || (ek != null && k.equals(ek)))) return e; if (eh < 0) { if (e instanceof ForwardingNode) { tab = ((ForwardingNode<K,V>)e).nextTable; continue outer; } else return e.find(h, k); } if ((e = e.next) == null) return null; } } } } /** * A place-holder node used in computeIfAbsent and compute */ static final class ReservationNode<K,V> extends Node<K,V> { ReservationNode() { super(RESERVED, null, null, null); } Node<K,V> find(int h, Object k) { return null; } } /* ---------------- Table Initialization and Resizing -------------- */ /** * Returns the stamp bits for resizing a table of size n. * Must be negative when shifted left by RESIZE_STAMP_SHIFT. */ static final int resizeStamp(int n) { return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1)); } /** * Initializes table, using the size recorded in sizeCtl. */ private final Node<K,V>[] initTable() { Node<K,V>[] tab; int sc; while ((tab = table) == null || tab.length == 0) { if ((sc = sizeCtl) < 0) Thread.yield(); // lost initialization race; just spin else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { try { if ((tab = table) == null || tab.length == 0) { int n = (sc > 0) ? sc : DEFAULT_CAPACITY; @SuppressWarnings("unchecked") Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; table = tab = nt; sc = n - (n >>> 2); } } finally { sizeCtl = sc; } break; } } return tab; } /** * Adds to count, and if table is too small and not already * resizing, initiates transfer. If already resizing, helps * perform transfer if work is available. 
Rechecks occupancy * after a transfer to see if another resize is already needed * because resizings are lagging additions. * * @param x the count to add * @param check if <0, don't check resize, if <= 1 only check if uncontended */ private final void addCount(long x, int check) { CounterCell[] as; long b, s; if ((as = counterCells) != null || !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { CounterHashCode hc; CounterCell a; long v; int m; boolean uncontended = true; if ((hc = threadCounterHashCode.get()) == null || as == null || (m = as.length - 1) < 0 || (a = as[m & hc.code]) == null || !(uncontended = U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { fullAddCount(x, hc, uncontended); return; } if (check <= 1) return; s = sumCount(); } if (check >= 0) { Node<K,V>[] tab, nt; int n, sc; while (s >= (long)(sc = sizeCtl) && (tab = table) != null && (n = tab.length) < MAXIMUM_CAPACITY) { int rs = resizeStamp(n); if (sc < 0) { if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || sc == rs + MAX_RESIZERS || (nt = nextTable) == null || transferIndex <= 0) break; if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) transfer(tab, nt); } else if (U.compareAndSwapInt(this, SIZECTL, sc, (rs << RESIZE_STAMP_SHIFT) + 2)) transfer(tab, null); s = sumCount(); } } } /** * Helps transfer if a resize is in progress. */ final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) { Node<K,V>[] nextTab; int sc; if (tab != null && (f instanceof ForwardingNode) && (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) { int rs = resizeStamp(tab.length); while (nextTab == nextTable && table == tab && (sc = sizeCtl) < 0) { if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || sc == rs + MAX_RESIZERS || transferIndex <= 0) break; if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) { transfer(tab, nextTab); break; } } return nextTab; } return table; } /** * Tries to presize table to accommodate the given number of elements. * * @param size number of elements (doesn't need to be perfectly accurate) */ private final void tryPresize(int size) { int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : tableSizeFor(size + (size >>> 1) + 1); int sc; while ((sc = sizeCtl) >= 0) { Node<K,V>[] tab = table; int n; if (tab == null || (n = tab.length) == 0) { n = (sc > c) ? sc : c; if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { try { if (table == tab) { @SuppressWarnings("unchecked") Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; table = nt; sc = n - (n >>> 2); } } finally { sizeCtl = sc; } } } else if (c <= sc || n >= MAXIMUM_CAPACITY) break; else if (tab == table) { int rs = resizeStamp(n); if (sc < 0) { Node<K,V>[] nt; if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || sc == rs + MAX_RESIZERS || (nt = nextTable) == null || transferIndex <= 0) break; if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) transfer(tab, nt); } else if (U.compareAndSwapInt(this, SIZECTL, sc, (rs << RESIZE_STAMP_SHIFT) + 2)) transfer(tab, null); } } } /** * Moves and/or copies the nodes in each bin to new table. See * above for explanation. */ private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) { int n = tab.length, stride; if ((stride = (NCPU > 1) ? 
(n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) stride = MIN_TRANSFER_STRIDE; // subdivide range if (nextTab == null) { // initiating try { @SuppressWarnings("unchecked") Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1]; nextTab = nt; } catch (Throwable ex) { // try to cope with OOME sizeCtl = Integer.MAX_VALUE; return; } nextTable = nextTab; transferIndex = n; } int nextn = nextTab.length; ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab); boolean advance = true; boolean finishing = false; // to ensure sweep before committing nextTab for (int i = 0, bound = 0;;) { Node<K,V> f; int fh; while (advance) { int nextIndex, nextBound; if (--i >= bound || finishing) advance = false; else if ((nextIndex = transferIndex) <= 0) { i = -1; advance = false; } else if (U.compareAndSwapInt (this, TRANSFERINDEX, nextIndex, nextBound = (nextIndex > stride ? nextIndex - stride : 0))) { bound = nextBound; i = nextIndex - 1; advance = false; } } if (i < 0 || i >= n || i + n >= nextn) { int sc; if (finishing) { nextTable = null; table = nextTab; sizeCtl = (n << 1) - (n >>> 1); return; } if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) { if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) return; finishing = advance = true; i = n; // recheck before commit } } else if ((f = tabAt(tab, i)) == null) advance = casTabAt(tab, i, null, fwd); else if ((fh = f.hash) == MOVED) advance = true; // already processed else { synchronized (f) { if (tabAt(tab, i) == f) { Node<K,V> ln, hn; if (fh >= 0) { int runBit = fh & n; Node<K,V> lastRun = f; for (Node<K,V> p = f.next; p != null; p = p.next) { int b = p.hash & n; if (b != runBit) { runBit = b; lastRun = p; } } if (runBit == 0) { ln = lastRun; hn = null; } else { hn = lastRun; ln = null; } for (Node<K,V> p = f; p != lastRun; p = p.next) { int ph = p.hash; K pk = p.key; V pv = p.val; if ((ph & n) == 0) ln = new Node<K,V>(ph, pk, pv, ln); else hn = new Node<K,V>(ph, pk, pv, hn); } setTabAt(nextTab, i, ln); setTabAt(nextTab, i + n, hn); setTabAt(tab, i, fwd); advance = true; } else if (f instanceof TreeBin) { TreeBin<K,V> t = (TreeBin<K,V>)f; TreeNode<K,V> lo = null, loTail = null; TreeNode<K,V> hi = null, hiTail = null; int lc = 0, hc = 0; for (Node<K,V> e = t.first; e != null; e = e.next) { int h = e.hash; TreeNode<K,V> p = new TreeNode<K,V> (h, e.key, e.val, null, null); if ((h & n) == 0) { if ((p.prev = loTail) == null) lo = p; else loTail.next = p; loTail = p; ++lc; } else { if ((p.prev = hiTail) == null) hi = p; else hiTail.next = p; hiTail = p; ++hc; } } ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : (hc != 0) ? new TreeBin<K,V>(lo) : t; hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : (lc != 0) ? new TreeBin<K,V>(hi) : t; setTabAt(nextTab, i, ln); setTabAt(nextTab, i + n, hn); setTabAt(tab, i, fwd); advance = true; } } } } } } /* ---------------- Conversion from/to TreeBins -------------- */ /** * Replaces all linked nodes in bin at given index unless table is * too small, in which case resizes instead. 
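 * That is, a bin that has grown past TREEIFY_THRESHOLD is converted to a
 * TreeBin only once the table holds at least MIN_TREEIFY_CAPACITY bins;
 * below that size, tryPresize doubles the table instead.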
*/ private final void treeifyBin(Node<K,V>[] tab, int index) { Node<K,V> b; int n, sc; if (tab != null) { if ((n = tab.length) < MIN_TREEIFY_CAPACITY) tryPresize(n << 1); else if ((b = tabAt(tab, index)) != null && b.hash >= 0) { synchronized (b) { if (tabAt(tab, index) == b) { TreeNode<K,V> hd = null, tl = null; for (Node<K,V> e = b; e != null; e = e.next) { TreeNode<K,V> p = new TreeNode<K,V>(e.hash, e.key, e.val, null, null); if ((p.prev = tl) == null) hd = p; else tl.next = p; tl = p; } setTabAt(tab, index, new TreeBin<K,V>(hd)); } } } } } /** * Returns a list on non-TreeNodes replacing those in given list. */ static <K,V> Node<K,V> untreeify(Node<K,V> b) { Node<K,V> hd = null, tl = null; for (Node<K,V> q = b; q != null; q = q.next) { Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null); if (tl == null) hd = p; else tl.next = p; tl = p; } return hd; } /* ---------------- TreeNodes -------------- */ /** * Nodes for use in TreeBins */ static final class TreeNode<K,V> extends Node<K,V> { TreeNode<K,V> parent; // red-black tree links TreeNode<K,V> left; TreeNode<K,V> right; TreeNode<K,V> prev; // needed to unlink next upon deletion boolean red; TreeNode(int hash, K key, V val, Node<K,V> next, TreeNode<K,V> parent) { super(hash, key, val, next); this.parent = parent; } Node<K,V> find(int h, Object k) { return findTreeNode(h, k, null); } /** * Returns the TreeNode (or null if not found) for the given key * starting at given root. */ final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) { if (k != null) { TreeNode<K,V> p = this; do { int ph, dir; K pk; TreeNode<K,V> q; TreeNode<K,V> pl = p.left, pr = p.right; if ((ph = p.hash) > h) p = pl; else if (ph < h) p = pr; else if ((pk = p.key) == k || (pk != null && k.equals(pk))) return p; else if (pl == null) p = pr; else if (pr == null) p = pl; else if ((kc != null || (kc = comparableClassFor(k)) != null) && (dir = compareComparables(kc, k, pk)) != 0) p = (dir < 0) ? pl : pr; else if ((q = pr.findTreeNode(h, k, kc)) != null) return q; else p = pl; } while (p != null); } return null; } } /* ---------------- TreeBins -------------- */ /** * TreeNodes used at the heads of bins. TreeBins do not hold user * keys or values, but instead point to list of TreeNodes and * their root. They also maintain a parasitic read-write lock * forcing writers (who hold bin lock) to wait for readers (who do * not) to complete before tree restructuring operations. */ static final class TreeBin<K,V> extends Node<K,V> { TreeNode<K,V> root; volatile TreeNode<K,V> first; volatile Thread waiter; volatile int lockState; // values for lockState static final int WRITER = 1; // set while holding write lock static final int WAITER = 2; // set when waiting for write lock static final int READER = 4; // increment value for setting read lock /** * Tie-breaking utility for ordering insertions when equal * hashCodes and non-comparable. We don't require a total * order, just a consistent insertion rule to maintain * equivalence across rebalancings. Tie-breaking further than * necessary simplifies testing a bit. */ static int tieBreakOrder(Object a, Object b) { int d; if (a == null || b == null || (d = a.getClass().getName(). compareTo(b.getClass().getName())) == 0) d = (System.identityHashCode(a) <= System.identityHashCode(b) ? -1 : 1); return d; } /** * Creates bin with initial set of nodes headed by b. 
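 * Nodes are inserted one at a time, ordered first by hash, then by
 * Comparable comparison when comparableClassFor/compareComparables
 * permit it, and finally by tieBreakOrder, rebalancing via
 * balanceInsertion after each insertion.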
*/ TreeBin(TreeNode<K,V> b) { super(TREEBIN, null, null, null); this.first = b; TreeNode<K,V> r = null; for (TreeNode<K,V> x = b, next; x != null; x = next) { next = (TreeNode<K,V>)x.next; x.left = x.right = null; if (r == null) { x.parent = null; x.red = false; r = x; } else { K k = x.key; int h = x.hash; Class<?> kc = null; for (TreeNode<K,V> p = r;;) { int dir, ph; K pk = p.key; if ((ph = p.hash) > h) dir = -1; else if (ph < h) dir = 1; else if ((kc == null && (kc = comparableClassFor(k)) == null) || (dir = compareComparables(kc, k, pk)) == 0) dir = tieBreakOrder(k, pk); TreeNode<K,V> xp = p; if ((p = (dir <= 0) ? p.left : p.right) == null) { x.parent = xp; if (dir <= 0) xp.left = x; else xp.right = x; r = balanceInsertion(r, x); break; } } } } this.root = r; assert checkInvariants(root); } /** * Acquires write lock for tree restructuring. */ private final void lockRoot() { if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER)) contendedLock(); // offload to separate method } /** * Releases write lock for tree restructuring. */ private final void unlockRoot() { lockState = 0; } /** * Possibly blocks awaiting root lock. */ private final void contendedLock() { boolean waiting = false; for (int s;;) { if (((s = lockState) & ~WAITER) == 0) { if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) { if (waiting) waiter = null; return; } } else if ((s & WAITER) == 0) { if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { waiting = true; waiter = Thread.currentThread(); } } else if (waiting) LockSupport.park(this); } } /** * Returns matching node or null if none. Tries to search * using tree comparisons from root, but continues linear * search when lock not available. */ final Node<K,V> find(int h, Object k) { if (k != null) { for (Node<K,V> e = first; e != null; ) { int s; K ek; if (((s = lockState) & (WAITER|WRITER)) != 0) { if (e.hash == h && ((ek = e.key) == k || (ek != null && k.equals(ek)))) return e; e = e.next; } else if (U.compareAndSwapInt(this, LOCKSTATE, s, s + READER)) { TreeNode<K,V> r, p; try { p = ((r = root) == null ? null : r.findTreeNode(h, k, null)); } finally { Thread w; int ls; do {} while (!U.compareAndSwapInt (this, LOCKSTATE, ls = lockState, ls - READER)); if (ls == (READER|WAITER) && (w = waiter) != null) LockSupport.unpark(w); } return p; } } } return null; } /** * Finds or adds a node. * @return null if added */ final TreeNode<K,V> putTreeVal(int h, K k, V v) { Class<?> kc = null; boolean searched = false; for (TreeNode<K,V> p = root;;) { int dir, ph; K pk; if (p == null) { first = root = new TreeNode<K,V>(h, k, v, null, null); break; } else if ((ph = p.hash) > h) dir = -1; else if (ph < h) dir = 1; else if ((pk = p.key) == k || (pk != null && k.equals(pk))) return p; else if ((kc == null && (kc = comparableClassFor(k)) == null) || (dir = compareComparables(kc, k, pk)) == 0) { if (!searched) { TreeNode<K,V> q, ch; searched = true; if (((ch = p.left) != null && (q = ch.findTreeNode(h, k, kc)) != null) || ((ch = p.right) != null && (q = ch.findTreeNode(h, k, kc)) != null)) return q; } dir = tieBreakOrder(k, pk); } TreeNode<K,V> xp = p; if ((p = (dir <= 0) ? 
p.left : p.right) == null) { TreeNode<K,V> x, f = first; first = x = new TreeNode<K,V>(h, k, v, f, xp); if (f != null) f.prev = x; if (dir <= 0) xp.left = x; else xp.right = x; if (!xp.red) x.red = true; else { lockRoot(); try { root = balanceInsertion(root, x); } finally { unlockRoot(); } } break; } } assert checkInvariants(root); return null; } /** * Removes the given node, that must be present before this * call. This is messier than typical red-black deletion code * because we cannot swap the contents of an interior node * with a leaf successor that is pinned by "next" pointers * that are accessible independently of lock. So instead we * swap the tree linkages. * * @return true if now too small, so should be untreeified */ final boolean removeTreeNode(TreeNode<K,V> p) { TreeNode<K,V> next = (TreeNode<K,V>)p.next; TreeNode<K,V> pred = p.prev; // unlink traversal pointers TreeNode<K,V> r, rl; if (pred == null) first = next; else pred.next = next; if (next != null) next.prev = pred; if (first == null) { root = null; return true; } if ((r = root) == null || r.right == null || // too small (rl = r.left) == null || rl.left == null) return true; lockRoot(); try { TreeNode<K,V> replacement; TreeNode<K,V> pl = p.left; TreeNode<K,V> pr = p.right; if (pl != null && pr != null) { TreeNode<K,V> s = pr, sl; while ((sl = s.left) != null) // find successor s = sl; boolean c = s.red; s.red = p.red; p.red = c; // swap colors TreeNode<K,V> sr = s.right; TreeNode<K,V> pp = p.parent; if (s == pr) { // p was s's direct parent p.parent = s; s.right = p; } else { TreeNode<K,V> sp = s.parent; if ((p.parent = sp) != null) { if (s == sp.left) sp.left = p; else sp.right = p; } if ((s.right = pr) != null) pr.parent = s; } p.left = null; if ((p.right = sr) != null) sr.parent = p; if ((s.left = pl) != null) pl.parent = s; if ((s.parent = pp) == null) r = s; else if (p == pp.left) pp.left = s; else pp.right = s; if (sr != null) replacement = sr; else replacement = p; } else if (pl != null) replacement = pl; else if (pr != null) replacement = pr; else replacement = p; if (replacement != p) { TreeNode<K,V> pp = replacement.parent = p.parent; if (pp == null) r = replacement; else if (p == pp.left) pp.left = replacement; else pp.right = replacement; p.left = p.right = p.parent = null; } root = (p.red) ? 
r : balanceDeletion(r, replacement); if (p == replacement) { // detach pointers TreeNode<K,V> pp; if ((pp = p.parent) != null) { if (p == pp.left) pp.left = null; else if (p == pp.right) pp.right = null; p.parent = null; } } } finally { unlockRoot(); } assert checkInvariants(root); return false; } /* ------------------------------------------------------------ */ // Red-black tree methods, all adapted from CLR static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, TreeNode<K,V> p) { TreeNode<K,V> r, pp, rl; if (p != null && (r = p.right) != null) { if ((rl = p.right = r.left) != null) rl.parent = p; if ((pp = r.parent = p.parent) == null) (root = r).red = false; else if (pp.left == p) pp.left = r; else pp.right = r; r.left = p; p.parent = r; } return root; } static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, TreeNode<K,V> p) { TreeNode<K,V> l, pp, lr; if (p != null && (l = p.left) != null) { if ((lr = p.left = l.right) != null) lr.parent = p; if ((pp = l.parent = p.parent) == null) (root = l).red = false; else if (pp.right == p) pp.right = l; else pp.left = l; l.right = p; p.parent = l; } return root; } static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, TreeNode<K,V> x) { x.red = true; for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { if ((xp = x.parent) == null) { x.red = false; return x; } else if (!xp.red || (xpp = xp.parent) == null) return root; if (xp == (xppl = xpp.left)) { if ((xppr = xpp.right) != null && xppr.red) { xppr.red = false; xp.red = false; xpp.red = true; x = xpp; } else { if (x == xp.right) { root = rotateLeft(root, x = xp); xpp = (xp = x.parent) == null ? null : xp.parent; } if (xp != null) { xp.red = false; if (xpp != null) { xpp.red = true; root = rotateRight(root, xpp); } } } } else { if (xppl != null && xppl.red) { xppl.red = false; xp.red = false; xpp.red = true; x = xpp; } else { if (x == xp.left) { root = rotateRight(root, x = xp); xpp = (xp = x.parent) == null ? null : xp.parent; } if (xp != null) { xp.red = false; if (xpp != null) { xpp.red = true; root = rotateLeft(root, xpp); } } } } } } static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, TreeNode<K,V> x) { for (TreeNode<K,V> xp, xpl, xpr;;) { if (x == null || x == root) return root; else if ((xp = x.parent) == null) { x.red = false; return x; } else if (x.red) { x.red = false; return root; } else if ((xpl = xp.left) == x) { if ((xpr = xp.right) != null && xpr.red) { xpr.red = false; xp.red = true; root = rotateLeft(root, xp); xpr = (xp = x.parent) == null ? null : xp.right; } if (xpr == null) x = xp; else { TreeNode<K,V> sl = xpr.left, sr = xpr.right; if ((sr == null || !sr.red) && (sl == null || !sl.red)) { xpr.red = true; x = xp; } else { if (sr == null || !sr.red) { if (sl != null) sl.red = false; xpr.red = true; root = rotateRight(root, xpr); xpr = (xp = x.parent) == null ? null : xp.right; } if (xpr != null) { xpr.red = (xp == null) ? false : xp.red; if ((sr = xpr.right) != null) sr.red = false; } if (xp != null) { xp.red = false; root = rotateLeft(root, xp); } x = root; } } } else { // symmetric if (xpl != null && xpl.red) { xpl.red = false; xp.red = true; root = rotateRight(root, xp); xpl = (xp = x.parent) == null ? null : xp.left; } if (xpl == null) x = xp; else { TreeNode<K,V> sl = xpl.left, sr = xpl.right; if ((sl == null || !sl.red) && (sr == null || !sr.red)) { xpl.red = true; x = xp; } else { if (sl == null || !sl.red) { if (sr != null) sr.red = false; xpl.red = true; root = rotateLeft(root, xpl); xpl = (xp = x.parent) == null ? 
null : xp.left; } if (xpl != null) { xpl.red = (xp == null) ? false : xp.red; if ((sl = xpl.left) != null) sl.red = false; } if (xp != null) { xp.red = false; root = rotateRight(root, xp); } x = root; } } } } } /** * Recursive invariant check */ static <K,V> boolean checkInvariants(TreeNode<K,V> t) { TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, tb = t.prev, tn = (TreeNode<K,V>)t.next; if (tb != null && tb.next != t) return false; if (tn != null && tn.prev != t) return false; if (tp != null && t != tp.left && t != tp.right) return false; if (tl != null && (tl.parent != t || tl.hash > t.hash)) return false; if (tr != null && (tr.parent != t || tr.hash < t.hash)) return false; if (t.red && tl != null && tl.red && tr != null && tr.red) return false; if (tl != null && !checkInvariants(tl)) return false; if (tr != null && !checkInvariants(tr)) return false; return true; } private static final sun.misc.Unsafe U; private static final long LOCKSTATE; static { try { U = getUnsafe(); Class<?> k = TreeBin.class; LOCKSTATE = U.objectFieldOffset (k.getDeclaredField("lockState")); } catch (Exception e) { throw new Error(e); } } } /* ----------------Table Traversal -------------- */ /** * Records the table, its length, and current traversal index for a * traverser that must process a region of a forwarded table before * proceeding with current table. */ static final class TableStack<K,V> { int length; int index; Node<K,V>[] tab; TableStack<K,V> next; } /** * Encapsulates traversal for methods such as containsValue; also * serves as a base class for other iterators and spliterators. * * Method advance visits once each still-valid node that was * reachable upon iterator construction. It might miss some that * were added to a bin after the bin was visited, which is OK wrt * consistency guarantees. Maintaining this property in the face * of possible ongoing resizes requires a fair amount of * bookkeeping state that is difficult to optimize away amidst * volatile accesses. Even so, traversal maintains reasonable * throughput. * * Normally, iteration proceeds bin-by-bin traversing lists. * However, if the table has been resized, then all future steps * must traverse both the bin at the current index as well as at * (index + baseSize); and so on for further resizings. To * paranoically cope with potential sharing by users of iterators * across threads, iteration terminates if a bounds checks fails * for a table read. */ static class Traverser<K,V> { Node<K,V>[] tab; // current table; updated if resized Node<K,V> next; // the next entry to use TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes int index; // index of bin to use next int baseIndex; // current index of initial table int baseLimit; // index bound for initial table final int baseSize; // initial table size Traverser(Node<K,V>[] tab, int size, int index, int limit) { this.tab = tab; this.baseSize = size; this.baseIndex = this.index = index; this.baseLimit = limit; this.next = null; } /** * Advances if possible, returning next valid node, or null if none. 
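 * Callers drive it with the idiom used throughout this class (assuming
 * {@code tab} is the non-null current table), e.g.:
 * <pre> {@code
 * Traverser<K,V> it = new Traverser<K,V>(tab, tab.length, 0, tab.length);
 * for (Node<K,V> p; (p = it.advance()) != null; ) {
 *     // use p.key and p.val
 * }
 * }</pre>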
*/ final Node<K,V> advance() { Node<K,V> e; if ((e = next) != null) e = e.next; for (;;) { Node<K,V>[] t; int i, n; // must use locals in checks if (e != null) return next = e; if (baseIndex >= baseLimit || (t = tab) == null || (n = t.length) <= (i = index) || i < 0) return next = null; if ((e = tabAt(t, i)) != null && e.hash < 0) { if (e instanceof ForwardingNode) { tab = ((ForwardingNode<K,V>)e).nextTable; e = null; pushState(t, i, n); continue; } else if (e instanceof TreeBin) e = ((TreeBin<K,V>)e).first; else e = null; } if (stack != null) recoverState(n); else if ((index = i + baseSize) >= n) index = ++baseIndex; // visit upper slots if present } } /** * Saves traversal state upon encountering a forwarding node. */ private void pushState(Node<K,V>[] t, int i, int n) { TableStack<K,V> s = spare; // reuse if possible if (s != null) spare = s.next; else s = new TableStack<K,V>(); s.tab = t; s.length = n; s.index = i; s.next = stack; stack = s; } /** * Possibly pops traversal state. * * @param n length of current table */ private void recoverState(int n) { TableStack<K,V> s; int len; while ((s = stack) != null && (index += (len = s.length)) >= n) { n = len; index = s.index; tab = s.tab; s.tab = null; TableStack<K,V> next = s.next; s.next = spare; // save for reuse stack = next; spare = s; } if (s == null && (index += baseSize) >= n) index = ++baseIndex; } } /** * Base of key, value, and entry Iterators. Adds fields to * Traverser to support iterator.remove. */ static class BaseIterator<K,V> extends Traverser<K,V> { final ConcurrentHashMapV8<K,V> map; Node<K,V> lastReturned; BaseIterator(Node<K,V>[] tab, int size, int index, int limit, ConcurrentHashMapV8<K,V> map) { super(tab, size, index, limit); this.map = map; advance(); } public final boolean hasNext() { return next != null; } public final boolean hasMoreElements() { return next != null; } public final void remove() { Node<K,V> p; if ((p = lastReturned) == null) throw new IllegalStateException(); lastReturned = null; map.replaceNode(p.key, null, null); } } static final class KeyIterator<K,V> extends BaseIterator<K,V> implements Iterator<K>, Enumeration<K> { KeyIterator(Node<K,V>[] tab, int index, int size, int limit, ConcurrentHashMapV8<K,V> map) { super(tab, index, size, limit, map); } public final K next() { Node<K,V> p; if ((p = next) == null) throw new NoSuchElementException(); K k = p.key; lastReturned = p; advance(); return k; } public final K nextElement() { return next(); } } static final class ValueIterator<K,V> extends BaseIterator<K,V> implements Iterator<V>, Enumeration<V> { ValueIterator(Node<K,V>[] tab, int index, int size, int limit, ConcurrentHashMapV8<K,V> map) { super(tab, index, size, limit, map); } public final V next() { Node<K,V> p; if ((p = next) == null) throw new NoSuchElementException(); V v = p.val; lastReturned = p; advance(); return v; } public final V nextElement() { return next(); } } static final class EntryIterator<K,V> extends BaseIterator<K,V> implements Iterator<Map.Entry<K,V>> { EntryIterator(Node<K,V>[] tab, int index, int size, int limit, ConcurrentHashMapV8<K,V> map) { super(tab, index, size, limit, map); } public final Map.Entry<K,V> next() { Node<K,V> p; if ((p = next) == null) throw new NoSuchElementException(); K k = p.key; V v = p.val; lastReturned = p; advance(); return new MapEntry<K,V>(k, v, map); } } /** * Exported Entry for EntryIterator */ static final class MapEntry<K,V> implements Map.Entry<K,V> { final K key; // non-null V val; // non-null final ConcurrentHashMapV8<K,V> map; 
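        // Entries of this type are handed out by EntryIterator; setValue
        // (below) writes through to the backing map, so, illustratively,
        //   for (Map.Entry<K,V> e : map.entrySet()) e.setValue(newVal);
        // updates the map itself ("map" and "newVal" are placeholders,
        // not identifiers from this file).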
MapEntry(K key, V val, ConcurrentHashMapV8<K,V> map) { this.key = key; this.val = val; this.map = map; } public K getKey() { return key; } public V getValue() { return val; } public int hashCode() { return key.hashCode() ^ val.hashCode(); } public String toString() { return key + "=" + val; } public boolean equals(Object o) { Object k, v; Map.Entry<?,?> e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry<?,?>)o).getKey()) != null && (v = e.getValue()) != null && (k == key || k.equals(key)) && (v == val || v.equals(val))); } /** * Sets our entry's value and writes through to the map. The * value to return is somewhat arbitrary here. Since we do not * necessarily track asynchronous changes, the most recent * "previous" value could be different from what we return (or * could even have been removed, in which case the put will * re-establish). We do not and cannot guarantee more. */ public V setValue(V value) { if (value == null) throw new NullPointerException(); V v = val; val = value; map.put(key, value); return v; } } static final class KeySpliterator<K,V> extends Traverser<K,V> implements ConcurrentHashMapSpliterator<K> { long est; // size estimate KeySpliterator(Node<K,V>[] tab, int size, int index, int limit, long est) { super(tab, size, index, limit); this.est = est; } public ConcurrentHashMapSpliterator<K> trySplit() { int i, f, h; return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : new KeySpliterator<K,V>(tab, baseSize, baseLimit = h, f, est >>>= 1); } public void forEachRemaining(Action<? super K> action) { if (action == null) throw new NullPointerException(); for (Node<K,V> p; (p = advance()) != null;) action.apply(p.key); } public boolean tryAdvance(Action<? super K> action) { if (action == null) throw new NullPointerException(); Node<K,V> p; if ((p = advance()) == null) return false; action.apply(p.key); return true; } public long estimateSize() { return est; } } static final class ValueSpliterator<K,V> extends Traverser<K,V> implements ConcurrentHashMapSpliterator<V> { long est; // size estimate ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit, long est) { super(tab, size, index, limit); this.est = est; } public ConcurrentHashMapSpliterator<V> trySplit() { int i, f, h; return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h, f, est >>>= 1); } public void forEachRemaining(Action<? super V> action) { if (action == null) throw new NullPointerException(); for (Node<K,V> p; (p = advance()) != null;) action.apply(p.val); } public boolean tryAdvance(Action<? super V> action) { if (action == null) throw new NullPointerException(); Node<K,V> p; if ((p = advance()) == null) return false; action.apply(p.val); return true; } public long estimateSize() { return est; } } static final class EntrySpliterator<K,V> extends Traverser<K,V> implements ConcurrentHashMapSpliterator<Map.Entry<K,V>> { final ConcurrentHashMapV8<K,V> map; // To export MapEntry long est; // size estimate EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit, long est, ConcurrentHashMapV8<K,V> map) { super(tab, size, index, limit); this.map = map; this.est = est; } public ConcurrentHashMapSpliterator<Map.Entry<K,V>> trySplit() { int i, f, h; return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h, f, est >>>= 1, map); } public void forEachRemaining(Action<? 
super Map.Entry<K,V>> action) { if (action == null) throw new NullPointerException(); for (Node<K,V> p; (p = advance()) != null; ) action.apply(new MapEntry<K,V>(p.key, p.val, map)); } public boolean tryAdvance(Action<? super Map.Entry<K,V>> action) { if (action == null) throw new NullPointerException(); Node<K,V> p; if ((p = advance()) == null) return false; action.apply(new MapEntry<K,V>(p.key, p.val, map)); return true; } public long estimateSize() { return est; } } // Parallel bulk operations /** * Computes initial batch value for bulk tasks. The returned value * is approximately exp2 of the number of times (minus one) to * split task by two before executing leaf action. This value is * faster to compute and more convenient to use as a guide to * splitting than is the depth, since it is used while dividing by * two anyway. */ final int batchFor(long b) { long n; if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b) return 0; int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4 return (b <= 0L || (n /= b) >= sp) ? sp : (int)n; } /** * Performs the given action for each (key, value). * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param action the action * @since 1.8 */ public void forEach(long parallelismThreshold, BiAction<? super K,? super V> action) { if (action == null) throw new NullPointerException(); new ForEachMappingTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, action).invoke(); } /** * Performs the given action for each non-null transformation * of each (key, value). * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element, or null if there is no transformation (in * which case the action is not applied) * @param action the action * @since 1.8 */ public <U> void forEach(long parallelismThreshold, BiFun<? super K, ? super V, ? extends U> transformer, Action<? super U> action) { if (transformer == null || action == null) throw new NullPointerException(); new ForEachTransformedMappingTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, transformer, action).invoke(); } /** * Returns a non-null result from applying the given search * function on each (key, value), or null if none. Upon * success, further element processing is suppressed and the * results of any other parallel invocations of the search * function are ignored. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param searchFunction a function returning a non-null * result on success, else null * @return a non-null result from applying the given search * function on each (key, value), or null if none * @since 1.8 */ public <U> U search(long parallelismThreshold, BiFun<? super K, ? super V, ? extends U> searchFunction) { if (searchFunction == null) throw new NullPointerException(); return new SearchMappingsTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, searchFunction, new AtomicReference<U>()).invoke(); } /** * Returns the result of accumulating the given transformation * of all (key, value) pairs using the given reducer to * combine values, or null if none. 
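*
* <p>A minimal usage sketch (the map, its types, and the summing
* functions are assumptions made for the example, not part of this API):
* totalling the values of a {@code ConcurrentHashMapV8<String,Long>}
* using the BiFun helper interface declared in this class.
* <pre> {@code
* ConcurrentHashMapV8<String, Long> counts = new ConcurrentHashMapV8<String, Long>();
* Long total = counts.reduce(1000L,
*     new BiFun<String, Long, Long>() {        // transformer: pass the value through
*         public Long apply(String k, Long v) { return v; }
*     },
*     new BiFun<Long, Long, Long>() {          // reducer: sum (commutative, associative)
*         public Long apply(Long a, Long b) { return a + b; }
*     });
* }</pre>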
* * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element, or null if there is no transformation (in * which case it is not combined) * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all (key, value) pairs * @since 1.8 */ public <U> U reduce(long parallelismThreshold, BiFun<? super K, ? super V, ? extends U> transformer, BiFun<? super U, ? super U, ? extends U> reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceMappingsTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all (key, value) pairs using the given reducer to * combine values, and the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all (key, value) pairs * @since 1.8 */ public double reduceToDouble(long parallelismThreshold, ObjectByObjectToDouble<? super K, ? super V> transformer, double basis, DoubleByDoubleToDouble reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceMappingsToDoubleTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all (key, value) pairs using the given reducer to * combine values, and the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all (key, value) pairs * @since 1.8 */ public long reduceToLong(long parallelismThreshold, ObjectByObjectToLong<? super K, ? super V> transformer, long basis, LongByLongToLong reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceMappingsToLongTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all (key, value) pairs using the given reducer to * combine values, and the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all (key, value) pairs * @since 1.8 */ public int reduceToInt(long parallelismThreshold, ObjectByObjectToInt<? super K, ? 
super V> transformer, int basis, IntByIntToInt reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceMappingsToIntTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Performs the given action for each key. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param action the action * @since 1.8 */ public void forEachKey(long parallelismThreshold, Action<? super K> action) { if (action == null) throw new NullPointerException(); new ForEachKeyTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, action).invoke(); } /** * Performs the given action for each non-null transformation * of each key. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element, or null if there is no transformation (in * which case the action is not applied) * @param action the action * @since 1.8 */ public <U> void forEachKey(long parallelismThreshold, Fun<? super K, ? extends U> transformer, Action<? super U> action) { if (transformer == null || action == null) throw new NullPointerException(); new ForEachTransformedKeyTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, transformer, action).invoke(); } /** * Returns a non-null result from applying the given search * function on each key, or null if none. Upon success, * further element processing is suppressed and the results of * any other parallel invocations of the search function are * ignored. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param searchFunction a function returning a non-null * result on success, else null * @return a non-null result from applying the given search * function on each key, or null if none * @since 1.8 */ public <U> U searchKeys(long parallelismThreshold, Fun<? super K, ? extends U> searchFunction) { if (searchFunction == null) throw new NullPointerException(); return new SearchKeysTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, searchFunction, new AtomicReference<U>()).invoke(); } /** * Returns the result of accumulating all keys using the given * reducer to combine values, or null if none. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param reducer a commutative associative combining function * @return the result of accumulating all keys using the given * reducer to combine values, or null if none * @since 1.8 */ public K reduceKeys(long parallelismThreshold, BiFun<? super K, ? super K, ? extends K> reducer) { if (reducer == null) throw new NullPointerException(); return new ReduceKeysTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all keys using the given reducer to combine values, or * null if none. 
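*
* <p>For illustration (the map variable and functions below are
* assumptions made for the example): computing the maximum key length
* over a hypothetical {@code ConcurrentHashMapV8<String,Long> counts}.
* <pre> {@code
* Integer longestKey = counts.reduceKeys(500L,
*     new Fun<String, Integer>() {             // transformer: key length
*         public Integer apply(String k) { return k.length(); }
*     },
*     new BiFun<Integer, Integer, Integer>() { // reducer: maximum
*         public Integer apply(Integer a, Integer b) { return a >= b ? a : b; }
*     });
* }</pre>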
* * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element, or null if there is no transformation (in * which case it is not combined) * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all keys * @since 1.8 */ public <U> U reduceKeys(long parallelismThreshold, Fun<? super K, ? extends U> transformer, BiFun<? super U, ? super U, ? extends U> reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceKeysTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all keys using the given reducer to combine values, and * the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all keys * @since 1.8 */ public double reduceKeysToDouble(long parallelismThreshold, ObjectToDouble<? super K> transformer, double basis, DoubleByDoubleToDouble reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceKeysToDoubleTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all keys using the given reducer to combine values, and * the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all keys * @since 1.8 */ public long reduceKeysToLong(long parallelismThreshold, ObjectToLong<? super K> transformer, long basis, LongByLongToLong reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceKeysToLongTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all keys using the given reducer to combine values, and * the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all keys * @since 1.8 */ public int reduceKeysToInt(long parallelismThreshold, ObjectToInt<? 
super K> transformer, int basis, IntByIntToInt reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceKeysToIntTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Performs the given action for each value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param action the action * @since 1.8 */ public void forEachValue(long parallelismThreshold, Action<? super V> action) { if (action == null) throw new NullPointerException(); new ForEachValueTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, action).invoke(); } /** * Performs the given action for each non-null transformation * of each value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element, or null if there is no transformation (in * which case the action is not applied) * @param action the action * @since 1.8 */ public <U> void forEachValue(long parallelismThreshold, Fun<? super V, ? extends U> transformer, Action<? super U> action) { if (transformer == null || action == null) throw new NullPointerException(); new ForEachTransformedValueTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, transformer, action).invoke(); } /** * Returns a non-null result from applying the given search * function on each value, or null if none. Upon success, * further element processing is suppressed and the results of * any other parallel invocations of the search function are * ignored. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param searchFunction a function returning a non-null * result on success, else null * @return a non-null result from applying the given search * function on each value, or null if none * @since 1.8 */ public <U> U searchValues(long parallelismThreshold, Fun<? super V, ? extends U> searchFunction) { if (searchFunction == null) throw new NullPointerException(); return new SearchValuesTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, searchFunction, new AtomicReference<U>()).invoke(); } /** * Returns the result of accumulating all values using the * given reducer to combine values, or null if none. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param reducer a commutative associative combining function * @return the result of accumulating all values * @since 1.8 */ public V reduceValues(long parallelismThreshold, BiFun<? super V, ? super V, ? extends V> reducer) { if (reducer == null) throw new NullPointerException(); return new ReduceValuesTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all values using the given reducer to combine values, or * null if none. 
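*
* <p>For illustration (the map variable {@code counts} is a hypothetical
* {@code ConcurrentHashMapV8<String,Long>}, an assumption made for the
* example): finding the largest value, leaving values untransformed.
* <pre> {@code
* Long largest = counts.reduceValues(500L,
*     new Fun<Long, Long>() {                  // transformer: identity
*         public Long apply(Long v) { return v; }
*     },
*     new BiFun<Long, Long, Long>() {          // reducer: maximum
*         public Long apply(Long a, Long b) { return a >= b ? a : b; }
*     });
* }</pre>
* For an untransformed reduction like this one, {@code reduceValues(long, BiFun)}
* above is equivalent and slightly simpler; the primitive
* {@code reduceValuesTo*} forms below avoid boxing.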
* * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element, or null if there is no transformation (in * which case it is not combined) * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all values * @since 1.8 */ public <U> U reduceValues(long parallelismThreshold, Fun<? super V, ? extends U> transformer, BiFun<? super U, ? super U, ? extends U> reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceValuesTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all values using the given reducer to combine values, * and the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all values * @since 1.8 */ public double reduceValuesToDouble(long parallelismThreshold, ObjectToDouble<? super V> transformer, double basis, DoubleByDoubleToDouble reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceValuesToDoubleTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all values using the given reducer to combine values, * and the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all values * @since 1.8 */ public long reduceValuesToLong(long parallelismThreshold, ObjectToLong<? super V> transformer, long basis, LongByLongToLong reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceValuesToLongTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all values using the given reducer to combine values, * and the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all values * @since 1.8 */ public int reduceValuesToInt(long parallelismThreshold, ObjectToInt<? 
super V> transformer, int basis, IntByIntToInt reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceValuesToIntTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Performs the given action for each entry. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param action the action * @since 1.8 */ public void forEachEntry(long parallelismThreshold, Action<? super Map.Entry<K,V>> action) { if (action == null) throw new NullPointerException(); new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table, action).invoke(); } /** * Performs the given action for each non-null transformation * of each entry. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element, or null if there is no transformation (in * which case the action is not applied) * @param action the action * @since 1.8 */ public <U> void forEachEntry(long parallelismThreshold, Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) { if (transformer == null || action == null) throw new NullPointerException(); new ForEachTransformedEntryTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, transformer, action).invoke(); } /** * Returns a non-null result from applying the given search * function on each entry, or null if none. Upon success, * further element processing is suppressed and the results of * any other parallel invocations of the search function are * ignored. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param searchFunction a function returning a non-null * result on success, else null * @return a non-null result from applying the given search * function on each entry, or null if none * @since 1.8 */ public <U> U searchEntries(long parallelismThreshold, Fun<Map.Entry<K,V>, ? extends U> searchFunction) { if (searchFunction == null) throw new NullPointerException(); return new SearchEntriesTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, searchFunction, new AtomicReference<U>()).invoke(); } /** * Returns the result of accumulating all entries using the * given reducer to combine values, or null if none. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param reducer a commutative associative combining function * @return the result of accumulating all entries * @since 1.8 */ public Map.Entry<K,V> reduceEntries(long parallelismThreshold, BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) { if (reducer == null) throw new NullPointerException(); return new ReduceEntriesTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all entries using the given reducer to combine values, * or null if none. 
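*
* <p>For illustration (again using a hypothetical
* {@code ConcurrentHashMapV8<String,Long> counts}): picking the
* lexicographically smallest {@code "key=value"} rendering of any entry.
* <pre> {@code
* String first = counts.reduceEntries(500L,
*     new Fun<Map.Entry<String, Long>, String>() {   // transformer: format the entry
*         public String apply(Map.Entry<String, Long> e) {
*             return e.getKey() + "=" + e.getValue();
*         }
*     },
*     new BiFun<String, String, String>() {          // reducer: keep the smaller string
*         public String apply(String a, String b) { return a.compareTo(b) <= 0 ? a : b; }
*     });
* }</pre>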
* * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element, or null if there is no transformation (in * which case it is not combined) * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all entries * @since 1.8 */ public <U> U reduceEntries(long parallelismThreshold, Fun<Map.Entry<K,V>, ? extends U> transformer, BiFun<? super U, ? super U, ? extends U> reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceEntriesTask<K,V,U> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all entries using the given reducer to combine values, * and the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all entries * @since 1.8 */ public double reduceEntriesToDouble(long parallelismThreshold, ObjectToDouble<Map.Entry<K,V>> transformer, double basis, DoubleByDoubleToDouble reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceEntriesToDoubleTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all entries using the given reducer to combine values, * and the given basis as an identity value. * * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all entries * @since 1.8 */ public long reduceEntriesToLong(long parallelismThreshold, ObjectToLong<Map.Entry<K,V>> transformer, long basis, LongByLongToLong reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceEntriesToLongTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /** * Returns the result of accumulating the given transformation * of all entries using the given reducer to combine values, * and the given basis as an identity value. 
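*
* <p>For illustration (the map variable and predicate are assumptions
* made for the example, and the primitive helper interfaces are assumed
* to expose a single {@code apply} method as the other helpers in this
* class do): counting the entries of a hypothetical
* {@code ConcurrentHashMapV8<String,Long> counts} whose value is
* positive, starting from a basis of 0.
* <pre> {@code
* int positives = counts.reduceEntriesToInt(500L,
*     new ObjectToInt<Map.Entry<String, Long>>() {   // transformer: 1 if positive, else 0
*         public int apply(Map.Entry<String, Long> e) { return e.getValue() > 0L ? 1 : 0; }
*     },
*     0,                                             // basis: identity for addition
*     new IntByIntToInt() {                          // reducer: sum
*         public int apply(int a, int b) { return a + b; }
*     });
* }</pre>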
* * @param parallelismThreshold the (estimated) number of elements * needed for this operation to be executed in parallel * @param transformer a function returning the transformation * for an element * @param basis the identity (initial default value) for the reduction * @param reducer a commutative associative combining function * @return the result of accumulating the given transformation * of all entries * @since 1.8 */ public int reduceEntriesToInt(long parallelismThreshold, ObjectToInt<Map.Entry<K,V>> transformer, int basis, IntByIntToInt reducer) { if (transformer == null || reducer == null) throw new NullPointerException(); return new MapReduceEntriesToIntTask<K,V> (null, batchFor(parallelismThreshold), 0, 0, table, null, transformer, basis, reducer).invoke(); } /* ----------------Views -------------- */ /** * Base class for views. */ abstract static class CollectionView<K,V,E> implements Collection<E>, java.io.Serializable { private static final long serialVersionUID = 7249069246763182397L; final ConcurrentHashMapV8<K,V> map; CollectionView(ConcurrentHashMapV8<K,V> map) { this.map = map; } /** * Returns the map backing this view. * * @return the map backing this view */ public ConcurrentHashMapV8<K,V> getMap() { return map; } /** * Removes all of the elements from this view, by removing all * the mappings from the map backing this view. */ public final void clear() { map.clear(); } public final int size() { return map.size(); } public final boolean isEmpty() { return map.isEmpty(); } // implementations below rely on concrete classes supplying these // abstract methods /** * Returns a "weakly consistent" iterator that will never * throw {@link ConcurrentModificationException}, and * guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not * guaranteed to) reflect any modifications subsequent to * construction. */ public abstract Iterator<E> iterator(); public abstract boolean contains(Object o); public abstract boolean remove(Object o); private static final String oomeMsg = "Required array size too large"; public final Object[] toArray() { long sz = map.mappingCount(); if (sz > MAX_ARRAY_SIZE) throw new OutOfMemoryError(oomeMsg); int n = (int)sz; Object[] r = new Object[n]; int i = 0; for (E e : this) { if (i == n) { if (n >= MAX_ARRAY_SIZE) throw new OutOfMemoryError(oomeMsg); if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) n = MAX_ARRAY_SIZE; else n += (n >>> 1) + 1; r = Arrays.copyOf(r, n); } r[i++] = e; } return (i == n) ? r : Arrays.copyOf(r, i); } @SuppressWarnings("unchecked") public final <T> T[] toArray(T[] a) { long sz = map.mappingCount(); if (sz > MAX_ARRAY_SIZE) throw new OutOfMemoryError(oomeMsg); int m = (int)sz; T[] r = (a.length >= m) ? a : (T[])java.lang.reflect.Array .newInstance(a.getClass().getComponentType(), m); int n = r.length; int i = 0; for (E e : this) { if (i == n) { if (n >= MAX_ARRAY_SIZE) throw new OutOfMemoryError(oomeMsg); if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) n = MAX_ARRAY_SIZE; else n += (n >>> 1) + 1; r = Arrays.copyOf(r, n); } r[i++] = (T)e; } if (a == r && i < n) { r[i] = null; // null-terminate return r; } return (i == n) ? r : Arrays.copyOf(r, i); } /** * Returns a string representation of this collection. * The string representation consists of the string representations * of the collection's elements in the order they are returned by * its iterator, enclosed in square brackets ({@code "[]"}). 
* Adjacent elements are separated by the characters {@code ", "} * (comma and space). Elements are converted to strings as by * {@link String#valueOf(Object)}. * * @return a string representation of this collection */ public final String toString() { StringBuilder sb = new StringBuilder(); sb.append('['); Iterator<E> it = iterator(); if (it.hasNext()) { for (;;) { Object e = it.next(); sb.append(e == this ? "(this Collection)" : e); if (!it.hasNext()) break; sb.append(',').append(' '); } } return sb.append(']').toString(); } public final boolean containsAll(Collection<?> c) { if (c != this) { for (Object e : c) { if (e == null || !contains(e)) return false; } } return true; } public final boolean removeAll(Collection<?> c) { boolean modified = false; for (Iterator<E> it = iterator(); it.hasNext();) { if (c.contains(it.next())) { it.remove(); modified = true; } } return modified; } public final boolean retainAll(Collection<?> c) { boolean modified = false; for (Iterator<E> it = iterator(); it.hasNext();) { if (!c.contains(it.next())) { it.remove(); modified = true; } } return modified; } } /** * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in * which additions may optionally be enabled by mapping to a * common value. This class cannot be directly instantiated. * See {@link #keySet() keySet()}, * {@link #keySet(Object) keySet(V)}, * {@link #newKeySet() newKeySet()}, * {@link #newKeySet(int) newKeySet(int)}. * * @since 1.8 */ public static class KeySetView<K,V> extends CollectionView<K,V,K> implements Set<K>, java.io.Serializable { private static final long serialVersionUID = 7249069246763182397L; private final V value; KeySetView(ConcurrentHashMapV8<K,V> map, V value) { // non-public super(map); this.value = value; } /** * Returns the default mapped value for additions, * or {@code null} if additions are not supported. * * @return the default mapped value for additions, or {@code null} * if not supported */ public V getMappedValue() { return value; } /** * {@inheritDoc} * @throws NullPointerException if the specified key is null */ public boolean contains(Object o) { return map.containsKey(o); } /** * Removes the key from this map view, by removing the key (and its * corresponding value) from the backing map. This method does * nothing if the key is not in the map. * * @param o the key to be removed from the backing map * @return {@code true} if the backing map contained the specified key * @throws NullPointerException if the specified key is null */ public boolean remove(Object o) { return map.remove(o) != null; } /** * @return an iterator over the keys of the backing map */ public Iterator<K> iterator() { Node<K,V>[] t; ConcurrentHashMapV8<K,V> m = map; int f = (t = m.table) == null ? 0 : t.length; return new KeyIterator<K,V>(t, f, 0, f, m); } /** * Adds the specified key to this set view by mapping the key to * the default mapped value in the backing map, if defined. * * @param e key to be added * @return {@code true} if this set changed as a result of the call * @throws NullPointerException if the specified key is null * @throws UnsupportedOperationException if no default mapped value * for additions was provided */ public boolean add(K e) { V v; if ((v = value) == null) throw new UnsupportedOperationException(); return map.putVal(e, v, true) == null; } /** * Adds all of the elements in the specified collection to this set, * as if by calling {@link #add} on each one. 
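*
* <p>A minimal usage sketch (the element values are assumptions made
* for the example): a view obtained with a default mapped value, as
* described in the class javadoc above, accepts additions, each new key
* being mapped to that value in the backing map.
* <pre> {@code
* ConcurrentHashMapV8<String, Boolean> m = new ConcurrentHashMapV8<String, Boolean>();
* KeySetView<String, Boolean> set = m.keySet(Boolean.TRUE);
* set.addAll(java.util.Arrays.asList("a", "b", "c"));   // m now maps each key to TRUE
* }</pre>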
* * @param c the elements to be inserted into this set * @return {@code true} if this set changed as a result of the call * @throws NullPointerException if the collection or any of its * elements are {@code null} * @throws UnsupportedOperationException if no default mapped value * for additions was provided */ public boolean addAll(Collection<? extends K> c) { boolean added = false; V v; if ((v = value) == null) throw new UnsupportedOperationException(); for (K e : c) { if (map.putVal(e, v, true) == null) added = true; } return added; } public int hashCode() { int h = 0; for (K e : this) h += e.hashCode(); return h; } public boolean equals(Object o) { Set<?> c; return ((o instanceof Set) && ((c = (Set<?>)o) == this || (containsAll(c) && c.containsAll(this)))); } public ConcurrentHashMapSpliterator<K> spliteratorJSR166() { Node<K,V>[] t; ConcurrentHashMapV8<K,V> m = map; long n = m.sumCount(); int f = (t = m.table) == null ? 0 : t.length; return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n); } public void forEach(Action<? super K> action) { if (action == null) throw new NullPointerException(); Node<K,V>[] t; if ((t = map.table) != null) { Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); for (Node<K,V> p; (p = it.advance()) != null; ) action.apply(p.key); } } } /** * A view of a ConcurrentHashMapV8 as a {@link Collection} of * values, in which additions are disabled. This class cannot be * directly instantiated. See {@link #values()}. */ static final class ValuesView<K,V> extends CollectionView<K,V,V> implements Collection<V>, java.io.Serializable { private static final long serialVersionUID = 2249069246763182397L; ValuesView(ConcurrentHashMapV8<K,V> map) { super(map); } public final boolean contains(Object o) { return map.containsValue(o); } public final boolean remove(Object o) { if (o != null) { for (Iterator<V> it = iterator(); it.hasNext();) { if (o.equals(it.next())) { it.remove(); return true; } } } return false; } public final Iterator<V> iterator() { ConcurrentHashMapV8<K,V> m = map; Node<K,V>[] t; int f = (t = m.table) == null ? 0 : t.length; return new ValueIterator<K,V>(t, f, 0, f, m); } public final boolean add(V e) { throw new UnsupportedOperationException(); } public final boolean addAll(Collection<? extends V> c) { throw new UnsupportedOperationException(); } public ConcurrentHashMapSpliterator<V> spliteratorJSR166() { Node<K,V>[] t; ConcurrentHashMapV8<K,V> m = map; long n = m.sumCount(); int f = (t = m.table) == null ? 0 : t.length; return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n); } public void forEach(Action<? super V> action) { if (action == null) throw new NullPointerException(); Node<K,V>[] t; if ((t = map.table) != null) { Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); for (Node<K,V> p; (p = it.advance()) != null; ) action.apply(p.val); } } } /** * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) * entries. This class cannot be directly instantiated. See * {@link #entrySet()}. 
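*
* <p>For illustration (the map and its types are assumptions made for
* the example): iterating the view reflects the live map, and calling
* {@code setValue} on a returned entry writes through to it.
* <pre> {@code
* for (Map.Entry<String, Long> e : counts.entrySet()) {
*     if (e.getValue() == 0L)
*         e.setValue(1L);   // writes through to the backing map
* }
* }</pre>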
*/ static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>> implements Set<Map.Entry<K,V>>, java.io.Serializable { private static final long serialVersionUID = 2249069246763182397L; EntrySetView(ConcurrentHashMapV8<K,V> map) { super(map); } public boolean contains(Object o) { Object k, v, r; Map.Entry<?,?> e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry<?,?>)o).getKey()) != null && (r = map.get(k)) != null && (v = e.getValue()) != null && (v == r || v.equals(r))); } public boolean remove(Object o) { Object k, v; Map.Entry<?,?> e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry<?,?>)o).getKey()) != null && (v = e.getValue()) != null && map.remove(k, v)); } /** * @return an iterator over the entries of the backing map */ public Iterator<Map.Entry<K,V>> iterator() { ConcurrentHashMapV8<K,V> m = map; Node<K,V>[] t; int f = (t = m.table) == null ? 0 : t.length; return new EntryIterator<K,V>(t, f, 0, f, m); } public boolean add(Entry<K,V> e) { return map.putVal(e.getKey(), e.getValue(), false) == null; } public boolean addAll(Collection<? extends Entry<K,V>> c) { boolean added = false; for (Entry<K,V> e : c) { if (add(e)) added = true; } return added; } public final int hashCode() { int h = 0; Node<K,V>[] t; if ((t = map.table) != null) { Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); for (Node<K,V> p; (p = it.advance()) != null; ) { h += p.hashCode(); } } return h; } public final boolean equals(Object o) { Set<?> c; return ((o instanceof Set) && ((c = (Set<?>)o) == this || (containsAll(c) && c.containsAll(this)))); } public ConcurrentHashMapSpliterator<Map.Entry<K,V>> spliteratorJSR166() { Node<K,V>[] t; ConcurrentHashMapV8<K,V> m = map; long n = m.sumCount(); int f = (t = m.table) == null ? 0 : t.length; return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m); } public void forEach(Action<? super Map.Entry<K,V>> action) { if (action == null) throw new NullPointerException(); Node<K,V>[] t; if ((t = map.table) != null) { Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); for (Node<K,V> p; (p = it.advance()) != null; ) action.apply(new MapEntry<K,V>(p.key, p.val, map)); } } } // ------------------------------------------------------- /** * Base class for bulk tasks. Repeats some fields and code from * class Traverser, because we need to subclass CountedCompleter. 
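* Each concrete subclass's {@code compute} follows the same pattern:
* while {@code batch} remains positive it halves {@code batch}, shrinks
* its own range to the lower half, and forks a sibling task over the
* upper half; it then applies its operation to the nodes produced by
* {@code advance()} and completes through the CountedCompleter
* machinery (see ForEachKeyTask below for the simplest case).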
*/ abstract static class BulkTask<K,V,R> extends CountedCompleter<R> { Node<K,V>[] tab; // same as Traverser Node<K,V> next; int index; int baseIndex; int baseLimit; final int baseSize; int batch; // split control BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) { super(par); this.batch = b; this.index = this.baseIndex = i; if ((this.tab = t) == null) this.baseSize = this.baseLimit = 0; else if (par == null) this.baseSize = this.baseLimit = t.length; else { this.baseLimit = f; this.baseSize = par.baseSize; } } /** * Same as Traverser version */ final Node<K,V> advance() { Node<K,V> e; if ((e = next) != null) e = e.next; for (;;) { Node<K,V>[] t; int i, n; K ek; // must use locals in checks if (e != null) return next = e; if (baseIndex >= baseLimit || (t = tab) == null || (n = t.length) <= (i = index) || i < 0) return next = null; if ((e = tabAt(t, index)) != null && e.hash < 0) { if (e instanceof ForwardingNode) { tab = ((ForwardingNode<K,V>)e).nextTable; e = null; continue; } else if (e instanceof TreeBin) e = ((TreeBin<K,V>)e).first; else e = null; } if ((index += baseSize) >= n) index = ++baseIndex; // visit upper slots if present } } } /* * Task classes. Coded in a regular but ugly format/style to * simplify checks that each variant differs in the right way from * others. The null screenings exist because compilers cannot tell * that we've already null-checked task arguments, so we force * simplest hoisted bypass to help avoid convoluted traps. */ @SuppressWarnings("serial") static final class ForEachKeyTask<K,V> extends BulkTask<K,V,Void> { final Action<? super K> action; ForEachKeyTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Action<? super K> action) { super(p, b, i, f, t); this.action = action; } public final void compute() { final Action<? super K> action; if ((action = this.action) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); new ForEachKeyTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, action).fork(); } for (Node<K,V> p; (p = advance()) != null;) action.apply(p.key); propagateCompletion(); } } } @SuppressWarnings("serial") static final class ForEachValueTask<K,V> extends BulkTask<K,V,Void> { final Action<? super V> action; ForEachValueTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Action<? super V> action) { super(p, b, i, f, t); this.action = action; } public final void compute() { final Action<? super V> action; if ((action = this.action) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); new ForEachValueTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, action).fork(); } for (Node<K,V> p; (p = advance()) != null;) action.apply(p.val); propagateCompletion(); } } } @SuppressWarnings("serial") static final class ForEachEntryTask<K,V> extends BulkTask<K,V,Void> { final Action<? super Entry<K,V>> action; ForEachEntryTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Action<? super Entry<K,V>> action) { super(p, b, i, f, t); this.action = action; } public final void compute() { final Action<? 
super Entry<K,V>> action; if ((action = this.action) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); new ForEachEntryTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, action).fork(); } for (Node<K,V> p; (p = advance()) != null; ) action.apply(p); propagateCompletion(); } } } @SuppressWarnings("serial") static final class ForEachMappingTask<K,V> extends BulkTask<K,V,Void> { final BiAction<? super K, ? super V> action; ForEachMappingTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiAction<? super K,? super V> action) { super(p, b, i, f, t); this.action = action; } public final void compute() { final BiAction<? super K, ? super V> action; if ((action = this.action) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); new ForEachMappingTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, action).fork(); } for (Node<K,V> p; (p = advance()) != null; ) action.apply(p.key, p.val); propagateCompletion(); } } } @SuppressWarnings("serial") static final class ForEachTransformedKeyTask<K,V,U> extends BulkTask<K,V,Void> { final Fun<? super K, ? extends U> transformer; final Action<? super U> action; ForEachTransformedKeyTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Fun<? super K, ? extends U> transformer, Action<? super U> action) { super(p, b, i, f, t); this.transformer = transformer; this.action = action; } public final void compute() { final Fun<? super K, ? extends U> transformer; final Action<? super U> action; if ((transformer = this.transformer) != null && (action = this.action) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); new ForEachTransformedKeyTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, transformer, action).fork(); } for (Node<K,V> p; (p = advance()) != null; ) { U u; if ((u = transformer.apply(p.key)) != null) action.apply(u); } propagateCompletion(); } } } @SuppressWarnings("serial") static final class ForEachTransformedValueTask<K,V,U> extends BulkTask<K,V,Void> { final Fun<? super V, ? extends U> transformer; final Action<? super U> action; ForEachTransformedValueTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Fun<? super V, ? extends U> transformer, Action<? super U> action) { super(p, b, i, f, t); this.transformer = transformer; this.action = action; } public final void compute() { final Fun<? super V, ? extends U> transformer; final Action<? super U> action; if ((transformer = this.transformer) != null && (action = this.action) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); new ForEachTransformedValueTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, transformer, action).fork(); } for (Node<K,V> p; (p = advance()) != null; ) { U u; if ((u = transformer.apply(p.val)) != null) action.apply(u); } propagateCompletion(); } } } @SuppressWarnings("serial") static final class ForEachTransformedEntryTask<K,V,U> extends BulkTask<K,V,Void> { final Fun<Map.Entry<K,V>, ? extends U> transformer; final Action<? super U> action; ForEachTransformedEntryTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Fun<Map.Entry<K,V>, ? extends U> transformer, Action<? super U> action) { super(p, b, i, f, t); this.transformer = transformer; this.action = action; } public final void compute() { final Fun<Map.Entry<K,V>, ? extends U> transformer; final Action<? 
super U> action; if ((transformer = this.transformer) != null && (action = this.action) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); new ForEachTransformedEntryTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, transformer, action).fork(); } for (Node<K,V> p; (p = advance()) != null; ) { U u; if ((u = transformer.apply(p)) != null) action.apply(u); } propagateCompletion(); } } } @SuppressWarnings("serial") static final class ForEachTransformedMappingTask<K,V,U> extends BulkTask<K,V,Void> { final BiFun<? super K, ? super V, ? extends U> transformer; final Action<? super U> action; ForEachTransformedMappingTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiFun<? super K, ? super V, ? extends U> transformer, Action<? super U> action) { super(p, b, i, f, t); this.transformer = transformer; this.action = action; } public final void compute() { final BiFun<? super K, ? super V, ? extends U> transformer; final Action<? super U> action; if ((transformer = this.transformer) != null && (action = this.action) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); new ForEachTransformedMappingTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, transformer, action).fork(); } for (Node<K,V> p; (p = advance()) != null; ) { U u; if ((u = transformer.apply(p.key, p.val)) != null) action.apply(u); } propagateCompletion(); } } } @SuppressWarnings("serial") static final class SearchKeysTask<K,V,U> extends BulkTask<K,V,U> { final Fun<? super K, ? extends U> searchFunction; final AtomicReference<U> result; SearchKeysTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Fun<? super K, ? extends U> searchFunction, AtomicReference<U> result) { super(p, b, i, f, t); this.searchFunction = searchFunction; this.result = result; } public final U getRawResult() { return result.get(); } public final void compute() { final Fun<? super K, ? extends U> searchFunction; final AtomicReference<U> result; if ((searchFunction = this.searchFunction) != null && (result = this.result) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { if (result.get() != null) return; addToPendingCount(1); new SearchKeysTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, searchFunction, result).fork(); } while (result.get() == null) { U u; Node<K,V> p; if ((p = advance()) == null) { propagateCompletion(); break; } if ((u = searchFunction.apply(p.key)) != null) { if (result.compareAndSet(null, u)) quietlyCompleteRoot(); break; } } } } } @SuppressWarnings("serial") static final class SearchValuesTask<K,V,U> extends BulkTask<K,V,U> { final Fun<? super V, ? extends U> searchFunction; final AtomicReference<U> result; SearchValuesTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Fun<? super V, ? extends U> searchFunction, AtomicReference<U> result) { super(p, b, i, f, t); this.searchFunction = searchFunction; this.result = result; } public final U getRawResult() { return result.get(); } public final void compute() { final Fun<? super V, ? 
extends U> searchFunction; final AtomicReference<U> result; if ((searchFunction = this.searchFunction) != null && (result = this.result) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { if (result.get() != null) return; addToPendingCount(1); new SearchValuesTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, searchFunction, result).fork(); } while (result.get() == null) { U u; Node<K,V> p; if ((p = advance()) == null) { propagateCompletion(); break; } if ((u = searchFunction.apply(p.val)) != null) { if (result.compareAndSet(null, u)) quietlyCompleteRoot(); break; } } } } } @SuppressWarnings("serial") static final class SearchEntriesTask<K,V,U> extends BulkTask<K,V,U> { final Fun<Entry<K,V>, ? extends U> searchFunction; final AtomicReference<U> result; SearchEntriesTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Fun<Entry<K,V>, ? extends U> searchFunction, AtomicReference<U> result) { super(p, b, i, f, t); this.searchFunction = searchFunction; this.result = result; } public final U getRawResult() { return result.get(); } public final void compute() { final Fun<Entry<K,V>, ? extends U> searchFunction; final AtomicReference<U> result; if ((searchFunction = this.searchFunction) != null && (result = this.result) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { if (result.get() != null) return; addToPendingCount(1); new SearchEntriesTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, searchFunction, result).fork(); } while (result.get() == null) { U u; Node<K,V> p; if ((p = advance()) == null) { propagateCompletion(); break; } if ((u = searchFunction.apply(p)) != null) { if (result.compareAndSet(null, u)) quietlyCompleteRoot(); return; } } } } } @SuppressWarnings("serial") static final class SearchMappingsTask<K,V,U> extends BulkTask<K,V,U> { final BiFun<? super K, ? super V, ? extends U> searchFunction; final AtomicReference<U> result; SearchMappingsTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiFun<? super K, ? super V, ? extends U> searchFunction, AtomicReference<U> result) { super(p, b, i, f, t); this.searchFunction = searchFunction; this.result = result; } public final U getRawResult() { return result.get(); } public final void compute() { final BiFun<? super K, ? super V, ? extends U> searchFunction; final AtomicReference<U> result; if ((searchFunction = this.searchFunction) != null && (result = this.result) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { if (result.get() != null) return; addToPendingCount(1); new SearchMappingsTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, searchFunction, result).fork(); } while (result.get() == null) { U u; Node<K,V> p; if ((p = advance()) == null) { propagateCompletion(); break; } if ((u = searchFunction.apply(p.key, p.val)) != null) { if (result.compareAndSet(null, u)) quietlyCompleteRoot(); break; } } } } } @SuppressWarnings("serial") static final class ReduceKeysTask<K,V> extends BulkTask<K,V,K> { final BiFun<? super K, ? super K, ? extends K> reducer; K result; ReduceKeysTask<K,V> rights, nextRight; ReduceKeysTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, ReduceKeysTask<K,V> nextRight, BiFun<? super K, ? super K, ? extends K> reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.reducer = reducer; } public final K getRawResult() { return result; } public final void compute() { final BiFun<? super K, ? super K, ? 
extends K> reducer; if ((reducer = this.reducer) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new ReduceKeysTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, reducer)).fork(); } K r = null; for (Node<K,V> p; (p = advance()) != null; ) { K u = p.key; r = (r == null) ? u : u == null ? r : reducer.apply(r, u); } result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") ReduceKeysTask<K,V> t = (ReduceKeysTask<K,V>)c, s = t.rights; while (s != null) { K tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class ReduceValuesTask<K,V> extends BulkTask<K,V,V> { final BiFun<? super V, ? super V, ? extends V> reducer; V result; ReduceValuesTask<K,V> rights, nextRight; ReduceValuesTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, ReduceValuesTask<K,V> nextRight, BiFun<? super V, ? super V, ? extends V> reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.reducer = reducer; } public final V getRawResult() { return result; } public final void compute() { final BiFun<? super V, ? super V, ? extends V> reducer; if ((reducer = this.reducer) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new ReduceValuesTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, reducer)).fork(); } V r = null; for (Node<K,V> p; (p = advance()) != null; ) { V v = p.val; r = (r == null) ? v : reducer.apply(r, v); } result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") ReduceValuesTask<K,V> t = (ReduceValuesTask<K,V>)c, s = t.rights; while (s != null) { V tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class ReduceEntriesTask<K,V> extends BulkTask<K,V,Map.Entry<K,V>> { final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer; Map.Entry<K,V> result; ReduceEntriesTask<K,V> rights, nextRight; ReduceEntriesTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, ReduceEntriesTask<K,V> nextRight, BiFun<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.reducer = reducer; } public final Map.Entry<K,V> getRawResult() { return result; } public final void compute() { final BiFun<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer; if ((reducer = this.reducer) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new ReduceEntriesTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, reducer)).fork(); } Map.Entry<K,V> r = null; for (Node<K,V> p; (p = advance()) != null; ) r = (r == null) ? p : reducer.apply(r, p); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") ReduceEntriesTask<K,V> t = (ReduceEntriesTask<K,V>)c, s = t.rights; while (s != null) { Map.Entry<K,V> tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? 
sr : reducer.apply(tr, sr)); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceKeysTask<K,V,U> extends BulkTask<K,V,U> { final Fun<? super K, ? extends U> transformer; final BiFun<? super U, ? super U, ? extends U> reducer; U result; MapReduceKeysTask<K,V,U> rights, nextRight; MapReduceKeysTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysTask<K,V,U> nextRight, Fun<? super K, ? extends U> transformer, BiFun<? super U, ? super U, ? extends U> reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.reducer = reducer; } public final U getRawResult() { return result; } public final void compute() { final Fun<? super K, ? extends U> transformer; final BiFun<? super U, ? super U, ? extends U> reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceKeysTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, reducer)).fork(); } U r = null; for (Node<K,V> p; (p = advance()) != null; ) { U u; if ((u = transformer.apply(p.key)) != null) r = (r == null) ? u : reducer.apply(r, u); } result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U> t = (MapReduceKeysTask<K,V,U>)c, s = t.rights; while (s != null) { U tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceValuesTask<K,V,U> extends BulkTask<K,V,U> { final Fun<? super V, ? extends U> transformer; final BiFun<? super U, ? super U, ? extends U> reducer; U result; MapReduceValuesTask<K,V,U> rights, nextRight; MapReduceValuesTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesTask<K,V,U> nextRight, Fun<? super V, ? extends U> transformer, BiFun<? super U, ? super U, ? extends U> reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.reducer = reducer; } public final U getRawResult() { return result; } public final void compute() { final Fun<? super V, ? extends U> transformer; final BiFun<? super U, ? super U, ? extends U> reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceValuesTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, reducer)).fork(); } U r = null; for (Node<K,V> p; (p = advance()) != null; ) { U u; if ((u = transformer.apply(p.val)) != null) r = (r == null) ? u : reducer.apply(r, u); } result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceValuesTask<K,V,U> t = (MapReduceValuesTask<K,V,U>)c, s = t.rights; while (s != null) { U tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceEntriesTask<K,V,U> extends BulkTask<K,V,U> { final Fun<Map.Entry<K,V>, ? extends U> transformer; final BiFun<? super U, ? super U, ? 
extends U> reducer; U result; MapReduceEntriesTask<K,V,U> rights, nextRight; MapReduceEntriesTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesTask<K,V,U> nextRight, Fun<Map.Entry<K,V>, ? extends U> transformer, BiFun<? super U, ? super U, ? extends U> reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.reducer = reducer; } public final U getRawResult() { return result; } public final void compute() { final Fun<Map.Entry<K,V>, ? extends U> transformer; final BiFun<? super U, ? super U, ? extends U> reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceEntriesTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, reducer)).fork(); } U r = null; for (Node<K,V> p; (p = advance()) != null; ) { U u; if ((u = transformer.apply(p)) != null) r = (r == null) ? u : reducer.apply(r, u); } result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceEntriesTask<K,V,U> t = (MapReduceEntriesTask<K,V,U>)c, s = t.rights; while (s != null) { U tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceMappingsTask<K,V,U> extends BulkTask<K,V,U> { final BiFun<? super K, ? super V, ? extends U> transformer; final BiFun<? super U, ? super U, ? extends U> reducer; U result; MapReduceMappingsTask<K,V,U> rights, nextRight; MapReduceMappingsTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsTask<K,V,U> nextRight, BiFun<? super K, ? super V, ? extends U> transformer, BiFun<? super U, ? super U, ? extends U> reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.reducer = reducer; } public final U getRawResult() { return result; } public final void compute() { final BiFun<? super K, ? super V, ? extends U> transformer; final BiFun<? super U, ? super U, ? extends U> reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceMappingsTask<K,V,U> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, reducer)).fork(); } U r = null; for (Node<K,V> p; (p = advance()) != null; ) { U u; if ((u = transformer.apply(p.key, p.val)) != null) r = (r == null) ? u : reducer.apply(r, u); } result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceMappingsTask<K,V,U> t = (MapReduceMappingsTask<K,V,U>)c, s = t.rights; while (s != null) { U tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceKeysToDoubleTask<K,V> extends BulkTask<K,V,Double> { final ObjectToDouble<? super K> transformer; final DoubleByDoubleToDouble reducer; final double basis; double result; MapReduceKeysToDoubleTask<K,V> rights, nextRight; MapReduceKeysToDoubleTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysToDoubleTask<K,V> nextRight, ObjectToDouble<? 
super K> transformer, double basis, DoubleByDoubleToDouble reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Double getRawResult() { return result; } public final void compute() { final ObjectToDouble<? super K> transformer; final DoubleByDoubleToDouble reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { double r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceKeysToDoubleTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p.key)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceKeysToDoubleTask<K,V> t = (MapReduceKeysToDoubleTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceValuesToDoubleTask<K,V> extends BulkTask<K,V,Double> { final ObjectToDouble<? super V> transformer; final DoubleByDoubleToDouble reducer; final double basis; double result; MapReduceValuesToDoubleTask<K,V> rights, nextRight; MapReduceValuesToDoubleTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesToDoubleTask<K,V> nextRight, ObjectToDouble<? super V> transformer, double basis, DoubleByDoubleToDouble reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Double getRawResult() { return result; } public final void compute() { final ObjectToDouble<? 
super V> transformer; final DoubleByDoubleToDouble reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { double r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceValuesToDoubleTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p.val)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceValuesToDoubleTask<K,V> t = (MapReduceValuesToDoubleTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceEntriesToDoubleTask<K,V> extends BulkTask<K,V,Double> { final ObjectToDouble<Map.Entry<K,V>> transformer; final DoubleByDoubleToDouble reducer; final double basis; double result; MapReduceEntriesToDoubleTask<K,V> rights, nextRight; MapReduceEntriesToDoubleTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesToDoubleTask<K,V> nextRight, ObjectToDouble<Map.Entry<K,V>> transformer, double basis, DoubleByDoubleToDouble reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Double getRawResult() { return result; } public final void compute() { final ObjectToDouble<Map.Entry<K,V>> transformer; final DoubleByDoubleToDouble reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { double r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceEntriesToDoubleTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceEntriesToDoubleTask<K,V> t = (MapReduceEntriesToDoubleTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceMappingsToDoubleTask<K,V> extends BulkTask<K,V,Double> { final ObjectByObjectToDouble<? super K, ? super V> transformer; final DoubleByDoubleToDouble reducer; final double basis; double result; MapReduceMappingsToDoubleTask<K,V> rights, nextRight; MapReduceMappingsToDoubleTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsToDoubleTask<K,V> nextRight, ObjectByObjectToDouble<? super K, ? super V> transformer, double basis, DoubleByDoubleToDouble reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Double getRawResult() { return result; } public final void compute() { final ObjectByObjectToDouble<? super K, ? 
super V> transformer; final DoubleByDoubleToDouble reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { double r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceMappingsToDoubleTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p.key, p.val)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceMappingsToDoubleTask<K,V> t = (MapReduceMappingsToDoubleTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceKeysToLongTask<K,V> extends BulkTask<K,V,Long> { final ObjectToLong<? super K> transformer; final LongByLongToLong reducer; final long basis; long result; MapReduceKeysToLongTask<K,V> rights, nextRight; MapReduceKeysToLongTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysToLongTask<K,V> nextRight, ObjectToLong<? super K> transformer, long basis, LongByLongToLong reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Long getRawResult() { return result; } public final void compute() { final ObjectToLong<? super K> transformer; final LongByLongToLong reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { long r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceKeysToLongTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p.key)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceKeysToLongTask<K,V> t = (MapReduceKeysToLongTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceValuesToLongTask<K,V> extends BulkTask<K,V,Long> { final ObjectToLong<? super V> transformer; final LongByLongToLong reducer; final long basis; long result; MapReduceValuesToLongTask<K,V> rights, nextRight; MapReduceValuesToLongTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesToLongTask<K,V> nextRight, ObjectToLong<? super V> transformer, long basis, LongByLongToLong reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Long getRawResult() { return result; } public final void compute() { final ObjectToLong<? 
super V> transformer; final LongByLongToLong reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { long r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceValuesToLongTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p.val)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceValuesToLongTask<K,V> t = (MapReduceValuesToLongTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceEntriesToLongTask<K,V> extends BulkTask<K,V,Long> { final ObjectToLong<Map.Entry<K,V>> transformer; final LongByLongToLong reducer; final long basis; long result; MapReduceEntriesToLongTask<K,V> rights, nextRight; MapReduceEntriesToLongTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesToLongTask<K,V> nextRight, ObjectToLong<Map.Entry<K,V>> transformer, long basis, LongByLongToLong reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Long getRawResult() { return result; } public final void compute() { final ObjectToLong<Map.Entry<K,V>> transformer; final LongByLongToLong reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { long r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceEntriesToLongTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceEntriesToLongTask<K,V> t = (MapReduceEntriesToLongTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceMappingsToLongTask<K,V> extends BulkTask<K,V,Long> { final ObjectByObjectToLong<? super K, ? super V> transformer; final LongByLongToLong reducer; final long basis; long result; MapReduceMappingsToLongTask<K,V> rights, nextRight; MapReduceMappingsToLongTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsToLongTask<K,V> nextRight, ObjectByObjectToLong<? super K, ? super V> transformer, long basis, LongByLongToLong reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Long getRawResult() { return result; } public final void compute() { final ObjectByObjectToLong<? super K, ? 
super V> transformer; final LongByLongToLong reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { long r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceMappingsToLongTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p.key, p.val)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceMappingsToLongTask<K,V> t = (MapReduceMappingsToLongTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceKeysToIntTask<K,V> extends BulkTask<K,V,Integer> { final ObjectToInt<? super K> transformer; final IntByIntToInt reducer; final int basis; int result; MapReduceKeysToIntTask<K,V> rights, nextRight; MapReduceKeysToIntTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysToIntTask<K,V> nextRight, ObjectToInt<? super K> transformer, int basis, IntByIntToInt reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Integer getRawResult() { return result; } public final void compute() { final ObjectToInt<? super K> transformer; final IntByIntToInt reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { int r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceKeysToIntTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p.key)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceKeysToIntTask<K,V> t = (MapReduceKeysToIntTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceValuesToIntTask<K,V> extends BulkTask<K,V,Integer> { final ObjectToInt<? super V> transformer; final IntByIntToInt reducer; final int basis; int result; MapReduceValuesToIntTask<K,V> rights, nextRight; MapReduceValuesToIntTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesToIntTask<K,V> nextRight, ObjectToInt<? super V> transformer, int basis, IntByIntToInt reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Integer getRawResult() { return result; } public final void compute() { final ObjectToInt<? 
super V> transformer; final IntByIntToInt reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { int r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceValuesToIntTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p.val)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceValuesToIntTask<K,V> t = (MapReduceValuesToIntTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceEntriesToIntTask<K,V> extends BulkTask<K,V,Integer> { final ObjectToInt<Map.Entry<K,V>> transformer; final IntByIntToInt reducer; final int basis; int result; MapReduceEntriesToIntTask<K,V> rights, nextRight; MapReduceEntriesToIntTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesToIntTask<K,V> nextRight, ObjectToInt<Map.Entry<K,V>> transformer, int basis, IntByIntToInt reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Integer getRawResult() { return result; } public final void compute() { final ObjectToInt<Map.Entry<K,V>> transformer; final IntByIntToInt reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { int r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceEntriesToIntTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceEntriesToIntTask<K,V> t = (MapReduceEntriesToIntTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } @SuppressWarnings("serial") static final class MapReduceMappingsToIntTask<K,V> extends BulkTask<K,V,Integer> { final ObjectByObjectToInt<? super K, ? super V> transformer; final IntByIntToInt reducer; final int basis; int result; MapReduceMappingsToIntTask<K,V> rights, nextRight; MapReduceMappingsToIntTask (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsToIntTask<K,V> nextRight, ObjectByObjectToInt<? super K, ? super V> transformer, int basis, IntByIntToInt reducer) { super(p, b, i, f, t); this.nextRight = nextRight; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final Integer getRawResult() { return result; } public final void compute() { final ObjectByObjectToInt<? super K, ? 
super V> transformer; final IntByIntToInt reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { int r = this.basis; for (int i = baseIndex, f, h; batch > 0 && (h = ((f = baseLimit) + i) >>> 1) > i;) { addToPendingCount(1); (rights = new MapReduceMappingsToIntTask<K,V> (this, batch >>>= 1, baseLimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (Node<K,V> p; (p = advance()) != null; ) r = reducer.apply(r, transformer.apply(p.key, p.val)); result = r; CountedCompleter<?> c; for (c = firstComplete(); c != null; c = c.nextComplete()) { @SuppressWarnings("unchecked") MapReduceMappingsToIntTask<K,V> t = (MapReduceMappingsToIntTask<K,V>)c, s = t.rights; while (s != null) { t.result = reducer.apply(t.result, s.result); s = t.rights = s.nextRight; } } } } } /* ---------------- Counters -------------- */ // Adapted from LongAdder and Striped64. // See their internal docs for explanation. // A padded cell for distributing counts static final class CounterCell { volatile long p0, p1, p2, p3, p4, p5, p6; volatile long value; volatile long q0, q1, q2, q3, q4, q5, q6; CounterCell(long x) { value = x; } } /** * Holder for the thread-local hash code determining which * CounterCell to use. The code is initialized via the * counterHashCodeGenerator, but may be moved upon collisions. */ static final class CounterHashCode { int code; } /** * Generates initial value for per-thread CounterHashCodes. */ static final AtomicInteger counterHashCodeGenerator = new AtomicInteger(); /** * Increment for counterHashCodeGenerator. See class ThreadLocal * for explanation. */ static final int SEED_INCREMENT = 0x61c88647; /** * Per-thread counter hash codes. Shared across all instances. */ static final ThreadLocal<CounterHashCode> threadCounterHashCode = new ThreadLocal<CounterHashCode>(); final long sumCount() { CounterCell[] as = counterCells; CounterCell a; long sum = baseCount; if (as != null) { for (int i = 0; i < as.length; ++i) { if ((a = as[i]) != null) sum += a.value; } } return sum; } // See LongAdder version for explanation private final void fullAddCount(long x, CounterHashCode hc, boolean wasUncontended) { int h; if (hc == null) { hc = new CounterHashCode(); int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT); h = hc.code = (s == 0) ? 
1 : s; // Avoid zero threadCounterHashCode.set(hc); } else h = hc.code; boolean collide = false; // True if last slot nonempty for (;;) { CounterCell[] as; CounterCell a; int n; long v; if ((as = counterCells) != null && (n = as.length) > 0) { if ((a = as[(n - 1) & h]) == null) { if (cellsBusy == 0) { // Try to attach new Cell CounterCell r = new CounterCell(x); // Optimistic create if (cellsBusy == 0 && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { boolean created = false; try { // Recheck under lock CounterCell[] rs; int m, j; if ((rs = counterCells) != null && (m = rs.length) > 0 && rs[j = (m - 1) & h] == null) { rs[j] = r; created = true; } } finally { cellsBusy = 0; } if (created) break; continue; // Slot is now non-empty } } collide = false; } else if (!wasUncontended) // CAS already known to fail wasUncontended = true; // Continue after rehash else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) break; else if (counterCells != as || n >= NCPU) collide = false; // At max size or stale else if (!collide) collide = true; else if (cellsBusy == 0 && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { try { if (counterCells == as) {// Expand table unless stale CounterCell[] rs = new CounterCell[n << 1]; for (int i = 0; i < n; ++i) rs[i] = as[i]; counterCells = rs; } } finally { cellsBusy = 0; } collide = false; continue; // Retry with expanded table } h ^= h << 13; // Rehash h ^= h >>> 17; h ^= h << 5; } else if (cellsBusy == 0 && counterCells == as && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { boolean init = false; try { // Initialize table if (counterCells == as) { CounterCell[] rs = new CounterCell[2]; rs[h & 1] = new CounterCell(x); counterCells = rs; init = true; } } finally { cellsBusy = 0; } if (init) break; } else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) break; // Fall back on using base } hc.code = h; // Record index for next time } // Unsafe mechanics private static final sun.misc.Unsafe U; private static final long SIZECTL; private static final long TRANSFERINDEX; private static final long BASECOUNT; private static final long CELLSBUSY; private static final long CELLVALUE; private static final long ABASE; private static final int ASHIFT; static { try { U = getUnsafe(); Class<?> k = ConcurrentHashMapV8.class; SIZECTL = U.objectFieldOffset (k.getDeclaredField("sizeCtl")); TRANSFERINDEX = U.objectFieldOffset (k.getDeclaredField("transferIndex")); BASECOUNT = U.objectFieldOffset (k.getDeclaredField("baseCount")); CELLSBUSY = U.objectFieldOffset (k.getDeclaredField("cellsBusy")); Class<?> ck = CounterCell.class; CELLVALUE = U.objectFieldOffset (ck.getDeclaredField("value")); Class<?> ak = Node[].class; ABASE = U.arrayBaseOffset(ak); int scale = U.arrayIndexScale(ak); if ((scale & (scale - 1)) != 0) throw new Error("data type scale not a power of two"); ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); } catch (Exception e) { throw new Error(e); } } /** * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. * Replace with a simple call to Unsafe.getUnsafe when integrating * into a jdk. 
* * @return a sun.misc.Unsafe */ private static sun.misc.Unsafe getUnsafe() { try { return sun.misc.Unsafe.getUnsafe(); } catch (SecurityException tryReflectionInstead) {} try { return java.security.AccessController.doPrivileged (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() { public sun.misc.Unsafe run() throws Exception { Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class; for (java.lang.reflect.Field f : k.getDeclaredFields()) { f.setAccessible(true); Object x = f.get(null); if (k.isInstance(x)) return k.cast(x); } throw new NoSuchFieldError("the Unsafe"); }}); } catch (java.security.PrivilegedActionException e) { throw new RuntimeException("Could not initialize intrinsics", e.getCause()); } } }
0true
src_main_java_jsr166e_ConcurrentHashMapV8.java
3,083
static interface FailedEngineListener { void onFailedEngine(ShardId shardId, Throwable t); }
0true
src_main_java_org_elasticsearch_index_engine_Engine.java
2,561
clusterService.submitStateUpdateTask("local-disco-initial_connect(master)", new ProcessedClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { nodesBuilder.put(discovery.localNode); } nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id()); // remove the NO_MASTER block in this case ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(Discovery.NO_MASTER_BLOCK); return ClusterState.builder(currentState).nodes(nodesBuilder).blocks(blocks).build(); } @Override public void onFailure(String source, Throwable t) { logger.error("unexpected failure during [{}]", t, source); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { sendInitialStateEventIfNeeded(); } });
1no label
src_main_java_org_elasticsearch_discovery_local_LocalDiscovery.java
1,184
public interface BroadleafPaymentInfoTypeService { /** * Constructs a default entry in the payments map for each payment found on the order that matches * a PaymentInfoTypes. * * @param order * @return Map<PaymentInfo, Referenced> */ public Map<PaymentInfo, Referenced> getPaymentsMap(Order order); }
0true
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_BroadleafPaymentInfoTypeService.java
35
{ @Override public HostnamePort clusterServer() { return config.get( ClusterSettings.cluster_server ); } @Override public int defaultPort() { return 5001; } @Override public String name() { return null; } }, logging);
1no label
enterprise_cluster_src_main_java_org_neo4j_cluster_NetworkedServerFactory.java
1,471
public class HazelcastNaturalIdRegion extends AbstractTransactionalDataRegion<IMapRegionCache> implements NaturalIdRegion { public HazelcastNaturalIdRegion(final HazelcastInstance instance, final String regionName, final Properties props, final CacheDataDescription metadata) { super(instance, regionName, props, metadata, new IMapRegionCache(regionName, instance, props, metadata)); } public NaturalIdRegionAccessStrategy buildAccessStrategy(final AccessType accessType) throws CacheException { if (null == accessType) { throw new CacheException( "Got null AccessType while attempting to determine a proper NaturalIdRegionAccessStrategy. This can't happen!"); } if (AccessType.READ_ONLY.equals(accessType)) { return new NaturalIdRegionAccessStrategyAdapter( new ReadOnlyAccessDelegate<HazelcastNaturalIdRegion>(this, props)); } if (AccessType.NONSTRICT_READ_WRITE.equals(accessType)) { return new NaturalIdRegionAccessStrategyAdapter( new NonStrictReadWriteAccessDelegate<HazelcastNaturalIdRegion>(this, props)); } if (AccessType.READ_WRITE.equals(accessType)) { return new NaturalIdRegionAccessStrategyAdapter( new ReadWriteAccessDelegate<HazelcastNaturalIdRegion>(this, props)); } if (AccessType.TRANSACTIONAL.equals(accessType)) { throw new CacheException("Transactional access is not currently supported by Hazelcast."); } throw new CacheException("Got unknown AccessType \"" + accessType + "\" while attempting to build EntityRegionAccessStrategy."); } }
0true
hazelcast-hibernate_hazelcast-hibernate4_src_main_java_com_hazelcast_hibernate_region_HazelcastNaturalIdRegion.java
1,023
public class SemaphoreReplicationOperation extends AbstractOperation implements IdentifiedDataSerializable { Map<String, Permit> migrationData; public SemaphoreReplicationOperation() { } public SemaphoreReplicationOperation(Map<String, Permit> migrationData) { this.migrationData = migrationData; } @Override public void run() throws Exception { SemaphoreService service = getService(); for (Permit permit : migrationData.values()) { permit.setInitialized(); } service.insertMigrationData(migrationData); } @Override protected void writeInternal(ObjectDataOutput out) throws IOException { out.writeInt(migrationData.size()); for (Map.Entry<String, Permit> entry : migrationData.entrySet()) { String key = entry.getKey(); Permit value = entry.getValue(); out.writeUTF(key); value.writeData(out); } } @Override protected void readInternal(ObjectDataInput in) throws IOException { int size = in.readInt(); migrationData = new HashMap<String, Permit>(size); for (int i = 0; i < size; i++) { String name = in.readUTF(); Permit permit = new Permit(); permit.readData(in); migrationData.put(name, permit); } } @Override public int getFactoryId() { return SemaphoreDataSerializerHook.F_ID; } @Override public int getId() { return SemaphoreDataSerializerHook.SEMAPHORE_REPLICATION_OPERATION; } }
0true
hazelcast_src_main_java_com_hazelcast_concurrent_semaphore_operations_SemaphoreReplicationOperation.java
1,359
final MultiExecutionCallback callback = new MultiExecutionCallback() { public void onResponse(Member member, Object value) { count.incrementAndGet(); countDownLatch.countDown(); } public void onComplete(Map<Member, Object> values) { } };
0true
hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java
3,674
public static class Builder extends Mapper.Builder<Builder, ParentFieldMapper> { protected String indexName; private String type; protected PostingsFormatProvider postingsFormat; public Builder() { super(Defaults.NAME); this.indexName = name; } public Builder type(String type) { this.type = type; return builder; } protected Builder postingsFormat(PostingsFormatProvider postingsFormat) { this.postingsFormat = postingsFormat; return builder; } @Override public ParentFieldMapper build(BuilderContext context) { if (type == null) { throw new MapperParsingException("Parent mapping must contain the parent type"); } return new ParentFieldMapper(name, indexName, type, postingsFormat, null, context.indexSettings()); } }
0true
src_main_java_org_elasticsearch_index_mapper_internal_ParentFieldMapper.java
836
@RunWith(HazelcastParallelClassRunner.class) @Category(QuickTest.class) public class AtomicReferenceTest extends HazelcastTestSupport { @Test @ClientCompatibleTest public void getAndSet() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("getAndSet"); assertNull(ref.getAndSet("foo")); assertEquals("foo", ref.getAndSet("bar")); assertEquals("bar", ref.getAndSet("bar")); } @Test @ClientCompatibleTest public void isNull() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("isNull"); assertTrue(ref.isNull()); ref.set("foo"); assertFalse(ref.isNull()); } @Test @ClientCompatibleTest public void get() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("get"); assertNull(ref.get()); ref.set("foo"); assertEquals("foo", ref.get()); } @Test @ClientCompatibleTest public void setAndGet() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("setAndGet"); assertNull(ref.setAndGet(null)); assertNull(ref.get()); assertEquals("foo", ref.setAndGet("foo")); assertEquals("foo", ref.get()); assertEquals("bar", ref.setAndGet("bar")); assertEquals("bar", ref.get()); assertNull(ref.setAndGet(null)); assertNull(ref.get()); } @Test @ClientCompatibleTest public void set() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("set"); ref.set(null); assertNull(ref.get()); ref.set("foo"); assertEquals("foo", ref.get()); ref.setAndGet("bar"); assertEquals("bar", ref.get()); ref.set(null); assertNull(ref.get()); } @Test @ClientCompatibleTest public void clear() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("clear"); ref.clear(); assertNull(ref.get()); ref.set("foo"); ref.clear(); assertNull(ref.get()); ref.set(null); assertNull(ref.get()); } @Test @ClientCompatibleTest public void contains() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("clear"); assertTrue(ref.contains(null)); assertFalse(ref.contains("foo")); ref.set("foo"); assertFalse(ref.contains(null)); assertTrue(ref.contains("foo")); assertFalse(ref.contains("bar")); } @Test @ClientCompatibleTest public void compareAndSet() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("compareAndSet"); assertTrue(ref.compareAndSet(null, null)); assertNull(ref.get()); assertFalse(ref.compareAndSet("foo", "bar")); assertNull(ref.get()); assertTrue(ref.compareAndSet(null, "foo")); assertEquals("foo", ref.get()); ref.set("foo"); assertTrue(ref.compareAndSet("foo", "foo")); assertEquals("foo", ref.get()); assertTrue(ref.compareAndSet("foo", "bar")); assertEquals("bar", ref.get()); assertTrue(ref.compareAndSet("bar", null)); assertNull(ref.get()); } @Test(expected = IllegalArgumentException.class) @ClientCompatibleTest public void apply_whenCalledWithNullFunction() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("apply_whenCalledWithNullFunction"); ref.apply(null); } @Test @ClientCompatibleTest public void apply() { 
HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("apply"); assertEquals("null", ref.apply(new AppendFunction(""))); assertEquals(null, ref.get()); ref.set("foo"); assertEquals("foobar", ref.apply(new AppendFunction("bar"))); assertEquals("foo", ref.get()); assertEquals(null, ref.apply(new NullFunction())); assertEquals("foo", ref.get()); } @Test @ClientCompatibleTest public void apply_whenException() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("apply"); ref.set("foo"); try { ref.apply(new FailingFunction()); fail(); } catch (WoohaaException expected) { } assertEquals("foo", ref.get()); } @Test(expected = IllegalArgumentException.class) @ClientCompatibleTest public void alter_whenCalledWithNullFunction() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("alter_whenCalledWithNullFunction"); ref.alter(null); } @Test @ClientCompatibleTest public void alter_whenException() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("alter_whenException"); ref.set("foo"); try { ref.alter(new FailingFunction()); fail(); } catch (WoohaaException expected) { } assertEquals("foo", ref.get()); } @Test @ClientCompatibleTest public void alter() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("alter"); ref.alter(new NullFunction()); assertEquals(null, ref.get()); ref.set("foo"); ref.alter(new AppendFunction("bar")); assertEquals("foobar", ref.get()); ref.alter(new NullFunction()); assertEquals(null, ref.get()); } @Test(expected = IllegalArgumentException.class) @ClientCompatibleTest public void alterAndGet_whenCalledWithNullFunction() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("alterAndGet_whenCalledWithNullFunction"); ref.alterAndGet(null); } @Test @ClientCompatibleTest public void alterAndGet_whenException() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("alterAndGet_whenException"); ref.set("foo"); try { ref.alterAndGet(new FailingFunction()); fail(); } catch (WoohaaException expected) { } assertEquals("foo", ref.get()); } @Test @ClientCompatibleTest public void alterAndGet() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("alterAndGet"); assertNull(ref.alterAndGet(new NullFunction())); assertEquals(null, ref.get()); ref.set("foo"); assertEquals("foobar", ref.alterAndGet(new AppendFunction("bar"))); assertEquals("foobar", ref.get()); assertEquals(null, ref.alterAndGet(new NullFunction())); assertEquals(null, ref.get()); } @Test(expected = IllegalArgumentException.class) @ClientCompatibleTest public void getAndAlter_whenCalledWithNullFunction() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("getAndAlter_whenCalledWithNullFunction"); ref.getAndAlter(null); } @Test @ClientCompatibleTest public void getAndAlter_whenException() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = 
hazelcastInstance.getAtomicReference("getAndAlter_whenException"); ref.set("foo"); try { ref.getAndAlter(new FailingFunction()); fail(); } catch (WoohaaException expected) { } assertEquals("foo", ref.get()); } @Test @ClientCompatibleTest public void getAndAlter() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("getAndAlter"); assertNull(ref.getAndAlter(new NullFunction())); assertEquals(null, ref.get()); ref.set("foo"); assertEquals("foo", ref.getAndAlter(new AppendFunction("bar"))); assertEquals("foobar", ref.get()); assertEquals("foobar", ref.getAndAlter(new NullFunction())); assertEquals(null, ref.get()); } private static class AppendFunction implements IFunction<String, String> { private String add; private AppendFunction(String add) { this.add = add; } @Override public String apply(String input) { return input + add; } } private static class NullFunction implements IFunction<String, String> { @Override public String apply(String input) { return null; } } private static class FailingFunction implements IFunction<String, String> { @Override public String apply(String input) { throw new WoohaaException(); } } private static class WoohaaException extends RuntimeException { } @Test public void testToString() { HazelcastInstance hazelcastInstance = createHazelcastInstance(); IAtomicReference<String> ref = hazelcastInstance.getAtomicReference("toString"); assertEquals("IAtomicReference{name='toString'}", ref.toString()); } }
0true
hazelcast_src_test_java_com_hazelcast_concurrent_atomicreference_AtomicReferenceTest.java
3,267
public class DocIdOrdinals implements Ordinals { private final int numDocs; /** * Constructs a new doc id ordinals. */ public DocIdOrdinals(int numDocs) { this.numDocs = numDocs; } @Override public long getMemorySizeInBytes() { return RamUsageEstimator.NUM_BYTES_OBJECT_REF; } @Override public boolean isMultiValued() { return false; } @Override public int getNumDocs() { return numDocs; } @Override public long getNumOrds() { return numDocs; } @Override public long getMaxOrd() { return 1L + numDocs; } @Override public Ordinals.Docs ordinals() { return new Docs(this); } public static class Docs implements Ordinals.Docs { private final DocIdOrdinals parent; private final LongsRef longsScratch = new LongsRef(new long[1], 0, 1); private int docId = -1; private long currentOrdinal = -1; public Docs(DocIdOrdinals parent) { this.parent = parent; } @Override public Ordinals ordinals() { return parent; } @Override public int getNumDocs() { return parent.getNumDocs(); } @Override public long getNumOrds() { return parent.getNumOrds(); } @Override public long getMaxOrd() { return parent.getMaxOrd(); } @Override public boolean isMultiValued() { return false; } @Override public long getOrd(int docId) { return currentOrdinal = docId + 1; } @Override public LongsRef getOrds(int docId) { longsScratch.longs[0] = currentOrdinal = docId + 1; return longsScratch; } @Override public long nextOrd() { assert docId >= 0; currentOrdinal = docId + 1; docId = -1; return currentOrdinal; } @Override public int setDocument(int docId) { this.docId = docId; return 1; } @Override public long currentOrd() { return currentOrdinal; } } }
0true
src_main_java_org_elasticsearch_index_fielddata_ordinals_DocIdOrdinals.java
99
public interface Page extends Serializable { public Long getId(); public void setId(Long id); public String getFullUrl(); public void setFullUrl(String fullUrl); public String getDescription(); public void setDescription(String description); public PageTemplate getPageTemplate(); public void setPageTemplate(PageTemplate pageTemplate); public Map<String, PageField> getPageFields(); public void setPageFields(Map<String, PageField> pageFields); public Boolean getDeletedFlag(); public void setDeletedFlag(Boolean deletedFlag); public Boolean getArchivedFlag(); public void setArchivedFlag(Boolean archivedFlag); public SandBox getSandbox(); public void setSandbox(SandBox sandbox); public Boolean getLockedFlag(); public void setLockedFlag(Boolean lockedFlag); public Long getOriginalPageId(); public void setOriginalPageId(Long originalPageId); public SandBox getOriginalSandBox(); public void setOriginalSandBox(SandBox originalSandBox); public AdminAuditable getAuditable(); public void setAuditable(AdminAuditable auditable); /** * Returns the offlineFlag. True indicates that the page should no longer appear on the site. * The item will still appear within the content administration program but no longer * be returned as part of the client facing APIs. * * @return true if this item is offline */ @Nullable public Boolean getOfflineFlag(); /** * Sets the offline flag. * * @param offlineFlag */ public void setOfflineFlag(@Nullable Boolean offlineFlag); /** * Gets the integer priority of this content item. Items with a lower priority should * be displayed before items with a higher priority. * * @return the priority as a numeric value */ @Nullable public Integer getPriority(); /** * Sets the display priority of this item. Lower priorities should be displayed first. * * @param priority */ public void setPriority(@Nullable Integer priority); /** * Returns a map of the targeting rules associated with this page. * * Targeting rules are defined in the content mangagement system and used to * enforce which page is returned to the client. * * @return */ @Nullable public Map<String, PageRule> getPageMatchRules(); /** * Sets the targeting rules for this content item. * * @param pageRules */ public void setPageMatchRules(@Nullable Map<String, PageRule> pageRules); /** * Returns the item (or cart) based rules associated with this content item. * * @return */ @Nullable public Set<PageItemCriteria> getQualifyingItemCriteria(); /** * Sets the item (e.g. cart) based rules associated with this content item. * * @param qualifyingItemCriteria */ public void setQualifyingItemCriteria(@Nullable Set<PageItemCriteria> qualifyingItemCriteria); public Page cloneEntity(); }
0true
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_Page.java
1,131
public class OrderItemType implements Serializable, BroadleafEnumerationType { private static final long serialVersionUID = 1L; private static final Map<String, OrderItemType> TYPES = new LinkedHashMap<String, OrderItemType>(); public static final OrderItemType BASIC = new OrderItemType("org.broadleafcommerce.core.order.domain.OrderItem", "Basic Order Item"); public static final OrderItemType DISCRETE = new OrderItemType("org.broadleafcommerce.core.order.domain.DiscreteOrderItem", "Discrete Order Item"); public static final OrderItemType EXTERNALLY_PRICED = new OrderItemType("org.broadleafcommerce.core.order.domain.DynamicPriceDiscreteOrderItem", "Externally Priced Discrete Order Item"); public static final OrderItemType BUNDLE = new OrderItemType("org.broadleafcommerce.core.order.domain.BundleOrderItem", "Bundle Order Item"); public static final OrderItemType GIFTWRAP = new OrderItemType("org.broadleafcommerce.core.order.domain.GiftWrapOrderItem", "Gift Wrap Order Item"); public static OrderItemType getInstance(final String type) { return TYPES.get(type); } private String type; private String friendlyType; public OrderItemType() { //do nothing } public OrderItemType(final String type, final String friendlyType) { this.friendlyType = friendlyType; setType(type); } public String getType() { return type; } public String getFriendlyType() { return friendlyType; } private void setType(final String type) { this.type = type; if (!TYPES.containsKey(type)) { TYPES.put(type, this); } } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((type == null) ? 0 : type.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; OrderItemType other = (OrderItemType) obj; if (type == null) { if (other.type != null) return false; } else if (!type.equals(other.type)) return false; return true; } }
1no label
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_type_OrderItemType.java
1,312
public static enum TrackMode { NONE, FULL, ROLLBACK_ONLY }
0true
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_ODurablePage.java
617
new OIndexEngine.EntriesResultListener() { @Override public boolean addResult(ODocument entry) { return indexEntriesResultListener.addResult(entry); } });
1no label
core_src_main_java_com_orientechnologies_orient_core_index_OIndexMultiValues.java
263
public interface OCommandDistributedReplicateRequest { public boolean isReplicated(); }
0true
core_src_main_java_com_orientechnologies_orient_core_command_OCommandDistributedReplicateRequest.java
1,010
public class DeadMemberBackupOperation extends SemaphoreBackupOperation implements IdentifiedDataSerializable { public DeadMemberBackupOperation() { } public DeadMemberBackupOperation(String name, String firstCaller) { super(name, -1, firstCaller); } @Override public void run() throws Exception { Permit permit = getPermit(); permit.memberRemoved(firstCaller); response = true; } @Override public int getFactoryId() { return SemaphoreDataSerializerHook.F_ID; } @Override public int getId() { return SemaphoreDataSerializerHook.DEAD_MEMBER_BACKUP_OPERATION; } }
0true
hazelcast_src_main_java_com_hazelcast_concurrent_semaphore_operations_DeadMemberBackupOperation.java
1,332
Future future = executorService.submit(new Callable<String>() { @Override public String call() { try { latch1.await(30, TimeUnit.SECONDS); return "success"; } catch (Exception e) { throw new RuntimeException(e); } } });
0true
hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java
101
@Entity @Inheritance(strategy = InheritanceType.JOINED) @Table(name = "BLC_PAGE_FLD") @EntityListeners(value = { AdminAuditableListener.class }) public class PageFieldImpl implements PageField { private static final long serialVersionUID = 1L; @Id @GeneratedValue(generator = "PageFieldId") @GenericGenerator( name="PageFieldId", strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator", parameters = { @Parameter(name="segment_value", value="PageFieldImpl"), @Parameter(name="entity_name", value="org.broadleafcommerce.cms.page.domain.PageFieldImpl") } ) @Column(name = "PAGE_FLD_ID") protected Long id; @Embedded @AdminPresentation(excluded = true) protected AdminAuditable auditable = new AdminAuditable(); @Column (name = "FLD_KEY") protected String fieldKey; @ManyToOne(targetEntity = PageImpl.class) @JoinColumn(name="PAGE_ID") protected Page page; @Column (name = "VALUE") protected String stringValue; @Column(name = "LOB_VALUE", length = Integer.MAX_VALUE-1) @Lob @Type(type = "org.hibernate.type.StringClobType") protected String lobValue; @Override public Long getId() { return id; } @Override public void setId(Long id) { this.id = id; } @Override public String getFieldKey() { return fieldKey; } @Override public void setFieldKey(String fieldKey) { this.fieldKey = fieldKey; } @Override public Page getPage() { return page; } @Override public void setPage(Page page) { this.page = page; } @Override public String getValue() { if (stringValue != null && stringValue.length() > 0) { return stringValue; } else { return lobValue; } } @Override public void setValue(String value) { if (value != null) { if (value.length() <= 256) { stringValue = value; lobValue = null; } else { stringValue = null; lobValue = value; } } else { lobValue = null; stringValue = null; } } @Override public AdminAuditable getAuditable() { return auditable; } @Override public void setAuditable(AdminAuditable auditable) { this.auditable = auditable; } @Override public PageField cloneEntity() { PageFieldImpl newPageField = new PageFieldImpl(); newPageField.fieldKey = fieldKey; newPageField.page = page; newPageField.lobValue = lobValue; newPageField.stringValue = stringValue; return newPageField; } }
0true
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_domain_PageFieldImpl.java
74
@Entity @Inheritance(strategy = InheritanceType.JOINED) @EntityListeners(value = { AdminAuditableListener.class }) @Table(name = "BLC_IMG_STATIC_ASSET") @Cache(usage= CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blCMSElements") public class ImageStaticAssetImpl extends StaticAssetImpl implements ImageStaticAsset { @Column(name ="WIDTH") @AdminPresentation(friendlyName = "ImageStaticAssetImpl_Width", order = Presentation.FieldOrder.LAST + 1000, tab = Presentation.Tab.Name.File_Details, tabOrder = Presentation.Tab.Order.File_Details, readOnly = true) protected Integer width; @Column(name ="HEIGHT") @AdminPresentation(friendlyName = "ImageStaticAssetImpl_Height", order = Presentation.FieldOrder.LAST + 2000, tab = Presentation.Tab.Name.File_Details, tabOrder = Presentation.Tab.Order.File_Details, readOnly = true) protected Integer height; @Override public Integer getWidth() { return width; } @Override public void setWidth(Integer width) { this.width = width; } @Override public Integer getHeight() { return height; } @Override public void setHeight(Integer height) { this.height = height; } @Override public ImageStaticAsset cloneEntity() { ImageStaticAssetImpl asset = new ImageStaticAssetImpl(); asset.name = name; asset.site = site; asset.archivedFlag = archivedFlag; asset.deletedFlag = deletedFlag; asset.fullUrl = fullUrl; asset.fileSize = fileSize; asset.mimeType = mimeType; asset.sandbox = sandbox; asset.originalAssetId = originalAssetId; asset.width = width; asset.height = height; for (String key : contentMessageValues.keySet()) { StaticAssetDescription oldAssetDescription = contentMessageValues.get(key); StaticAssetDescription newAssetDescription = oldAssetDescription.cloneEntity(); asset.getContentMessageValues().put(key, newAssetDescription); } return asset; } }
0 (true)
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_domain_ImageStaticAssetImpl.java
2,748
writeStateExecutor.execute(new Runnable() { @Override public void run() { Set<String> indicesDeleted = Sets.newHashSet(); if (event.localNodeMaster()) { logger.debug("writing to gateway {} ...", this); StopWatch stopWatch = new StopWatch().start(); try { write(event.state().metaData()); logger.debug("wrote to gateway {}, took {}", this, stopWatch.stop().totalTime()); // TODO, we need to remember that we failed, maybe add a retry scheduler? } catch (Exception e) { logger.error("failed to write to gateway", e); } if (currentMetaData != null) { for (IndexMetaData current : currentMetaData) { if (!event.state().metaData().hasIndex(current.index())) { delete(current); indicesDeleted.add(current.index()); } } } } if (nodeEnv != null && nodeEnv.hasNodeFile()) { if (currentMetaData != null) { for (IndexMetaData current : currentMetaData) { if (!event.state().metaData().hasIndex(current.index())) { FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index()))); indicesDeleted.add(current.index()); } } } } currentMetaData = event.state().metaData(); for (String indexDeleted : indicesDeleted) { try { nodeIndexDeletedAction.nodeIndexStoreDeleted(event.state(), indexDeleted, event.state().nodes().localNodeId()); } catch (Exception e) { logger.debug("[{}] failed to notify master on local index store deletion", e, indexDeleted); } } } });
0 (true)
src_main_java_org_elasticsearch_gateway_shared_SharedStorageGateway.java
6,231
public class RestClient implements Closeable { private static final ESLogger logger = Loggers.getLogger(RestClient.class); private final RestSpec restSpec; private final CloseableHttpClient httpClient; private final String host; private final int port; private final String esVersion; public RestClient(String host, int port, RestSpec restSpec) throws IOException, RestException { this.restSpec = restSpec; this.httpClient = createHttpClient(); this.host = host; this.port = port; this.esVersion = readVersion(); logger.info("REST client initialized [{}:{}], elasticsearch version: [{}]", host, port, esVersion); } private String readVersion() throws IOException, RestException { //we make a manual call here without using callApi method, mainly because we are initializing //and the randomized context doesn't exist for the current thread (would be used to choose the method otherwise) RestApi restApi = restApi("info"); assert restApi.getPaths().size() == 1; assert restApi.getMethods().size() == 1; RestResponse restResponse = new RestResponse(httpRequestBuilder() .path(restApi.getPaths().get(0)) .method(restApi.getMethods().get(0)).execute()); checkStatusCode(restResponse); Object version = restResponse.evaluate("version.number"); if (version == null) { throw new RuntimeException("elasticsearch version not found in the response"); } return version.toString(); } public String getEsVersion() { return esVersion; } /** * Calls an api with the provided parameters * @throws RestException if the obtained status code is non ok, unless the specific error code needs to be ignored * according to the ignore parameter received as input (which won't get sent to elasticsearch) */ public RestResponse callApi(String apiName, String... params) throws IOException, RestException { if (params.length % 2 != 0) { throw new IllegalArgumentException("The number of params passed must be even but was [" + params.length + "]"); } Map<String, String> paramsMap = Maps.newHashMap(); for (int i = 0; i < params.length; i++) { paramsMap.put(params[i++], params[i]); } return callApi(apiName, paramsMap, null); } /** * Calls an api with the provided parameters and body * @throws RestException if the obtained status code is non ok, unless the specific error code needs to be ignored * according to the ignore parameter received as input (which won't get sent to elasticsearch) */ public RestResponse callApi(String apiName, Map<String, String> params, String body) throws IOException, RestException { List<Integer> ignores = Lists.newArrayList(); Map<String, String> requestParams = null; if (params != null) { //makes a copy of the parameters before modifying them for this specific request requestParams = Maps.newHashMap(params); //ignore is a special parameter supported by the clients, shouldn't be sent to es String ignoreString = requestParams.remove("ignore"); if (Strings.hasLength(ignoreString)) { try { ignores.add(Integer.valueOf(ignoreString)); } catch(NumberFormatException e) { throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead"); } } } HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body); logger.debug("calling api [{}]", apiName); HttpResponse httpResponse = httpRequestBuilder.execute(); //http HEAD doesn't support response body // For the few api (exists class of api) that use it we need to accept 404 too if (!httpResponse.supportsBody()) { ignores.add(404); } RestResponse restResponse = new RestResponse(httpResponse); checkStatusCode(restResponse, 
ignores); return restResponse; } private void checkStatusCode(RestResponse restResponse, List<Integer> ignores) throws RestException { //ignore is a catch within the client, to prevent the client from throwing error if it gets non ok codes back if (ignores.contains(restResponse.getStatusCode())) { if (logger.isDebugEnabled()) { logger.debug("ignored non ok status codes {} as requested", ignores); } return; } checkStatusCode(restResponse); } private void checkStatusCode(RestResponse restResponse) throws RestException { if (restResponse.isError()) { throw new RestException("non ok status code [" + restResponse.getStatusCode() + "] returned", restResponse); } } private HttpRequestBuilder callApiBuilder(String apiName, Map<String, String> params, String body) { //create doesn't exist in the spec but is supported in the clients (index with op_type=create) boolean indexCreateApi = "create".equals(apiName); String api = indexCreateApi ? "index" : apiName; RestApi restApi = restApi(api); HttpRequestBuilder httpRequestBuilder = httpRequestBuilder(); if (Strings.hasLength(body)) { if (!restApi.isBodySupported()) { throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api"); } httpRequestBuilder.body(body); } else { if (restApi.isBodyRequired()) { throw new IllegalArgumentException("body is required by [" + restApi.getName() + "] api"); } } //divide params between ones that go within query string and ones that go within path Map<String, String> pathParts = Maps.newHashMap(); if (params != null) { for (Map.Entry<String, String> entry : params.entrySet()) { if (restApi.getPathParts().contains(entry.getKey())) { pathParts.put(entry.getKey(), entry.getValue()); } else { if (!restApi.getParams().contains(entry.getKey())) { throw new IllegalArgumentException("param [" + entry.getKey() + "] not supported in [" + restApi.getName() + "] api"); } httpRequestBuilder.addParam(entry.getKey(), entry.getValue()); } } } if (indexCreateApi) { httpRequestBuilder.addParam("op_type", "create"); } //the http method is randomized (out of the available ones with the chosen api) return httpRequestBuilder.method(RandomizedTest.randomFrom(restApi.getSupportedMethods(pathParts.keySet()))) .path(RandomizedTest.randomFrom(restApi.getFinalPaths(pathParts))); } private RestApi restApi(String apiName) { RestApi restApi = restSpec.getApi(apiName); if (restApi == null) { throw new IllegalArgumentException("rest api [" + apiName + "] doesn't exist in the rest spec"); } return restApi; } protected HttpRequestBuilder httpRequestBuilder() { return new HttpRequestBuilder(httpClient).host(host).port(port); } protected CloseableHttpClient createHttpClient() { return HttpClients.createDefault(); } /** * Closes the REST client and the underlying http client */ public void close() { try { httpClient.close(); } catch(IOException e) { logger.error(e.getMessage(), e); } } }
1 (no label)
src_test_java_org_elasticsearch_test_rest_client_RestClient.java
361
public class HBaseCompatLoader { private static final Logger log = LoggerFactory .getLogger(HBaseCompatLoader.class); // public static final String TITAN_HBASE_COMPAT_CLASS_KEY = "TITAN_HBASE_COMPAT_CLASS"; // private static final String TITAN_HBASE_COMPAT_CLASS; // // static { // // String s; // // if (null != (s = System.getProperty(TITAN_HBASE_COMPAT_CLASS_KEY))) { // log.info("Read {} from system properties: {}", TITAN_HBASE_COMPAT_CLASS_KEY, s); // } else if (null != (s = System.getenv(TITAN_HBASE_COMPAT_CLASS_KEY))) { // log.info("Read {} from process environment: {}", TITAN_HBASE_COMPAT_CLASS_KEY, s); // } else { // log.debug("Could not read {} from system properties or process environment; using HBase VersionInfo to resolve compat layer", TITAN_HBASE_COMPAT_CLASS_KEY); // } // // TITAN_HBASE_COMPAT_CLASS = s; // } private static HBaseCompat cachedCompat; public synchronized static HBaseCompat getCompat(String classOverride) { if (null != cachedCompat) { log.debug("Returning cached HBase compatibility layer: {}", cachedCompat); return cachedCompat; } HBaseCompat compat = null; String className = null; String classNameSource = null; if (null != classOverride) { className = classOverride; classNameSource = "from explicit configuration"; } else { String hbaseVersion = VersionInfo.getVersion(); for (String supportedVersion : Arrays.asList("0.94", "0.96", "0.98")) { if (hbaseVersion.startsWith(supportedVersion + ".")) { className = "com.thinkaurelius.titan.diskstorage.hbase.HBaseCompat" + supportedVersion.replaceAll("\\.", "_"); classNameSource = "supporting runtime HBase version " + hbaseVersion; break; } } if (null == className) { throw new RuntimeException("Unrecognized or unsupported HBase version " + hbaseVersion); } } final String errTemplate = " when instantiating HBase compatibility class " + className; try { compat = (HBaseCompat)Class.forName(className).newInstance(); log.info("Instantiated HBase compatibility layer {}: {}", classNameSource, compat.getClass().getCanonicalName()); } catch (IllegalAccessException e) { throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e); } catch (InstantiationException e) { throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e); } catch (ClassNotFoundException e) { throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e); } return cachedCompat = compat; } }
0 (true)
titan-hbase-parent_titan-hbase-core_src_main_java_com_thinkaurelius_titan_diskstorage_hbase_HBaseCompatLoader.java
202
addTask(new Runnable() { public void run() { live = false; shutdownLatch.countDown(); } });
1 (no label)
hazelcast-client_src_main_java_com_hazelcast_client_connection_nio_ClientAbstractIOSelector.java
433
static final class Fields { static final XContentBuilderString COUNT = new XContentBuilderString("count"); }
0 (true)
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsIndices.java
1,294
@Repository("blSearchInterceptDao") @Deprecated public class SearchInterceptDaoImpl implements SearchInterceptDao { @PersistenceContext(unitName = "blPU") protected EntityManager em; @Override public SearchIntercept findInterceptByTerm(String term) { Query query = em.createNamedQuery("BC_READ_SEARCH_INTERCEPT_BY_TERM"); query.setParameter("searchTerm", term); SearchIntercept result; try { result = (SearchIntercept) query.getSingleResult(); } catch (NoResultException e) { result = null; } return result; } @Override @SuppressWarnings("unchecked") public List<SearchIntercept> findAllIntercepts() { Query query = em.createNamedQuery("BC_READ_ALL_SEARCH_INTERCEPTS"); List<SearchIntercept> result; try { result = query.getResultList(); } catch (NoResultException e) { result = null; } return result; } @Override public void createIntercept(SearchIntercept intercept) { em.persist(intercept); } @Override public void deleteIntercept(SearchIntercept intercept) { em.remove(intercept); } @Override public void updateIntercept(SearchIntercept intercept) { em.merge(intercept); } }
0 (true)
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_dao_SearchInterceptDaoImpl.java
3,387
public static class Builder implements IndexFieldData.Builder { @Override public IndexFieldData<PagedBytesAtomicFieldData> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache, CircuitBreakerService breakerService) { return new PagedBytesIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService); } }
0 (true)
src_main_java_org_elasticsearch_index_fielddata_plain_PagedBytesIndexFieldData.java
1,541
@Ignore public class XATransactionTest extends AbstractDeploymentTest { // @Inject // UserTransaction userTx; @Resource(mappedName = "java:jboss/UserTransaction") private UserTransaction userTx; @Resource(mappedName = "java:jboss/datasources/ExampleDS") // @Resource(mappedName = "java:/HazelcastDS") protected DataSource h2Datasource; private static boolean isDbInit=false; @Before public void Init() throws SQLException { if(!isDbInit){ isDbInit=true; Connection con = h2Datasource.getConnection(); Statement stmt = null; try { stmt = con.createStatement(); stmt.execute("create table TEST (A varchar(3))"); } finally { if (stmt != null) { stmt.close(); } con.close(); } } } @Test @InSequence(2) @Ignore public void testTransactionCommit() throws Throwable { userTx.begin(); HazelcastConnection c = getConnection(); try { TransactionalMap<String, String> m = c.getTransactionalMap("testTransactionCommit"); m.put("key", "value"); doSql(); assertEquals("value", m.get("key")); } finally { c.close(); } userTx.commit(); HazelcastConnection con2 = getConnection(); try { assertEquals("value", con2.getMap("testTransactionCommit").get("key")); validateSQLdata(true); } finally { con2.close(); } } @Test @InSequence(1) @Ignore public void testTransactionRollback() throws Throwable { userTx.begin(); HazelcastConnection c = getConnection(); try { TransactionalMap<String, String> m = c.getTransactionalMap("testTransactionRollback"); m.put("key", "value"); assertEquals("value", m.get("key")); doSql(); } finally { c.close(); } userTx.rollback(); HazelcastConnection con2 = getConnection(); try { assertEquals(null, con2.getMap("testTransactionRollback").get("key")); validateSQLdata(false); } finally { con2.close(); } } private void doSql() throws NamingException, SQLException { Connection con = h2Datasource.getConnection(); Statement stmt = null; try { stmt = con.createStatement(); stmt.execute("INSERT INTO TEST VALUES ('txt')"); } finally { if (stmt != null) { stmt.close(); } con.close(); } } private void validateSQLdata(boolean hasdata) throws NamingException, SQLException { Connection con = h2Datasource.getConnection(); Statement stmt = null; try { stmt = con.createStatement(); ResultSet resultSet = null; try { resultSet = stmt.executeQuery("SELECT * FROM TEST"); } catch (SQLException e) { } if(hasdata){ assertNotNull(resultSet); assertTrue(resultSet.first()); assertEquals("txt", resultSet.getString("A")); }else{ assertTrue(resultSet == null || resultSet.getFetchSize() == 0 ); } } finally { if (stmt != null) { stmt.close(); } con.close(); } } }
0 (true)
hazelcast-ra_hazelcast-jca_src_test_java_com_hazelcast_jca_XATransactionTest.java
3,691
public static class Defaults extends LongFieldMapper.Defaults { public static final String NAME = TTLFieldMapper.CONTENT_TYPE; public static final FieldType TTL_FIELD_TYPE = new FieldType(LongFieldMapper.Defaults.FIELD_TYPE); static { TTL_FIELD_TYPE.setStored(true); TTL_FIELD_TYPE.setIndexed(true); TTL_FIELD_TYPE.setTokenized(false); TTL_FIELD_TYPE.freeze(); } public static final EnabledAttributeMapper ENABLED_STATE = EnabledAttributeMapper.DISABLED; public static final long DEFAULT = -1; }
1 (no label)
src_main_java_org_elasticsearch_index_mapper_internal_TTLFieldMapper.java
1,668
public interface AdminPermissionDao { public List<AdminPermission> readAllAdminPermissions(); public AdminPermission readAdminPermissionById(Long id); public AdminPermission readAdminPermissionByName(String name); public AdminPermission saveAdminPermission(AdminPermission permission); public void deleteAdminPermission(AdminPermission permission); public boolean isUserQualifiedForOperationOnCeilingEntity(AdminUser adminUser, PermissionType permissionType, String ceilingEntityFullyQualifiedName); public boolean doesOperationExistForCeilingEntity(PermissionType permissionType, String ceilingEntityFullyQualifiedName); }
0 (true)
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_security_dao_AdminPermissionDao.java
1,007
threadPool.executor(executor()).execute(new Runnable() { @Override public void run() { try { Response response = shardOperation(request, shard.id()); listener.onResponse(response); } catch (Throwable e) { shardsIt.reset(); onFailure(shard, e); } } });
0 (true)
src_main_java_org_elasticsearch_action_support_single_custom_TransportSingleCustomOperationAction.java
1,237
public enum ALLOC_STRATEGY { MMAP_ALWAYS, MMAP_WRITE_ALWAYS_READ_IF_AVAIL_POOL, MMAP_WRITE_ALWAYS_READ_IF_IN_MEM, MMAP_ONLY_AVAIL_POOL, MMAP_NEVER }
0 (true)
core_src_main_java_com_orientechnologies_orient_core_storage_fs_OMMapManager.java
1,898
return new MethodInvoker() { public Object invoke(Object target, Object... parameters) throws IllegalAccessException, InvocationTargetException { return method.invoke(target, parameters); } };
0 (true)
src_main_java_org_elasticsearch_common_inject_SingleMethodInjector.java
2,991
public interface CacheKeyFilter { public static class Key { private final byte[] bytes; // we pre-compute the hashCode for better performance (especially in IdCache) private final int hashCode; public Key(byte[] bytes) { this.bytes = bytes; this.hashCode = Arrays.hashCode(bytes); } public Key(String str) { this(Strings.toUTF8Bytes(str)); } public byte[] bytes() { return this.bytes; } @Override public boolean equals(Object o) { if (this == o) return true; if (o.getClass() != this.getClass()) { return false; } Key bytesWrap = (Key) o; return Arrays.equals(bytes, bytesWrap.bytes); } @Override public int hashCode() { return hashCode; } } public static class Wrapper extends Filter implements CacheKeyFilter { private final Filter filter; private final Key key; public Wrapper(Filter filter, Key key) { this.filter = filter; this.key = key; } @Override public Key cacheKey() { return key; } public Filter wrappedFilter() { return filter; } @Override public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException { return filter.getDocIdSet(context, acceptDocs); } @Override public int hashCode() { return filter.hashCode(); } @Override public boolean equals(Object obj) { return filter.equals(obj); } @Override public String toString() { return filter.toString(); } } Object cacheKey(); }
0 (true)
src_main_java_org_elasticsearch_index_cache_filter_support_CacheKeyFilter.java
3,482
public static class MergeFlags { public static MergeFlags mergeFlags() { return new MergeFlags(); } private boolean simulate = true; public MergeFlags() { } /** * A simulation run, don't perform actual modifications to the mapping. */ public boolean simulate() { return simulate; } public MergeFlags simulate(boolean simulate) { this.simulate = simulate; return this; } }
0 (true)
src_main_java_org_elasticsearch_index_mapper_DocumentMapper.java
1,494
public class BroadleafOrderConfirmationController extends BroadleafAbstractController { @Resource(name = "blOrderService") protected OrderService orderService; protected static String orderConfirmationView = "checkout/confirmation"; public String displayOrderConfirmationByOrderNumber(String orderNumber, Model model, HttpServletRequest request, HttpServletResponse response) { Customer customer = CustomerState.getCustomer(); if (customer != null) { Order order = orderService.findOrderByOrderNumber(orderNumber); if (order != null && customer.equals(order.getCustomer())) { model.addAttribute("order", order); return getOrderConfirmationView(); } } return null; } public String displayOrderConfirmationByOrderId(Long orderId, Model model, HttpServletRequest request, HttpServletResponse response) { Customer customer = CustomerState.getCustomer(); if (customer != null) { Order order = orderService.findOrderById(orderId); if (order != null && customer.equals(order.getCustomer())) { model.addAttribute("order", order); return getOrderConfirmationView(); } } return null; } public String getOrderConfirmationView() { return orderConfirmationView; } }
0 (true)
core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_controller_checkout_BroadleafOrderConfirmationController.java
1,180
public interface LifecycleListener extends EventListener { /** * Called when instance's state changes * @param event Lifecycle event * */ void stateChanged(LifecycleEvent event); }
0 (true)
hazelcast_src_main_java_com_hazelcast_core_LifecycleListener.java
366
@SuppressWarnings("unchecked") public class OGraphDatabasePooled extends OGraphDatabase implements ODatabasePooled { private OGraphDatabasePool ownerPool; public OGraphDatabasePooled(final OGraphDatabasePool iOwnerPool, final String iURL, final String iUserName, final String iUserPassword) { super(iURL); ownerPool = iOwnerPool; super.open(iUserName, iUserPassword); } public void reuse(final Object iOwner, final Object[] iAdditionalArgs) { ownerPool = (OGraphDatabasePool) iOwner; if (isClosed()) open((String) iAdditionalArgs[0], (String) iAdditionalArgs[1]); getLevel1Cache().invalidate(); // getMetadata().reload(); ODatabaseRecordThreadLocal.INSTANCE.set(this); checkForGraphSchema(); try { ODatabase current = underlying; while (!(current instanceof ODatabaseRaw) && ((ODatabaseComplex<?>) current).getUnderlying() != null) current = ((ODatabaseComplex<?>) current).getUnderlying(); ((ODatabaseRaw) current).callOnOpenListeners(); } catch (Exception e) { OLogManager.instance().error(this, "Error on reusing database '%s' in pool", e, getName()); } } @Override public OGraphDatabasePooled open(String iUserName, String iUserPassword) { throw new UnsupportedOperationException( "Database instance was retrieved from a pool. You cannot open the database in this way. Use directly a OGraphDatabase instance if you want to manually open the connection"); } @Override public OGraphDatabasePooled create() { throw new UnsupportedOperationException( "Database instance was retrieved from a pool. You cannot open the database in this way. Use directly a OGraphDatabase instance if you want to manually open the connection"); } public boolean isUnderlyingOpen() { return !super.isClosed(); } @Override public boolean isClosed() { return ownerPool == null || super.isClosed(); } /** * Avoid to close it but rather release itself to the owner pool. */ @Override public void close() { if (isClosed()) return; vertexBaseClass = null; edgeBaseClass = null; checkOpeness(); try { rollback(); } catch (Exception e) { OLogManager.instance().error(this, "Error on releasing database '%s' in pool", e, getName()); } try { ODatabase current = underlying; while (!(current instanceof ODatabaseRaw) && ((ODatabaseComplex<?>) current).getUnderlying() != null) current = ((ODatabaseComplex<?>) current).getUnderlying(); ((ODatabaseRaw) current).callOnCloseListeners(); } catch (Exception e) { OLogManager.instance().error(this, "Error on releasing database '%s' in pool", e, getName()); } getLevel1Cache().clear(); if (ownerPool != null) { final OGraphDatabasePool pool = ownerPool; ownerPool = null; pool.release(this); } } public void forceClose() { super.close(); } @Override protected void checkOpeness() { if (ownerPool == null) throw new ODatabaseException( "Database instance has been released to the pool. Get another database instance from the pool with the right username and password"); super.checkOpeness(); } }
0 (true)
core_src_main_java_com_orientechnologies_orient_core_db_graph_OGraphDatabasePooled.java
1,573
public class CountMapReduce { public static final String CLASS = Tokens.makeNamespace(CountMapReduce.class) + ".class"; public enum Counters { VERTICES_COUNTED, EDGES_COUNTED } public static Configuration createConfiguration(final Class<? extends Element> klass) { final Configuration configuration = new EmptyConfiguration(); configuration.setClass(CLASS, klass, Element.class); return configuration; } public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, LongWritable> { private boolean isVertex; private final LongWritable longWritable = new LongWritable(); private SafeMapperOutputs outputs; @Override public void setup(final Mapper.Context context) throws IOException, InterruptedException { this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class); this.outputs = new SafeMapperOutputs(context); } @Override public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException { if (this.isVertex) { final long pathCount = value.pathCount(); this.longWritable.set(pathCount); context.write(NullWritable.get(), this.longWritable); DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_COUNTED, pathCount > 0 ? 1 : 0); } else { long edgesCounted = 0; long pathCount = 0; for (final Edge e : value.getEdges(Direction.OUT)) { final StandardFaunusEdge edge = (StandardFaunusEdge) e; if (edge.hasPaths()) { edgesCounted++; pathCount = pathCount + edge.pathCount(); } } this.longWritable.set(pathCount); context.write(NullWritable.get(), this.longWritable); DEFAULT_COMPAT.incrementContextCounter(context, Counters.EDGES_COUNTED, edgesCounted); } this.outputs.write(Tokens.GRAPH, NullWritable.get(), value); } @Override public void cleanup(final Mapper<NullWritable, FaunusVertex, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException { this.outputs.close(); } } public static class Combiner extends Reducer<NullWritable, LongWritable, NullWritable, LongWritable> { private final LongWritable longWritable = new LongWritable(); @Override public void reduce(final NullWritable key, final Iterable<LongWritable> values, final Reducer<NullWritable, LongWritable, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException { long totalCount = 0; for (final LongWritable temp : values) { totalCount = totalCount + temp.get(); } this.longWritable.set(totalCount); context.write(key, this.longWritable); } } public static class Reduce extends Reducer<NullWritable, LongWritable, NullWritable, LongWritable> { private SafeReducerOutputs outputs; private LongWritable longWritable = new LongWritable(); @Override public void setup(final Reducer<NullWritable, LongWritable, NullWritable, LongWritable>.Context context) { this.outputs = new SafeReducerOutputs(context); } @Override public void reduce(final NullWritable key, final Iterable<LongWritable> values, final Reducer<NullWritable, LongWritable, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException { long totalCount = 0; for (final LongWritable temp : values) { totalCount = totalCount + temp.get(); } this.longWritable.set(totalCount); this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.longWritable); } @Override public void cleanup(final Reducer<NullWritable, LongWritable, NullWritable, LongWritable>.Context context) throws IOException, InterruptedException { this.outputs.close(); 
} } }
1 (no label)
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_util_CountMapReduce.java
1,447
public class OGremlinHelper { private static final String PARAM_OUTPUT = "output"; private static GremlinGroovyScriptEngineFactory factory = new GremlinGroovyScriptEngineFactory(); private static OGremlinHelper instance = new OGremlinHelper(); private int maxPool = 50; private OResourcePool<ODatabaseDocumentTx, OrientBaseGraph> graphPool; private long timeout; public static interface OGremlinCallback { public boolean call(ScriptEngine iEngine, OrientBaseGraph iGraph); } public OGremlinHelper() { OCommandManager.instance().registerRequester("gremlin", OCommandGremlin.class); OCommandManager.instance().registerExecutor(OCommandGremlin.class, OCommandGremlinExecutor.class); timeout = OGlobalConfiguration.STORAGE_LOCK_TIMEOUT.getValueAsLong(); } /** * Initializes the pools. */ public void create() { if (graphPool != null) // ALREADY CREATED return; graphPool = new OResourcePool<ODatabaseDocumentTx, OrientBaseGraph>(maxPool, new OResourcePoolListener<ODatabaseDocumentTx, OrientBaseGraph>() { @Override public OrientGraph createNewResource(final ODatabaseDocumentTx iKey, final Object... iAdditionalArgs) { return new OrientGraph(iKey); } @Override public boolean reuseResource(final ODatabaseDocumentTx iKey, final Object[] iAdditionalArgs, final OrientBaseGraph iReusedGraph) { iReusedGraph.reuse(iKey); return true; } }); } /** * Destroys the helper by cleaning all the in memory objects. */ public void destroy() { if (graphPool != null) { for (OrientBaseGraph graph : graphPool.getResources()) { graph.shutdown(); } graphPool.close(); } } public ScriptEngine acquireEngine() { checkStatus(); return new GremlinGroovyScriptEngine();// enginePool.getResource(ONE, Long.MAX_VALUE); } public void releaseEngine(final ScriptEngine engine) { checkStatus(); // engine.getBindings(ScriptContext.ENGINE_SCOPE).clear(); // enginePool.returnResource(engine); } public OrientGraph acquireGraph(final ODatabaseDocumentTx iDatabase) { checkStatus(); return (OrientGraph) ((OrientGraph) graphPool.getResource(iDatabase, timeout)); } public void releaseGraph(final OrientBaseGraph iGraph) { checkStatus(); graphPool.returnResource(iGraph); } @SuppressWarnings("unchecked") public static Object execute(final ODatabaseDocumentTx iDatabase, final String iText, final Map<Object, Object> iConfiguredParameters, Map<Object, Object> iCurrentParameters, final List<Object> iResult, final OGremlinCallback iBeforeExecution, final OGremlinCallback iAfterExecution) { return execute(OGremlinHelper.global().acquireGraph(iDatabase), iText, iConfiguredParameters, iCurrentParameters, iResult, iBeforeExecution, iAfterExecution); } public static Object execute(final OrientBaseGraph graph, final String iText, final Map<Object, Object> iConfiguredParameters, Map<Object, Object> iCurrentParameters, final List<Object> iResult, final OGremlinCallback iBeforeExecution, final OGremlinCallback iAfterExecution) { try { final ScriptEngine engine = getGremlinEngine(graph); try { final String output = OGremlinHelper.bindParameters(engine, iConfiguredParameters, iCurrentParameters); if (iBeforeExecution != null) if (!iBeforeExecution.call(engine, graph)) return null; final Object scriptResult = engine.eval(iText); if (iAfterExecution != null) if (!iAfterExecution.call(engine, graph)) return null; // Case of 1 output bound variable. 
Return as: // - Map -> ODocument if (output != null) { if (scriptResult instanceof GremlinPipeline) { Iterator<?> it = ((GremlinPipeline<?, ?>) scriptResult).iterator(); while (it.hasNext()) // ignore iCurrentRecord but traverse still required it.next(); } final Map<String, Object> map = (Map<String, Object>) engine.get(output); ODocument oDocument = new ODocument(map); iResult.add(oDocument); return oDocument; } // Case of no bound variables. Return as: // - List<ODocument> // - ODocument // - Integer // returned for this call in the last pipe if (scriptResult instanceof GremlinPipeline) { final Iterator<?> it = ((GremlinPipeline<?, ?>) scriptResult).iterator(); Object finalResult = null; List<Object> resultCollection = null; while (it.hasNext()) { Object current = it.next(); // if (current instanceof OrientElement) // current = ((OrientElement) current).getRawElement(); if (finalResult != null) { if (resultCollection == null) { // CONVERT IT INTO A COLLECTION resultCollection = new ArrayList<Object>(); resultCollection.add(finalResult); } resultCollection.add(current); } else finalResult = current; } if (resultCollection != null) { iResult.addAll(resultCollection); return resultCollection; } else { if (finalResult != null) iResult.add(finalResult); return finalResult; } } else if (scriptResult != null) iResult.add(scriptResult); return scriptResult; } catch (Exception e) { throw new OCommandExecutionException("Error on execution of the GREMLIN script", e); } finally { OGremlinHelper.global().releaseEngine(engine); } } finally { OGremlinHelper.global().releaseGraph(graph); } } protected static ScriptEngine getGremlinEngine(final OrientBaseGraph graph) { return OGremlinEngineThreadLocal.INSTANCE.get(graph); } public static String bindParameters(final ScriptEngine iEngine, final Map<Object, Object> iParameters, Map<Object, Object> iCurrentParameters) { if (iParameters != null && !iParameters.isEmpty()) // Every call to the function is a execution itself. Therefore, it requires a fresh set of input parameters. // Therefore, clone the parameters map trying to recycle previous instances for (Entry<Object, Object> param : iParameters.entrySet()) { final String key = (String) param.getKey(); final Object objectToClone = param.getValue(); final Object previousItem = iCurrentParameters.get(key); // try to recycle it final Object newItem = OGremlinHelper.cloneObject(objectToClone, previousItem); iCurrentParameters.put(key, newItem); } String output = null; if (iCurrentParameters != null) for (Entry<Object, Object> param : iCurrentParameters.entrySet()) { final String paramName = param.getKey().toString().trim(); if (paramName.equals(PARAM_OUTPUT)) { output = param.getValue().toString(); continue; } iEngine.getBindings(ScriptContext.ENGINE_SCOPE).put(paramName, param.getValue()); } return output; } /* * Tries to clone any Java object by using 3 techniques: - instanceof (most verbose but faster performance) - reflection (medium * performance) - serialization (applies for any object type but has a performance overhead) */ @SuppressWarnings({ "rawtypes", "unchecked" }) public static Object cloneObject(final Object objectToClone, final Object previousClone) { // *************************************************************************************************************************************** // 1. 
Class by class cloning (only clones known types) // *************************************************************************************************************************************** // Clone any Map (shallow clone should be enough at this level) if (objectToClone instanceof Map) { Map recycledMap = (Map) previousClone; if (recycledMap == null) recycledMap = new HashMap(); else recycledMap.clear(); recycledMap.putAll((Map<?, ?>) objectToClone); return recycledMap; // Clone any collection (shallow clone should be enough at this level) } else if (objectToClone instanceof Collection) { Collection recycledCollection = (Collection) previousClone; if (recycledCollection == null) recycledCollection = new ArrayList(); else recycledCollection.clear(); recycledCollection.addAll((Collection<?>) objectToClone); return recycledCollection; // Clone String } else if (objectToClone instanceof String) { return objectToClone; } else if (objectToClone instanceof Number) { return objectToClone; // Clone Date } else if (objectToClone instanceof Date) { Date clonedDate = (Date) ((Date) objectToClone).clone(); return clonedDate; } else { // *************************************************************************************************************************************** // 2. Polymorphic clone (by reflection, looks for a clone() method in hierarchy and invoke it) // *************************************************************************************************************************************** try { Object newClone = null; for (Class<?> obj = objectToClone.getClass(); !obj.equals(Object.class); obj = obj.getSuperclass()) { Method m[] = obj.getDeclaredMethods(); for (int i = 0; i < m.length; i++) { if (m[i].getName().equals("clone")) { m[i].setAccessible(true); newClone = m[i].invoke(objectToClone); System.out.println(objectToClone.getClass() + " cloned by Reflection. Performance can be improved by adding the class to the list of known types"); return newClone; } } } throw new Exception("Method clone not found"); // *************************************************************************************************************************************** // 3. Polymorphic clone (Deep cloning by Serialization) // *************************************************************************************************************************************** } catch (Throwable e1) { try { final ByteArrayOutputStream bytes = new ByteArrayOutputStream() { public synchronized byte[] toByteArray() { return buf; } }; final ObjectOutputStream out = new ObjectOutputStream(bytes); out.writeObject(objectToClone); out.close(); final ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray())); System.out.println(objectToClone.getClass() + " cloned by Serialization. Performance can be improved by adding the class to the list of known types"); return in.readObject(); // *************************************************************************************************************************************** // 4. 
Impossible to clone // *************************************************************************************************************************************** } catch (Throwable e2) { e2.printStackTrace(); return null; } } } } public static OGremlinHelper global() { return instance; } public int getMaxPool() { return maxPool; } public OGremlinHelper setMaxGraphPool(final int maxGraphs) { this.maxPool = maxGraphs; return this; } private void checkStatus() { if (graphPool == null) throw new IllegalStateException( "OGremlinHelper instance has been not created. Call OGremlinHelper.global().create() to iniziailze it"); } public static ODatabaseDocumentTx getGraphDatabase(final ODatabaseRecord iCurrentDatabase) { ODatabaseRecord currentDb = ODatabaseRecordThreadLocal.INSTANCE.get(); if (currentDb == null && iCurrentDatabase != null) // GET FROM THE RECORD currentDb = iCurrentDatabase; currentDb = (ODatabaseRecord) currentDb.getDatabaseOwner(); final ODatabaseDocumentTx db; if (currentDb instanceof ODatabaseDocumentTx) db = (ODatabaseDocumentTx) currentDb; else if (currentDb instanceof ODatabaseDocumentTx) { db = new ODatabaseDocumentTx((ODatabaseRecordTx) currentDb.getUnderlying()); ODatabaseRecordThreadLocal.INSTANCE.set(db); } else if (currentDb instanceof ODatabaseRecordTx) { db = new ODatabaseDocumentTx((ODatabaseRecordTx) currentDb); ODatabaseRecordThreadLocal.INSTANCE.set(db); } else throw new OCommandExecutionException("Cannot find a database of type ODatabaseDocumentTx or ODatabaseRecordTx"); return db; } public static String getEngineVersion() { return factory.getEngineVersion(); } protected ScriptEngine getGroovyEngine() { return factory.getScriptEngine(); } }
0 (true)
graphdb_src_main_java_com_orientechnologies_orient_graph_gremlin_OGremlinHelper.java
1,527
@Component("blRelatedProductProcessor") public class RelatedProductProcessor extends AbstractModelVariableModifierProcessor { @Resource(name = "blRelatedProductsService") protected RelatedProductsService relatedProductsService; /** * Sets the name of this processor to be used in Thymeleaf template */ public RelatedProductProcessor() { super("related_products"); } @Override public int getPrecedence() { return 10000; } @Override /** * Controller method for the processor that readies the service call and adds the results to the model. */ protected void modifyModelAttributes(Arguments arguments, Element element) { List<? extends PromotableProduct> relatedProducts = relatedProductsService.findRelatedProducts(buildDTO(arguments, element)); addToModel(arguments, getRelatedProductsResultVar(element), relatedProducts); addToModel(arguments, getProductsResultVar(element), convertRelatedProductsToProducts(relatedProducts)); } protected List<Product> convertRelatedProductsToProducts(List<? extends PromotableProduct> relatedProducts) { List<Product> products = new ArrayList<Product>(); if (relatedProducts != null) { for (PromotableProduct product : relatedProducts) { products.add(product.getRelatedProduct()); } } return products; } private String getRelatedProductsResultVar(Element element) { String resultVar = element.getAttributeValue("relatedProductsResultVar"); if (resultVar == null) { resultVar = "relatedProducts"; } return resultVar; } private String getProductsResultVar(Element element) { String resultVar = element.getAttributeValue("productsResultVar"); if (resultVar == null) { resultVar = "products"; } return resultVar; } private RelatedProductDTO buildDTO(Arguments args, Element element) { RelatedProductDTO relatedProductDTO = new RelatedProductDTO(); String productIdStr = element.getAttributeValue("productId"); String categoryIdStr = element.getAttributeValue("categoryId"); String quantityStr = element.getAttributeValue("quantity"); String typeStr = element.getAttributeValue("type"); if (productIdStr != null) { Object productId = StandardExpressionProcessor.processExpression(args, productIdStr); if (productId instanceof BigDecimal) { productId = new Long(((BigDecimal) productId).toPlainString()); } relatedProductDTO.setProductId((Long) productId); } if (categoryIdStr != null) { Object categoryId = StandardExpressionProcessor.processExpression(args, categoryIdStr); if (categoryId instanceof BigDecimal) { categoryId = new Long(((BigDecimal) categoryId).toPlainString()); } relatedProductDTO.setCategoryId((Long) categoryId); } if (quantityStr != null) { relatedProductDTO.setQuantity(((BigDecimal) StandardExpressionProcessor.processExpression(args, quantityStr)).intValue()); } if (typeStr != null && RelatedProductTypeEnum.getInstance(typeStr) != null) { relatedProductDTO.setType(RelatedProductTypeEnum.getInstance(typeStr)); } if ("false".equalsIgnoreCase(element.getAttributeValue("cumulativeResults"))) { relatedProductDTO.setCumulativeResults(false); } return relatedProductDTO; } }
1 (no label)
core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_processor_RelatedProductProcessor.java
138
public static class Presentation { public static class Tab { public static class Name { public static final String Rules = "StructuredContentImpl_Rules_Tab"; } public static class Order { public static final int Rules = 1000; } } public static class Group { public static class Name { public static final String Description = "StructuredContentImpl_Description"; public static final String Internal = "StructuredContentImpl_Internal"; public static final String Rules = "StructuredContentImpl_Rules"; } public static class Order { public static final int Description = 1000; public static final int Internal = 2000; public static final int Rules = 1000; } } }
0 (true)
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentImpl.java
98
{ @Override protected TransactionStateFactory createTransactionStateFactory() { return (testStateFactory = new DeadlockProneTransactionStateFactory( logging )); } };
0 (true)
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestCacheUpdateDeadlock.java
505
@RunWith(HazelcastSerialClassRunner.class) @Category(NightlyTest.class) public class AtomicLongStableReadStressTest extends StressTestSupport { public static final int CLIENT_THREAD_COUNT = 5; public static final int REFERENCE_COUNT = 10 * 1000; private HazelcastInstance client; private IAtomicLong[] references; private StressThread[] stressThreads; @Before public void setUp() { super.setUp(); ClientConfig clientConfig = new ClientConfig(); clientConfig.setRedoOperation(true); client = HazelcastClient.newHazelcastClient(clientConfig); references = new IAtomicLong[REFERENCE_COUNT]; for (int k = 0; k < references.length; k++) { references[k] = client.getAtomicLong("atomicreference:" + k); } stressThreads = new StressThread[CLIENT_THREAD_COUNT]; for (int k = 0; k < stressThreads.length; k++) { stressThreads[k] = new StressThread(); stressThreads[k].start(); } } @After public void tearDown() { super.tearDown(); if (client != null) { client.shutdown(); } } @Test public void testChangingCluster() { test(true); } @Test public void testFixedCluster() { test(false); } public void test(boolean clusterChangeEnabled) { setClusterChangeEnabled(clusterChangeEnabled); initializeReferences(); startAndWaitForTestCompletion(); joinAll(stressThreads); } private void initializeReferences() { System.out.println("=================================================================="); System.out.println("Initializing references"); System.out.println("=================================================================="); for (int k = 0; k < references.length; k++) { references[k].set(k); } System.out.println("=================================================================="); System.out.println("Completed with initializing references"); System.out.println("=================================================================="); } public class StressThread extends TestThread { @Override public void doRun() throws Exception { while (!isStopped()) { int key = random.nextInt(REFERENCE_COUNT); IAtomicLong reference = references[key]; long value = reference.get(); assertEquals(format("The value for atomic reference: %s was not consistent", reference), key, value); } } } }
0 (true)
hazelcast-client_src_test_java_com_hazelcast_client_stress_AtomicLongStableReadStressTest.java
3,483
public static class MergeResult { private final String[] conflicts; public MergeResult(String[] conflicts) { this.conflicts = conflicts; } /** * Does the merge have conflicts or not? */ public boolean hasConflicts() { return conflicts.length > 0; } /** * The merge conflicts. */ public String[] conflicts() { return this.conflicts; } }
0 (true)
src_main_java_org_elasticsearch_index_mapper_DocumentMapper.java
2,797
public abstract class AbstractTokenFilterFactory extends AbstractIndexComponent implements TokenFilterFactory { private final String name; protected final Version version; public AbstractTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, String name, Settings settings) { super(index, indexSettings); this.name = name; this.version = Analysis.parseAnalysisVersion(indexSettings, settings, logger); } @Override public String name() { return this.name; } public final Version version() { return version; } }
0 (true)
src_main_java_org_elasticsearch_index_analysis_AbstractTokenFilterFactory.java
560
public class TypedQueryBuilderTest extends TestCase { public void testNoParameters() { TypedQueryBuilder<String> q = new TypedQueryBuilder<String>(String.class, "test"); StringBuilder expected = new StringBuilder("SELECT test FROM " + String.class.getName() + " test"); assertEquals(q.toQueryString(), expected.toString()); } public void testSingleParameter() { TypedQueryBuilder<String> q = new TypedQueryBuilder<String>(String.class, "test"); q.addRestriction("test.attr", "=", "sample"); StringBuilder expected = new StringBuilder("SELECT test FROM " + String.class.getName() + " test") .append(" WHERE (test.attr = :p0)"); assertEquals(q.toQueryString(), expected.toString()); assertEquals(q.getParamMap().get("p0"), "sample"); assertEquals(q.getParamMap().size(), 1); } public void testTwoParameters() { TypedQueryBuilder<String> q = new TypedQueryBuilder<String>(String.class, "test"); q.addRestriction("test.attr", "=", "sample"); q.addRestriction("test.attr2", "=", "sample2"); StringBuilder expected = new StringBuilder("SELECT test FROM " + String.class.getName() + " test") .append(" WHERE (test.attr = :p0) AND (test.attr2 = :p1)"); assertEquals(q.toQueryString(), expected.toString()); assertEquals(q.getParamMap().get("p0"), "sample"); assertEquals(q.getParamMap().get("p1"), "sample2"); assertEquals(q.getParamMap().size(), 2); } public void testThreeParameters() { TypedQueryBuilder<String> q = new TypedQueryBuilder<String>(String.class, "test"); q.addRestriction("test.attr", "=", "sample"); q.addRestriction("test.attr2", "=", "sample2"); q.addRestriction("test.attr3", "=", "sample3"); StringBuilder expected = new StringBuilder("SELECT test FROM " + String.class.getName() + " test") .append(" WHERE (test.attr = :p0) AND (test.attr2 = :p1) AND (test.attr3 = :p2)"); assertEquals(q.toQueryString(), expected.toString()); assertEquals(q.getParamMap().get("p0"), "sample"); assertEquals(q.getParamMap().get("p1"), "sample2"); assertEquals(q.getParamMap().get("p2"), "sample3"); assertEquals(q.getParamMap().size(), 3); } public void testOneNested() { TypedQueryBuilder<String> q = new TypedQueryBuilder<String>(String.class, "test"); TQRestriction r = new TQRestriction(TQRestriction.Mode.AND) .addChildRestriction(new TQRestriction("test.startDate", "&lt;", "123")) .addChildRestriction(new TQRestriction(TQRestriction.Mode.OR) .addChildRestriction(new TQRestriction("test.endDate", "is null")) .addChildRestriction(new TQRestriction("test.endDate", "&gt;", "456"))); q.addRestriction("test.attr", "=", "sample"); q.addRestriction(r); StringBuilder expected = new StringBuilder("SELECT test FROM " + String.class.getName() + " test") .append(" WHERE (test.attr = :p0)") .append(" AND ((test.startDate &lt; :p1_0) AND ((test.endDate is null) OR (test.endDate &gt; :p1_1_1)))"); assertEquals(q.toQueryString(), expected.toString()); assertEquals(q.getParamMap().get("p0"), "sample"); assertEquals(q.getParamMap().get("p1_0"), "123"); assertEquals(q.getParamMap().get("p1_1"), null); assertEquals(q.getParamMap().get("p1_1_0"), null); assertEquals(q.getParamMap().get("p1_1_1"), "456"); assertEquals(q.getParamMap().size(), 5); } public void testTwoNested() { TypedQueryBuilder<String> q = new TypedQueryBuilder<String>(String.class, "test"); TQRestriction r = new TQRestriction(TQRestriction.Mode.AND) .addChildRestriction(new TQRestriction("test.startDate", "&lt;", "123")) .addChildRestriction(new TQRestriction(TQRestriction.Mode.OR) .addChildRestriction(new TQRestriction("test.endDate", "is null")) .addChildRestriction(new 
TQRestriction("test.endDate", "&gt;", "456"))); TQRestriction r2 = new TQRestriction(TQRestriction.Mode.OR) .addChildRestriction(new TQRestriction("test.res1", "=", "333")) .addChildRestriction(new TQRestriction(TQRestriction.Mode.AND) .addChildRestriction(new TQRestriction("test.res2", "is null")) .addChildRestriction(new TQRestriction("test.res3", "&gt;", "456"))); q.addRestriction("test.attr", "=", "sample"); q.addRestriction(r); q.addRestriction(r2); System.out.println(q.toQueryString()); StringBuilder expected = new StringBuilder("SELECT test FROM " + String.class.getName() + " test") .append(" WHERE (test.attr = :p0)") .append(" AND ((test.startDate &lt; :p1_0) AND ((test.endDate is null) OR (test.endDate &gt; :p1_1_1)))") .append(" AND ((test.res1 = :p2_0) OR ((test.res2 is null) AND (test.res3 &gt; :p2_1_1)))"); assertEquals(q.toQueryString(), expected.toString()); assertEquals(q.getParamMap().get("p0"), "sample"); assertEquals(q.getParamMap().get("p1_0"), "123"); assertEquals(q.getParamMap().get("p1_1"), null); assertEquals(q.getParamMap().get("p1_1_0"), null); assertEquals(q.getParamMap().get("p1_1_1"), "456"); assertEquals(q.getParamMap().get("p2_0"), "333"); assertEquals(q.getParamMap().get("p2_1"), null); assertEquals(q.getParamMap().get("p2_1_0"), null); assertEquals(q.getParamMap().get("p2_1_1"), "456"); assertEquals(q.getParamMap().size(), 9); } public void testCountQuery() { TypedQueryBuilder<String> q = new TypedQueryBuilder<String>(String.class, "test"); StringBuilder expected = new StringBuilder("SELECT COUNT(*) FROM " + String.class.getName() + " test"); assertEquals(q.toQueryString(true), expected.toString()); } }
0 (true)
common_src_test_java_org_broadleafcommerce_common_util_dao_TypedQueryBuilderTest.java
3,066
private static class OneTimeReleaseSnapshotIndexCommit extends SnapshotIndexCommit { private volatile boolean released = false; OneTimeReleaseSnapshotIndexCommit(SnapshotDeletionPolicy deletionPolicy, IndexCommit cp) throws IOException { super(deletionPolicy, cp); } @Override public boolean release() { if (released) { return false; } released = true; return ((SnapshotIndexCommit) delegate).release(); } }
0 (true)
src_main_java_org_elasticsearch_index_deletionpolicy_SnapshotDeletionPolicy.java
1,681
runnable = new Runnable() { public void run() { map.putAsync("key", null, 1, TimeUnit.SECONDS); } };
0 (true)
hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java
1,722
static class SampleEntryProcessor implements EntryProcessor, EntryBackupProcessor, Serializable { public Object process(Map.Entry entry) { entry.setValue((Integer) entry.getValue() + 1); return true; } public EntryBackupProcessor getBackupProcessor() { return SampleEntryProcessor.this; } public void processBackup(Map.Entry entry) { entry.setValue((Integer) entry.getValue() + 1); } }
0 (true)
hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java
1,822
constructors[KEY_SET] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() { public IdentifiedDataSerializable createNew(Integer arg) { return new MapKeySet(); } };
0 (true)
hazelcast_src_main_java_com_hazelcast_map_MapDataSerializerHook.java
2,240
public class InputStreamIndexInput extends InputStream { private final IndexInput indexInput; private final long limit; private final long actualSizeToRead; private long counter = 0; private long markPointer; private long markCounter; public InputStreamIndexInput(IndexInput indexInput, long limit) { this.indexInput = indexInput; this.limit = limit; if ((indexInput.length() - indexInput.getFilePointer()) > limit) { actualSizeToRead = limit; } else { actualSizeToRead = indexInput.length() - indexInput.getFilePointer(); } } public long actualSizeToRead() { return actualSizeToRead; } @Override public int read(byte[] b, int off, int len) throws IOException { if (b == null) { throw new NullPointerException(); } else if (off < 0 || len < 0 || len > b.length - off) { throw new IndexOutOfBoundsException(); } if (indexInput.getFilePointer() >= indexInput.length()) { return -1; } if (indexInput.getFilePointer() + len > indexInput.length()) { len = (int) (indexInput.length() - indexInput.getFilePointer()); } if (counter + len > limit) { len = (int) (limit - counter); } if (len <= 0) { return -1; } indexInput.readBytes(b, off, len, false); counter += len; return len; } @Override public int read() throws IOException { if (counter++ >= limit) { return -1; } return (indexInput.getFilePointer() < indexInput.length()) ? (indexInput.readByte() & 0xff) : -1; } @Override public boolean markSupported() { return true; } @Override public synchronized void mark(int readlimit) { markPointer = indexInput.getFilePointer(); markCounter = counter; } @Override public synchronized void reset() throws IOException { indexInput.seek(markPointer); counter = markCounter; } }
0 (true)
src_main_java_org_elasticsearch_common_lucene_store_InputStreamIndexInput.java
1,172
public class OQueryOperatorBetween extends OQueryOperatorEqualityNotNulls { public OQueryOperatorBetween() { super("BETWEEN", 5, false, 3); } @Override @SuppressWarnings("unchecked") protected boolean evaluateExpression(final OIdentifiable iRecord, final OSQLFilterCondition iCondition, final Object iLeft, final Object iRight, OCommandContext iContext) { validate(iRight); final Iterator<?> valueIterator = OMultiValue.getMultiValueIterator(iRight); final Object right1 = OType.convert(valueIterator.next(), iLeft.getClass()); if (right1 == null) return false; valueIterator.next(); final Object right2 = OType.convert(valueIterator.next(), iLeft.getClass()); if (right2 == null) return false; return ((Comparable<Object>) iLeft).compareTo(right1) >= 0 && ((Comparable<Object>) iLeft).compareTo(right2) <= 0; } private void validate(Object iRight) { if (!OMultiValue.isMultiValue(iRight.getClass())) { throw new IllegalArgumentException("Found '" + iRight + "' while was expected: " + getSyntax()); } if (OMultiValue.getSize(iRight) != 3) throw new IllegalArgumentException("Found '" + OMultiValue.toString(iRight) + "' while was expected: " + getSyntax()); } @Override public String getSyntax() { return "<left> " + keyword + " <minRange> AND <maxRange>"; } @Override public OIndexReuseType getIndexReuseType(final Object iLeft, final Object iRight) { return OIndexReuseType.INDEX_METHOD; } @Override public Object executeIndexQuery(OCommandContext iContext, OIndex<?> index, INDEX_OPERATION_TYPE iOperationType, List<Object> keyParams, IndexResultListener resultListener, int fetchLimit) { final OIndexDefinition indexDefinition = index.getDefinition(); final Object result; final OIndexInternal<?> internalIndex = index.getInternal(); if (!internalIndex.canBeUsedInEqualityOperators() || !internalIndex.hasRangeQuerySupport()) return null; if (indexDefinition.getParamCount() == 1) { final Object[] betweenKeys = (Object[]) keyParams.get(0); final Object keyOne = indexDefinition.createValue(Collections.singletonList(OSQLHelper.getValue(betweenKeys[0]))); final Object keyTwo = indexDefinition.createValue(Collections.singletonList(OSQLHelper.getValue(betweenKeys[2]))); if (keyOne == null || keyTwo == null) return null; if (iOperationType == INDEX_OPERATION_TYPE.COUNT) result = index.count(keyOne, true, keyTwo, true, fetchLimit); else { if (resultListener != null) { index.getValuesBetween(keyOne, true, keyTwo, true, resultListener); result = resultListener.getResult(); } else result = index.getValuesBetween(keyOne, true, keyTwo, true); } } else { final OCompositeIndexDefinition compositeIndexDefinition = (OCompositeIndexDefinition) indexDefinition; final Object[] betweenKeys = (Object[]) keyParams.get(keyParams.size() - 1); final Object betweenKeyOne = OSQLHelper.getValue(betweenKeys[0]); if (betweenKeyOne == null) return null; final Object betweenKeyTwo = OSQLHelper.getValue(betweenKeys[2]); if (betweenKeyTwo == null) return null; final List<Object> betweenKeyOneParams = new ArrayList<Object>(keyParams.size()); betweenKeyOneParams.addAll(keyParams.subList(0, keyParams.size() - 1)); betweenKeyOneParams.add(betweenKeyOne); final List<Object> betweenKeyTwoParams = new ArrayList<Object>(keyParams.size()); betweenKeyTwoParams.addAll(keyParams.subList(0, keyParams.size() - 1)); betweenKeyTwoParams.add(betweenKeyTwo); final Object keyOne = compositeIndexDefinition.createSingleValue(betweenKeyOneParams); if (keyOne == null) return null; final Object keyTwo = compositeIndexDefinition.createSingleValue(betweenKeyTwoParams); if 
(keyTwo == null) return null; if (iOperationType == INDEX_OPERATION_TYPE.COUNT) result = index.count(keyOne, true, keyTwo, true, fetchLimit); else { if (resultListener != null) { index.getValuesBetween(keyOne, true, keyTwo, true, resultListener); result = resultListener.getResult(); } else result = index.getValuesBetween(keyOne, true, keyTwo, true); } } updateProfiler(iContext, index, keyParams, indexDefinition); return result; } @Override public ORID getBeginRidRange(final Object iLeft, final Object iRight) { validate(iRight); if (iLeft instanceof OSQLFilterItemField && ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iLeft).getRoot())) { final Iterator<?> valueIterator = OMultiValue.getMultiValueIterator(iRight); final Object right1 = valueIterator.next(); if (right1 != null) return (ORID) right1; valueIterator.next(); return (ORID) valueIterator.next(); } return null; } @Override public ORID getEndRidRange(final Object iLeft, final Object iRight) { validate(iRight); if (iLeft instanceof OSQLFilterItemField && ODocumentHelper.ATTRIBUTE_RID.equals(((OSQLFilterItemField) iLeft).getRoot())) { final Iterator<?> valueIterator = OMultiValue.getMultiValueIterator(iRight); final Object right1 = valueIterator.next(); valueIterator.next(); final Object right2 = valueIterator.next(); if (right2 == null) return (ORID) right1; return (ORID) right2; } return null; } }
1no label
core_src_main_java_com_orientechnologies_orient_core_sql_operator_OQueryOperatorBetween.java
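A minimal usage sketch for the BETWEEN operator above, assuming an in-memory OrientDB database; the Person class and its age property are invented for illustration. BETWEEN is inclusive on both bounds, and when the filtered property is indexed the operator can be answered through the index's getValuesBetween() instead of a full scan.

import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.record.impl.ODocument;
import com.orientechnologies.orient.core.sql.query.OSQLSynchQuery;
import java.util.List;

public class BetweenQueryExample {
    public static void main(String[] args) {
        // Throw-away in-memory database; the Person class is created implicitly on first save.
        ODatabaseDocumentTx db = new ODatabaseDocumentTx("memory:between-demo").create();
        try {
            for (int i = 0; i < 5; i++) {
                new ODocument("Person").field("age", 15 + i * 5).save(); // ages 15, 20, 25, 30, 35
            }
            // Both bounds are included, so ages 20, 25 and 30 match.
            List<ODocument> matched = db.query(
                new OSQLSynchQuery<ODocument>("select from Person where age between 18 and 30"));
            System.out.println("matched: " + matched.size());
        } finally {
            db.drop();
        }
    }
}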
20
final class KeyIterator extends AbstractEntryIterator<K, V, K> { KeyIterator(final OMVRBTreeEntry<K, V> first) { super(first); } @Override public K next() { return nextKey(); } }
0true
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java
1,206
public static enum ATTRIBUTES { NAME, DATASEGMENT, USE_WAL, RECORD_GROW_FACTOR, RECORD_OVERFLOW_GROW_FACTOR, COMPRESSION }
0true
core_src_main_java_com_orientechnologies_orient_core_storage_OCluster.java
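The ATTRIBUTES enum above lists the per-cluster settings that can be changed after a cluster has been created. A hedged sketch of flipping one of them through the ALTER CLUSTER SQL command, assuming the command accepts the attribute names of this enum; the Person class (and therefore the lower-case person cluster) exists only for the example.

import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.sql.OCommandSQL;

public class AlterClusterExample {
    public static void main(String[] args) {
        ODatabaseDocumentTx db = new ODatabaseDocumentTx("memory:cluster-demo").create();
        try {
            // Creating the class also creates a physical cluster named "person".
            db.getMetadata().getSchema().createClass("Person");
            // USE_WAL is one of the OCluster.ATTRIBUTES values; here it is toggled via SQL.
            db.command(new OCommandSQL("alter cluster person use_wal false")).execute();
        } finally {
            db.drop();
        }
    }
}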
1,193
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SE_BAD_FIELD") public class MembershipEvent extends EventObject { private static final long serialVersionUID = -2010865371829087371L; /** * This event type is fired when a new member joins the cluster. */ public static final int MEMBER_ADDED = 1; /** * This event type is fired when a member has left the cluster or has been declared * unresponsive by the other members for an extended period of time. */ public static final int MEMBER_REMOVED = 2; /** * This event type is fired if a member attribute has been changed or removed. * * @since 3.2 */ public static final int MEMBER_ATTRIBUTE_CHANGED = 5; private final Member member; private final int eventType; private final Set<Member> members; public MembershipEvent(Cluster cluster, Member member, int eventType, Set<Member> members) { super(cluster); this.member = member; this.eventType = eventType; this.members = members; } /** * Returns a consistent view of the members exactly after this MembershipEvent has been processed: if a * member was removed, the returned set does not include it; if a member was added, it does. * * The problem with calling {@link com.hazelcast.core.Cluster#getMembers()} is that its content could already * have changed while this event is being processed, which makes it very difficult to write a deterministic * algorithm. This method solves that problem. * * The set is immutable and ordered. For more information see {@link com.hazelcast.core.Cluster#getMembers()}. * * @return the members at the moment after this event. */ public Set<Member> getMembers() { return members; } /** * Returns the cluster of the event. * * @return the current cluster instance */ public Cluster getCluster() { return (Cluster) getSource(); } /** * Returns the membership event type: * #MEMBER_ADDED * #MEMBER_REMOVED * #MEMBER_ATTRIBUTE_CHANGED * * @return the membership event type */ public int getEventType() { return eventType; } /** * Returns the removed or added member. * * @return the member that was removed or added */ public Member getMember() { return member; } @Override public String toString() { String type; switch (eventType) { case MEMBER_ADDED: type = "added"; break; case MEMBER_REMOVED: type = "removed"; break; case MEMBER_ATTRIBUTE_CHANGED: type = "attribute_changed"; break; default: throw new IllegalStateException(); } return format("MembershipEvent {member=%s,type=%s}", member, type); } }
0true
hazelcast_src_main_java_com_hazelcast_core_MembershipEvent.java
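A small sketch of how the event above is typically consumed: a MembershipListener registered on the Cluster that only logs the post-event member count exposed by getMembers(). The instance is started with whatever the default configuration provides.

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.MemberAttributeEvent;
import com.hazelcast.core.MembershipEvent;
import com.hazelcast.core.MembershipListener;

public class MembershipLoggingExample {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        hz.getCluster().addMembershipListener(new MembershipListener() {
            @Override
            public void memberAdded(MembershipEvent event) {
                // getMembers() is the snapshot taken right after the event, so it already contains the new member.
                System.out.println("joined: " + event.getMember() + ", cluster size now " + event.getMembers().size());
            }

            @Override
            public void memberRemoved(MembershipEvent event) {
                System.out.println("left: " + event.getMember() + ", cluster size now " + event.getMembers().size());
            }

            @Override
            public void memberAttributeChanged(MemberAttributeEvent event) {
                System.out.println("attribute changed on: " + event.getMember());
            }
        });
    }
}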
1,326
@Service("blSearchRedirectService") public class SearchRedirectServiceImpl implements SearchRedirectService { @Resource(name = "blSearchRedirectDao") protected SearchRedirectDao SearchRedirectDao; /** * Checks the passed in URL to determine if there is a matching * SearchRedirect. Returns null if no handler was found. * * @param uri * @return */ @Override public SearchRedirect findSearchRedirectBySearchTerm(String uri) { SearchRedirect SearchRedirect = SearchRedirectDao .findSearchRedirectBySearchTerm(uri); if (SearchRedirect != null) { return SearchRedirect; } else { return null; } } }
0true
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_redirect_service_SearchRedirectServiceImpl.java
2,601
static class NoLongerMasterException extends ElasticsearchIllegalStateException { @Override public Throwable fillInStackTrace() { return null; } }
0true
src_main_java_org_elasticsearch_discovery_zen_fd_MasterFaultDetection.java
885
public interface OQuery<T extends Object> extends OCommandRequest { /** * Executes the query with no explicit limit on the result set; the number of returned records is bounded only by the maximum allowed. * * @return List of records if any record matches the query constraints, otherwise an empty List. */ public List<T> run(Object... iArgs); /** * Executes the query and returns only the first occurrence found, if any. * * @return Record if found, otherwise null */ public T runFirst(Object... iArgs); public void reset(); }
0true
core_src_main_java_com_orientechnologies_orient_core_query_OQuery.java
342
public abstract class ODatabasePoolBase<DB extends ODatabase> extends Thread { protected ODatabasePoolAbstract<DB> dbPool; protected final String url; protected final String userName; protected final String userPassword; protected ODatabasePoolBase() { url = userName = userPassword = null; } protected ODatabasePoolBase(final String iURL, final String iUserName, final String iUserPassword) { url = iURL; userName = iUserName; userPassword = iUserPassword; } public ODatabasePoolBase<DB> setup() { setup(OGlobalConfiguration.DB_POOL_MIN.getValueAsInteger(), OGlobalConfiguration.DB_POOL_MAX.getValueAsInteger()); return this; } protected abstract DB createResource(Object owner, String iDatabaseName, Object... iAdditionalArgs); public ODatabasePoolBase<DB> setup(final int iMinSize, final int iMaxSize) { return this.setup(iMinSize, iMaxSize, OGlobalConfiguration.DB_POOL_IDLE_TIMEOUT.getValueAsLong(), OGlobalConfiguration.DB_POOL_IDLE_CHECK_DELAY.getValueAsLong()); } public ODatabasePoolBase<DB> setup(final int iMinSize, final int iMaxSize, final long idleTimeout, final long timeBetweenEvictionRunsMillis) { if (dbPool == null) synchronized (this) { if (dbPool == null) { dbPool = new ODatabasePoolAbstract<DB>(this, iMinSize, iMaxSize, idleTimeout, timeBetweenEvictionRunsMillis) { public void onShutdown() { if (owner instanceof ODatabasePoolBase<?>) ((ODatabasePoolBase<?>) owner).close(); } public DB createNewResource(final String iDatabaseName, final Object... iAdditionalArgs) { if (iAdditionalArgs.length < 2) throw new OSecurityAccessException("Username and/or password missed"); return createResource(owner, iDatabaseName, iAdditionalArgs); } public boolean reuseResource(final String iKey, final Object[] iAdditionalArgs, final DB iValue) { if (((ODatabasePooled) iValue).isUnderlyingOpen()) { ((ODatabasePooled) iValue).reuse(owner, iAdditionalArgs); if (iValue.getStorage().isClosed()) // STORAGE HAS BEEN CLOSED: REOPEN IT iValue.getStorage().open((String) iAdditionalArgs[0], (String) iAdditionalArgs[1], null); else if (!((ODatabaseComplex<?>) iValue).getUser().checkPassword((String) iAdditionalArgs[1])) throw new OSecurityAccessException(iValue.getName(), "User or password not valid for database: '" + iValue.getName() + "'"); return true; } return false; } }; } } return this; } /** * Acquires a connection from the pool using the configured URL, user-name and user-password. If the pool is empty, then the * caller thread will wait for it. * * @return A pooled database instance */ public DB acquire() { setup(); return dbPool.acquire(url, userName, userPassword); } /** * Acquires a connection from the pool. If the pool is empty, then the caller thread will wait for it. * * @param iName * Database name * @param iUserName * User name * @param iUserPassword * User password * @return A pooled database instance */ public DB acquire(final String iName, final String iUserName, final String iUserPassword) { setup(); return dbPool.acquire(iName, iUserName, iUserPassword); } /** * Acquires a connection from the pool specifying options. If the pool is empty, then the caller thread will wait for it. * * @param iName * Database name * @param iUserName * User name * @param iUserPassword * User password * @return A pooled database instance */ public DB acquire(final String iName, final String iUserName, final String iUserPassword, final Map<String, Object> iOptionalParams) { setup(); return dbPool.acquire(iName, iUserName, iUserPassword, iOptionalParams); } /** * Don't call it directly but use database.close(). 
* * @param iDatabase */ public void release(final DB iDatabase) { if (dbPool != null) dbPool.release(iDatabase); } /** * Closes the entire pool freeing all the connections. */ public void close() { if (dbPool != null) { dbPool.close(); dbPool = null; } } /** * Returns the maximum size of the pool * */ public int getMaxSize() { setup(); return dbPool.getMaxSize(); } /** * Returns all the configured pools. * */ public Map<String, OResourcePool<String, DB>> getPools() { return dbPool.getPools(); } /** * Removes a pool by name/user * */ public void remove(final String iName, final String iUser) { dbPool.remove(iName, iUser); } @Override public void run() { close(); } }
1no label
core_src_main_java_com_orientechnologies_orient_core_db_ODatabasePoolBase.java
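A usage sketch for the pool base class above through its document-database subclass; the URL and credentials are placeholders for an existing database. Note that close() on a pooled instance hands the connection back to the pool, which is why release() carries the warning not to call it directly.

import com.orientechnologies.orient.core.db.document.ODatabaseDocumentPool;
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;

public class PoolUsageExample {
    public static void main(String[] args) {
        // Placeholder connection settings; the database must already exist.
        ODatabaseDocumentTx db = ODatabaseDocumentPool.global().acquire("remote:localhost/demo", "admin", "admin");
        try {
            System.out.println("documents in default cluster: " + db.countClusterElements("default"));
        } finally {
            db.close(); // returns the connection to the pool instead of really closing it
        }
    }
}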
1,231
ex.submit(new Runnable() { public void run() { while (running) { int opId = random.nextInt(operations.size()); Runnable operation = operations.get(opId); operation.run(); // System.out.println("Running..." + Thread.currentThread()); } } });
0true
hazelcast_src_main_java_com_hazelcast_examples_AllTest.java
1,079
public class UpdateHelper extends AbstractComponent { private final IndicesService indicesService; private final ScriptService scriptService; @Inject public UpdateHelper(Settings settings, IndicesService indicesService, ScriptService scriptService) { super(settings); this.indicesService = indicesService; this.scriptService = scriptService; } /** * Prepares an update request by converting it into an index or delete request or an update response (no action). */ public Result prepare(UpdateRequest request) { IndexService indexService = indicesService.indexServiceSafe(request.index()); IndexShard indexShard = indexService.shardSafe(request.shardId()); return prepare(request, indexShard); } public Result prepare(UpdateRequest request, IndexShard indexShard) { long getDate = System.currentTimeMillis(); final GetResult getResult = indexShard.getService().get(request.type(), request.id(), new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME}, true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE); if (!getResult.isExists()) { if (request.upsertRequest() == null && !request.docAsUpsert()) { throw new DocumentMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id()); } IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest(); indexRequest.index(request.index()).type(request.type()).id(request.id()) // it has to be a "create!" .create(true) .routing(request.routing()) .refresh(request.refresh()) .replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel()); indexRequest.operationThreaded(false); if (request.versionType() == VersionType.EXTERNAL) { // in external versioning mode, we want to create the new document using the given version. indexRequest.version(request.version()).versionType(VersionType.EXTERNAL); } return new Result(indexRequest, Operation.UPSERT, null, null); } long updateVersion = getResult.getVersion(); if (request.versionType() == VersionType.EXTERNAL) { updateVersion = request.version(); // remember, match_any is excluded by the conflict test } if (getResult.internalSourceRef() == null) { // no source, we can't do nothing, through a failure... throw new DocumentSourceMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id()); } Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true); String operation = null; String timestamp = null; Long ttl = null; Object fetchedTTL = null; final Map<String, Object> updatedSourceAsMap; final XContentType updateSourceContentType = sourceAndContent.v1(); String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null; String parent = getResult.getFields().containsKey(ParentFieldMapper.NAME) ? 
getResult.field(ParentFieldMapper.NAME).getValue().toString() : null; if (request.script() == null && request.doc() != null) { IndexRequest indexRequest = request.doc(); updatedSourceAsMap = sourceAndContent.v2(); if (indexRequest.ttl() > 0) { ttl = indexRequest.ttl(); } timestamp = indexRequest.timestamp(); if (indexRequest.routing() != null) { routing = indexRequest.routing(); } if (indexRequest.parent() != null) { parent = indexRequest.parent(); } XContentHelper.update(updatedSourceAsMap, indexRequest.sourceAsMap()); } else { Map<String, Object> ctx = new HashMap<String, Object>(2); ctx.put("_source", sourceAndContent.v2()); try { ExecutableScript script = scriptService.executable(request.scriptLang, request.script, request.scriptParams); script.setNextVar("ctx", ctx); script.run(); // we need to unwrap the ctx... ctx = (Map<String, Object>) script.unwrap(ctx); } catch (Exception e) { throw new ElasticsearchIllegalArgumentException("failed to execute script", e); } operation = (String) ctx.get("op"); timestamp = (String) ctx.get("_timestamp"); fetchedTTL = ctx.get("_ttl"); if (fetchedTTL != null) { if (fetchedTTL instanceof Number) { ttl = ((Number) fetchedTTL).longValue(); } else { ttl = TimeValue.parseTimeValue((String) fetchedTTL, null).millis(); } } updatedSourceAsMap = (Map<String, Object>) ctx.get("_source"); } // apply script to update the source // No TTL has been given in the update script so we keep previous TTL value if there is one if (ttl == null) { ttl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null; if (ttl != null) { ttl = ttl - (System.currentTimeMillis() - getDate); // It is an approximation of exact TTL value, could be improved } } if (operation == null || "index".equals(operation)) { final IndexRequest indexRequest = Requests.indexRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent) .source(updatedSourceAsMap, updateSourceContentType) .version(updateVersion).versionType(request.versionType()) .replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel()) .timestamp(timestamp).ttl(ttl) .refresh(request.refresh()); indexRequest.operationThreaded(false); return new Result(indexRequest, Operation.INDEX, updatedSourceAsMap, updateSourceContentType); } else if ("delete".equals(operation)) { DeleteRequest deleteRequest = Requests.deleteRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent) .version(updateVersion).versionType(request.versionType()) .replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel()); deleteRequest.operationThreaded(false); return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType); } else if ("none".equals(operation)) { UpdateResponse update = new UpdateResponse(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion(), false); update.setGetResult(extractGetResult(request, getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, null)); return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType); } else { logger.warn("Used update operation [{}] for script [{}], doing nothing...", operation, request.script); UpdateResponse update = new UpdateResponse(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion(), false); return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType); } } /** * 
Extracts the fields from the updated document to be returned in a update response */ public GetResult extractGetResult(final UpdateRequest request, long version, final Map<String, Object> source, XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) { if (request.fields() == null || request.fields().length == 0) { return null; } boolean sourceRequested = false; Map<String, GetField> fields = null; if (request.fields() != null && request.fields().length > 0) { SourceLookup sourceLookup = new SourceLookup(); sourceLookup.setNextSource(source); for (String field : request.fields()) { if (field.equals("_source")) { sourceRequested = true; continue; } Object value = sourceLookup.extractValue(field); if (value != null) { if (fields == null) { fields = newHashMapWithExpectedSize(2); } GetField getField = fields.get(field); if (getField == null) { getField = new GetField(field, new ArrayList<Object>(2)); fields.put(field, getField); } getField.getValues().add(value); } } } // TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType) return new GetResult(request.index(), request.type(), request.id(), version, true, sourceRequested ? sourceAsBytes : null, fields); } public static class Result { private final Streamable action; private final Operation operation; private final Map<String, Object> updatedSourceAsMap; private final XContentType updateSourceContentType; public Result(Streamable action, Operation operation, Map<String, Object> updatedSourceAsMap, XContentType updateSourceContentType) { this.action = action; this.operation = operation; this.updatedSourceAsMap = updatedSourceAsMap; this.updateSourceContentType = updateSourceContentType; } @SuppressWarnings("unchecked") public <T extends Streamable> T action() { return (T) action; } public Operation operation() { return operation; } public Map<String, Object> updatedSourceAsMap() { return updatedSourceAsMap; } public XContentType updateSourceContentType() { return updateSourceContentType; } } public static enum Operation { UPSERT, INDEX, DELETE, NONE } }
1no label
src_main_java_org_elasticsearch_action_update_UpdateHelper.java
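A sketch of the kind of client-side request that the helper above turns into an index or delete operation on the shard: a partial-document update with doc-as-upsert. The index, type and id are placeholders, and client is assumed to be an already-connected Client.

import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;

public class PartialUpdateExample {
    // Placeholder index/type/id; the partial document is merged into the existing _source.
    static UpdateResponse markShipped(Client client) throws IOException {
        return client.prepareUpdate("orders", "order", "1")
                .setDoc(XContentFactory.jsonBuilder()
                        .startObject()
                        .field("status", "shipped")
                        .endObject())
                .setDocAsUpsert(true) // if the document is missing, index the partial doc as a new one
                .execute()
                .actionGet();
    }
}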
259
@RunWith(HazelcastSerialClassRunner.class) @Category(QuickTest.class) public class ExecutionDelayTest extends HazelcastTestSupport { private static final int NODES = 3; private final List<HazelcastInstance> hzs = new ArrayList<HazelcastInstance>(NODES); static final AtomicInteger counter = new AtomicInteger(); @Before public void init() { counter.set(0); for (int i = 0; i < NODES; i++) { hzs.add(Hazelcast.newHazelcastInstance()); } } @After public void destroy() throws InterruptedException { HazelcastClient.shutdownAll(); Hazelcast.shutdownAll(); } @Test public void testExecutorOneNodeFailsUnexpectedly() throws InterruptedException, ExecutionException { final int executions = 20; ScheduledExecutorService ex = Executors.newSingleThreadScheduledExecutor(); try { ex.schedule(new Runnable() { @Override public void run() { hzs.get(1).getLifecycleService().terminate(); } }, 1000, TimeUnit.MILLISECONDS); Task task = new Task(); runClient(task, executions); assertTrueEventually(new AssertTask() { @Override public void run() { assertEquals(executions, counter.get()); } }); } finally { ex.shutdown(); } } @Test public void testExecutorOneNodeShutdown() throws InterruptedException, ExecutionException { final int executions = 20; ScheduledExecutorService ex = Executors.newSingleThreadScheduledExecutor(); try { ex.schedule(new Runnable() { @Override public void run() { hzs.get(1).shutdown(); } }, 1000, TimeUnit.MILLISECONDS); Task task = new Task(); runClient(task, executions); assertTrueEventually(new AssertTask() { @Override public void run() { assertEquals(executions, counter.get()); } }); } finally { ex.shutdown(); } } private void runClient(Task task, int executions) throws InterruptedException, ExecutionException { final ClientConfig clientConfig = new ClientConfig(); clientConfig.getNetworkConfig().setRedoOperation(true); HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig); IExecutorService executor = client.getExecutorService("executor"); for (int i = 0; i < executions; i++) { Future future = executor.submitToKeyOwner(task, i); future.get(); Thread.sleep(100); } } private static class Task implements Serializable, Callable { @Override public Object call() throws Exception { counter.incrementAndGet(); return null; } } }
0true
hazelcast-client_src_test_java_com_hazelcast_client_executor_ExecutionDelayTest.java
596
public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder<IndicesSegmentsRequest, IndicesSegmentResponse, IndicesSegmentsRequestBuilder> { public IndicesSegmentsRequestBuilder(IndicesAdminClient indicesClient) { super((InternalIndicesAdminClient) indicesClient, new IndicesSegmentsRequest()); } @Override protected void doExecute(ActionListener<IndicesSegmentResponse> listener) { ((IndicesAdminClient) client).segments(request, listener); } }
0true
src_main_java_org_elasticsearch_action_admin_indices_segments_IndicesSegmentsRequestBuilder.java
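A short sketch of driving the request builder above; client is assumed to be an already-connected Client and "logs" is a placeholder index name.

import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
import org.elasticsearch.client.Client;

public class SegmentsExample {
    static void printReportedIndices(Client client) {
        IndicesSegmentResponse response = client.admin().indices()
                .prepareSegments("logs")
                .execute()
                .actionGet();
        // One entry per index, each carrying per-shard Lucene segment details.
        System.out.println("indices reported: " + response.getIndices().size());
    }
}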
1,313
public class OLocalPaginatedStorage extends OStorageLocalAbstract { private static final int ONE_KB = 1024; private final int DELETE_MAX_RETRIES; private final int DELETE_WAIT_TIME; private final Map<String, OCluster> clusterMap = new LinkedHashMap<String, OCluster>(); private OCluster[] clusters = new OCluster[0]; private String storagePath; private final OStorageVariableParser variableParser; private int defaultClusterId = -1; private static String[] ALL_FILE_EXTENSIONS = { ".ocf", ".pls", ".pcl", ".oda", ".odh", ".otx", ".ocs", ".oef", ".oem", ".oet", OWriteAheadLog.WAL_SEGMENT_EXTENSION, OWriteAheadLog.MASTER_RECORD_EXTENSION, OLocalHashTableIndexEngine.BUCKET_FILE_EXTENSION, OLocalHashTableIndexEngine.METADATA_FILE_EXTENSION, OLocalHashTableIndexEngine.TREE_FILE_EXTENSION, OClusterPositionMap.DEF_EXTENSION, OSBTreeIndexEngine.DATA_FILE_EXTENSION, OWOWCache.NAME_ID_MAP_EXTENSION, OIndexRIDContainer.INDEX_FILE_EXTENSION }; private OModificationLock modificationLock = new OModificationLock(); private ScheduledExecutorService fuzzyCheckpointExecutor; private ExecutorService checkpointExecutor; private volatile boolean wereDataRestoredAfterOpen = false; private boolean makeFullCheckPointAfterClusterCreate = OGlobalConfiguration.STORAGE_MAKE_FULL_CHECKPOINT_AFTER_CLUSTER_CREATE .getValueAsBoolean(); public OLocalPaginatedStorage(final String name, final String filePath, final String mode) throws IOException { super(name, filePath, mode); File f = new File(url); if (f.exists() || !exists(f.getParent())) { // ALREADY EXISTS OR NOT LEGACY storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getPath())); } else { // LEGACY DB storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getParent())); } storagePath = OIOUtils.getPathFromDatabaseName(storagePath); variableParser = new OStorageVariableParser(storagePath); configuration = new OStorageConfigurationSegment(this); DELETE_MAX_RETRIES = OGlobalConfiguration.FILE_MMAP_FORCE_RETRY.getValueAsInteger(); DELETE_WAIT_TIME = OGlobalConfiguration.FILE_MMAP_FORCE_DELAY.getValueAsInteger(); } private void initWal() throws IOException { if (OGlobalConfiguration.USE_WAL.getValueAsBoolean()) { fuzzyCheckpointExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { @Override public Thread newThread(Runnable r) { Thread thread = new Thread(r); thread.setDaemon(true); return thread; } }); checkpointExecutor = Executors.newSingleThreadExecutor(new ThreadFactory() { @Override public Thread newThread(Runnable r) { Thread thread = new Thread(r); thread.setDaemon(true); return thread; } }); writeAheadLog = new OWriteAheadLog(this); final int fuzzyCheckpointDelay = OGlobalConfiguration.WAL_FUZZY_CHECKPOINT_INTERVAL.getValueAsInteger(); fuzzyCheckpointExecutor.scheduleWithFixedDelay(new Runnable() { @Override public void run() { try { makeFuzzyCheckpoint(); } catch (Throwable e) { OLogManager.instance().error(this, "Error during background fuzzy checkpoint creation for storage " + name, e); } } }, fuzzyCheckpointDelay, fuzzyCheckpointDelay, TimeUnit.SECONDS); } else writeAheadLog = null; long diskCacheSize = OGlobalConfiguration.DISK_CACHE_SIZE.getValueAsLong() * 1024 * 1024; long writeCacheSize = (long) Math.floor((((double) OGlobalConfiguration.DISK_WRITE_CACHE_PART.getValueAsInteger()) / 100.0) * diskCacheSize); long readCacheSize = diskCacheSize - writeCacheSize; diskCache = new OReadWriteDiskCache(name, readCacheSize, writeCacheSize, 
OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * ONE_KB, OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_TTL.getValueAsLong() * 1000, OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_FLUSH_INTERVAL.getValueAsInteger(), this, writeAheadLog, false, true); } public void open(final String iUserName, final String iUserPassword, final Map<String, Object> iProperties) { lock.acquireExclusiveLock(); try { addUser(); if (status != STATUS.CLOSED) // ALREADY OPENED: THIS IS THE CASE WHEN A STORAGE INSTANCE IS // REUSED return; if (!exists()) throw new OStorageException("Cannot open the storage '" + name + "' because it does not exist in path: " + url); initWal(); status = STATUS.OPEN; // OPEN BASIC SEGMENTS int pos; addDefaultClusters(); // REGISTER CLUSTER for (int i = 0; i < configuration.clusters.size(); ++i) { final OStorageClusterConfiguration clusterConfig = configuration.clusters.get(i); if (clusterConfig != null) { pos = createClusterFromConfig(clusterConfig); try { if (pos == -1) { clusters[i].open(); } else { if (clusterConfig.getName().equals(CLUSTER_DEFAULT_NAME)) defaultClusterId = pos; clusters[pos].open(); } } catch (FileNotFoundException e) { OLogManager.instance().warn( this, "Error on loading cluster '" + clusters[i].getName() + "' (" + i + "): file not found. It will be excluded from current database '" + getName() + "'."); clusterMap.remove(clusters[i].getName()); clusters[i] = null; } } else { clusters = Arrays.copyOf(clusters, clusters.length + 1); clusters[i] = null; } } restoreIfNeeded(); } catch (Exception e) { close(true); throw new OStorageException("Cannot open local storage '" + url + "' with mode=" + mode, e); } finally { lock.releaseExclusiveLock(); } } private void restoreIfNeeded() throws IOException { boolean wasSoftlyClosed = true; for (OCluster cluster : clusters) if (cluster != null && !cluster.wasSoftlyClosed()) wasSoftlyClosed = false; if (!wasSoftlyClosed) { OLogManager.instance().warn(this, "Storage " + name + " was not closed properly. 
Will try to restore from write ahead log."); try { restoreFromWAL(); makeFullCheckpoint(); } catch (Exception e) { OLogManager.instance().error(this, "Exception during storage data restore.", e); } finally { OLogManager.instance().info(this, "Storage data restore was completed"); } } } private void restoreFromWAL() throws IOException { if (writeAheadLog == null) { OLogManager.instance().error(this, "Restore is not possible because write ahead logging is switched off."); return; } if (writeAheadLog.begin() == null) { OLogManager.instance().error(this, "Restore is not possible because write ahead log is empty."); return; } OLogManager.instance().info(this, "Try to find last checkpoint."); OLogSequenceNumber lastCheckPoint = writeAheadLog.getLastCheckpoint(); if (lastCheckPoint == null) { OLogManager.instance().info(this, "Checkpoints are absent will restore from beginning."); restoreFromBegging(); return; } OWALRecord checkPointRecord = writeAheadLog.read(lastCheckPoint); if (checkPointRecord == null) { OLogManager.instance().info(this, "Checkpoints are absent will restore from beginning."); restoreFromBegging(); return; } if (checkPointRecord instanceof OFuzzyCheckpointStartRecord) { OLogManager.instance().info(this, "Found checkpoint is fuzzy checkpoint."); boolean fuzzyCheckPointIsComplete = checkFuzzyCheckPointIsComplete(lastCheckPoint); if (!fuzzyCheckPointIsComplete) { OLogManager.instance().warn(this, "Fuzzy checkpoint is not complete."); OLogSequenceNumber previousCheckpoint = ((OFuzzyCheckpointStartRecord) checkPointRecord).getPreviousCheckpoint(); checkPointRecord = null; if (previousCheckpoint != null) checkPointRecord = writeAheadLog.read(previousCheckpoint); if (checkPointRecord != null) { OLogManager.instance().warn(this, "Will restore from previous checkpoint."); restoreFromCheckPoint((OAbstractCheckPointStartRecord) checkPointRecord); } else { OLogManager.instance().warn(this, "Will restore from beginning."); restoreFromBegging(); } } else restoreFromCheckPoint((OAbstractCheckPointStartRecord) checkPointRecord); return; } if (checkPointRecord instanceof OFullCheckpointStartRecord) { OLogManager.instance().info(this, "Found checkpoint is full checkpoint."); boolean fullCheckPointIsComplete = checkFullCheckPointIsComplete(lastCheckPoint); if (!fullCheckPointIsComplete) { OLogManager.instance().warn(this, "Full checkpoint is not complete."); OLogSequenceNumber previousCheckpoint = ((OFullCheckpointStartRecord) checkPointRecord).getPreviousCheckpoint(); checkPointRecord = null; if (previousCheckpoint != null) checkPointRecord = writeAheadLog.read(previousCheckpoint); if (checkPointRecord != null) { OLogManager.instance().warn(this, "Will restore from previous checkpoint."); } else { OLogManager.instance().warn(this, "Will restore from beginning."); restoreFromBegging(); } } else restoreFromCheckPoint((OAbstractCheckPointStartRecord) checkPointRecord); return; } throw new OStorageException("Unknown checkpoint record type " + checkPointRecord.getClass().getName()); } private boolean checkFullCheckPointIsComplete(OLogSequenceNumber lastCheckPoint) throws IOException { OLogSequenceNumber lsn = writeAheadLog.next(lastCheckPoint); while (lsn != null) { OWALRecord walRecord = writeAheadLog.read(lsn); if (walRecord instanceof OCheckpointEndRecord) return true; lsn = writeAheadLog.next(lsn); } return false; } private boolean checkFuzzyCheckPointIsComplete(OLogSequenceNumber lastCheckPoint) throws IOException { OLogSequenceNumber lsn = writeAheadLog.next(lastCheckPoint); while (lsn != 
null) { OWALRecord walRecord = writeAheadLog.read(lsn); if (walRecord instanceof OFuzzyCheckpointEndRecord) return true; lsn = writeAheadLog.next(lsn); } return false; } private void restoreFromCheckPoint(OAbstractCheckPointStartRecord checkPointRecord) throws IOException { if (checkPointRecord instanceof OFuzzyCheckpointStartRecord) { restoreFromFuzzyCheckPoint((OFuzzyCheckpointStartRecord) checkPointRecord); return; } if (checkPointRecord instanceof OFullCheckpointStartRecord) { restoreFromFullCheckPoint((OFullCheckpointStartRecord) checkPointRecord); return; } throw new OStorageException("Unknown checkpoint record type " + checkPointRecord.getClass().getName()); } private void restoreFromFullCheckPoint(OFullCheckpointStartRecord checkPointRecord) throws IOException { OLogManager.instance().info(this, "Data restore procedure from full checkpoint is started. Restore is performed from LSN %s", checkPointRecord.getLsn()); final OLogSequenceNumber lsn = writeAheadLog.next(checkPointRecord.getLsn()); restoreFrom(lsn); } private void restoreFromFuzzyCheckPoint(OFuzzyCheckpointStartRecord checkPointRecord) throws IOException { OLogManager.instance().info(this, "Data restore procedure from fuzzy checkpoint is started."); OLogSequenceNumber dirtyPagesLSN = writeAheadLog.next(checkPointRecord.getLsn()); ODirtyPagesRecord dirtyPagesRecord = (ODirtyPagesRecord) writeAheadLog.read(dirtyPagesLSN); OLogSequenceNumber startLSN; Set<ODirtyPage> dirtyPages = dirtyPagesRecord.getDirtyPages(); if (dirtyPages.isEmpty()) { startLSN = dirtyPagesLSN; } else { ODirtyPage[] pages = dirtyPages.toArray(new ODirtyPage[dirtyPages.size()]); Arrays.sort(pages, new Comparator<ODirtyPage>() { @Override public int compare(ODirtyPage pageOne, ODirtyPage pageTwo) { return pageOne.getLsn().compareTo(pageTwo.getLsn()); } }); startLSN = pages[0].getLsn(); } if (startLSN.compareTo(writeAheadLog.begin()) < 0) startLSN = writeAheadLog.begin(); restoreFrom(startLSN); } private void restoreFromBegging() throws IOException { OLogManager.instance().info(this, "Date restore procedure is started."); OLogSequenceNumber lsn = writeAheadLog.begin(); restoreFrom(lsn); } private void restoreFrom(OLogSequenceNumber lsn) throws IOException { wereDataRestoredAfterOpen = true; Map<OOperationUnitId, List<OWALRecord>> operationUnits = new HashMap<OOperationUnitId, List<OWALRecord>>(); while (lsn != null) { OWALRecord walRecord = writeAheadLog.read(lsn); if (walRecord instanceof OAtomicUnitStartRecord) { List<OWALRecord> operationList = new ArrayList<OWALRecord>(); operationUnits.put(((OAtomicUnitStartRecord) walRecord).getOperationUnitId(), operationList); operationList.add(walRecord); } else if (walRecord instanceof OOperationUnitRecord) { OOperationUnitRecord operationUnitRecord = (OOperationUnitRecord) walRecord; OOperationUnitId unitId = operationUnitRecord.getOperationUnitId(); List<OWALRecord> records = operationUnits.get(unitId); assert records != null; records.add(walRecord); if (operationUnitRecord instanceof OAtomicUnitEndRecord) { OAtomicUnitEndRecord atomicUnitEndRecord = (OAtomicUnitEndRecord) walRecord; if (atomicUnitEndRecord.isRollback()) undoOperation(records); else redoOperation(records); operationUnits.remove(unitId); } } else OLogManager.instance().warn(this, "Record %s will be skipped during data restore.", walRecord); lsn = writeAheadLog.next(lsn); } rollbackAllUnfinishedWALOperations(operationUnits); } private void redoOperation(List<OWALRecord> records) throws IOException { for (int i = 0; i < records.size(); i++) { 
OWALRecord record = records.get(i); if (checkFirstAtomicUnitRecord(i, record)) continue; if (checkLastAtomicUnitRecord(i, record, records.size())) continue; if (record instanceof OUpdatePageRecord) { final OUpdatePageRecord updatePageRecord = (OUpdatePageRecord) record; final long fileId = updatePageRecord.getFileId(); final long pageIndex = updatePageRecord.getPageIndex(); if (!diskCache.isOpen(fileId)) diskCache.openFile(fileId); final OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, true); final OCachePointer cachePointer = cacheEntry.getCachePointer(); cachePointer.acquireExclusiveLock(); try { ODurablePage durablePage = new ODurablePage(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE); durablePage.restoreChanges(updatePageRecord.getChanges()); durablePage.setLsn(updatePageRecord.getLsn()); cacheEntry.markDirty(); } finally { cachePointer.releaseExclusiveLock(); diskCache.release(cacheEntry); } } else { OLogManager.instance().error(this, "Invalid WAL record type was passed %s. Given record will be skipped.", record.getClass()); assert false : "Invalid WAL record type was passed " + record.getClass().getName(); } } } private void rollbackAllUnfinishedWALOperations(Map<OOperationUnitId, List<OWALRecord>> operationUnits) throws IOException { for (List<OWALRecord> operationUnit : operationUnits.values()) { if (operationUnit.isEmpty()) continue; final OAtomicUnitStartRecord atomicUnitStartRecord = (OAtomicUnitStartRecord) operationUnit.get(0); if (!atomicUnitStartRecord.isRollbackSupported()) continue; final OAtomicUnitEndRecord atomicUnitEndRecord = new OAtomicUnitEndRecord(atomicUnitStartRecord.getOperationUnitId(), true); writeAheadLog.log(atomicUnitEndRecord); operationUnit.add(atomicUnitEndRecord); undoOperation(operationUnit); } } public boolean wereDataRestoredAfterOpen() { return wereDataRestoredAfterOpen; } public void create(final Map<String, Object> iProperties) { lock.acquireExclusiveLock(); try { if (status != STATUS.CLOSED) throw new OStorageException("Cannot create new storage '" + name + "' because it is not closed"); addUser(); final File storageFolder = new File(storagePath); if (!storageFolder.exists()) storageFolder.mkdirs(); if (exists()) throw new OStorageException("Cannot create new storage '" + name + "' because it already exists"); initWal(); status = STATUS.OPEN; // ADD THE METADATA CLUSTER TO STORE INTERNAL STUFF doAddCluster(OMetadataDefault.CLUSTER_INTERNAL_NAME, null, false, null); // ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF // INDEXING doAddCluster(OMetadataDefault.CLUSTER_INDEX_NAME, null, false, null); // ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF // INDEXING doAddCluster(OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME, null, false, null); // ADD THE DEFAULT CLUSTER defaultClusterId = doAddCluster(CLUSTER_DEFAULT_NAME, null, false, null); configuration.create(); if (OGlobalConfiguration.STORAGE_MAKE_FULL_CHECKPOINT_AFTER_CREATE.getValueAsBoolean()) makeFullCheckpoint(); } catch (OStorageException e) { close(); throw e; } catch (IOException e) { close(); throw new OStorageException("Error on creation of storage '" + name + "'", e); } finally { lock.releaseExclusiveLock(); } } public void reload() { } public boolean exists() { return exists(storagePath); } private boolean exists(String path) { return new File(path + "/" + OMetadataDefault.CLUSTER_INTERNAL_NAME + OPaginatedCluster.DEF_EXTENSION).exists(); } @Override public void close(final boolean force) { doClose(force, true); } private void 
doClose(boolean force, boolean flush) { final long timer = Orient.instance().getProfiler().startChrono(); lock.acquireExclusiveLock(); try { if (!checkForClose(force)) return; status = STATUS.CLOSING; makeFullCheckpoint(); if (writeAheadLog != null) { fuzzyCheckpointExecutor.shutdown(); if (!fuzzyCheckpointExecutor.awaitTermination( OGlobalConfiguration.WAL_FUZZY_CHECKPOINT_SHUTDOWN_TIMEOUT.getValueAsInteger(), TimeUnit.SECONDS)) throw new OStorageException("Can not terminate fuzzy checkpoint task"); checkpointExecutor.shutdown(); if (!checkpointExecutor.awaitTermination(OGlobalConfiguration.WAL_FULL_CHECKPOINT_SHUTDOWN_TIMEOUT.getValueAsInteger(), TimeUnit.SECONDS)) throw new OStorageException("Can not terminate full checkpoint task"); } for (OCluster cluster : clusters) if (cluster != null) cluster.close(flush); clusters = new OCluster[0]; clusterMap.clear(); if (configuration != null) configuration.close(); level2Cache.shutdown(); super.close(force); diskCache.close(); if (writeAheadLog != null) writeAheadLog.delete(); Orient.instance().unregisterStorage(this); status = STATUS.CLOSED; } catch (InterruptedException ie) { OLogManager.instance().error(this, "Error on closing of storage '" + name, ie, OStorageException.class); Thread.interrupted(); } catch (IOException e) { OLogManager.instance().error(this, "Error on closing of storage '" + name, e, OStorageException.class); } finally { lock.releaseExclusiveLock(); Orient.instance().getProfiler().stopChrono("db." + name + ".close", "Close a database", timer, "db.*.close"); } } public void delete() { // CLOSE THE DATABASE BY REMOVING THE CURRENT USER if (status != STATUS.CLOSED) { if (getUsers() > 0) { while (removeUser() > 0) ; } } doClose(true, false); try { Orient.instance().unregisterStorage(this); } catch (Exception e) { OLogManager.instance().error(this, "Cannot unregister storage", e); } final long timer = Orient.instance().getProfiler().startChrono(); // GET REAL DIRECTORY File dbDir = new File(OIOUtils.getPathFromDatabaseName(OSystemVariableResolver.resolveSystemVariables(url))); if (!dbDir.exists() || !dbDir.isDirectory()) dbDir = dbDir.getParentFile(); lock.acquireExclusiveLock(); try { if (writeAheadLog != null) writeAheadLog.delete(); if (diskCache != null) diskCache.delete(); // RETRIES for (int i = 0; i < DELETE_MAX_RETRIES; ++i) { if (dbDir.exists() && dbDir.isDirectory()) { int notDeletedFiles = 0; // TRY TO DELETE ALL THE FILES for (File f : dbDir.listFiles()) { // DELETE ONLY THE SUPPORTED FILES for (String ext : ALL_FILE_EXTENSIONS) if (f.getPath().endsWith(ext)) { if (!f.delete()) { notDeletedFiles++; } break; } } if (notDeletedFiles == 0) { // TRY TO DELETE ALSO THE DIRECTORY IF IT'S EMPTY dbDir.delete(); return; } } else return; OLogManager .instance() .debug( this, "Cannot delete database files because they are still locked by the OrientDB process: waiting %d ms and retrying %d/%d...", DELETE_WAIT_TIME, i, DELETE_MAX_RETRIES); // FORCE FINALIZATION TO COLLECT ALL THE PENDING BUFFERS OMemoryWatchDog.freeMemoryForResourceCleanup(DELETE_WAIT_TIME); } throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ". Database files seem locked"); } catch (IOException e) { throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ".", e); } finally { lock.releaseExclusiveLock(); Orient.instance().getProfiler().stopChrono("db." 
+ name + ".drop", "Drop a database", timer, "db.*.drop"); } } public boolean check(final boolean verbose, final OCommandOutputListener listener) { lock.acquireExclusiveLock(); try { final long start = System.currentTimeMillis(); OPageDataVerificationError[] pageErrors = diskCache.checkStoredPages(verbose ? listener : null); listener.onMessage("Check of storage completed in " + (System.currentTimeMillis() - start) + "ms. " + (pageErrors.length > 0 ? pageErrors.length + " with errors." : " without errors.")); return pageErrors.length == 0; } finally { lock.releaseExclusiveLock(); } } public ODataLocal getDataSegmentById(final int dataSegmentId) { OLogManager.instance().error( this, "getDataSegmentById: Local paginated storage does not support data segments. " + "null will be returned for data segment %d.", dataSegmentId); return null; } public int getDataSegmentIdByName(final String dataSegmentName) { OLogManager.instance().error( this, "getDataSegmentIdByName: Local paginated storage does not support data segments. " + "-1 will be returned for data segment %s.", dataSegmentName); return -1; } /** * Add a new data segment in the default segment directory and with filename equals to the cluster name. */ public int addDataSegment(final String iDataSegmentName) { return addDataSegment(iDataSegmentName, null); } public void enableFullCheckPointAfterClusterCreate() { checkOpeness(); lock.acquireExclusiveLock(); try { makeFullCheckPointAfterClusterCreate = true; } finally { lock.releaseExclusiveLock(); } } public void disableFullCheckPointAfterClusterCreate() { checkOpeness(); lock.acquireExclusiveLock(); try { makeFullCheckPointAfterClusterCreate = false; } finally { lock.releaseExclusiveLock(); } } public boolean isMakeFullCheckPointAfterClusterCreate() { checkOpeness(); lock.acquireSharedLock(); try { return makeFullCheckPointAfterClusterCreate; } finally { lock.releaseSharedLock(); } } public int addDataSegment(String segmentName, final String directory) { OLogManager.instance().error( this, "addDataSegment: Local paginated storage does not support data" + " segments, segment %s will not be added in directory %s.", segmentName, directory); return -1; } public int addCluster(final String clusterType, String clusterName, final String location, final String dataSegmentName, boolean forceListBased, final Object... parameters) { checkOpeness(); lock.acquireExclusiveLock(); try { return doAddCluster(clusterName, location, true, parameters); } catch (Exception e) { OLogManager.instance().exception("Error in creation of new cluster '" + clusterName + "' of type: " + clusterType, e, OStorageException.class); } finally { lock.releaseExclusiveLock(); } return -1; } private int doAddCluster(String clusterName, String location, boolean fullCheckPoint, Object[] parameters) throws IOException { // FIND THE FIRST AVAILABLE CLUSTER ID int clusterPos = clusters.length; for (int i = 0; i < clusters.length; ++i) { if (clusters[i] == null) { clusterPos = i; break; } } return addClusterInternal(clusterName, clusterPos, location, fullCheckPoint, parameters); } public int addCluster(String clusterType, String clusterName, int requestedId, String location, String dataSegmentName, boolean forceListBased, Object... 
parameters) { lock.acquireExclusiveLock(); try { if (requestedId < 0) { throw new OConfigurationException("Cluster id must be positive!"); } if (requestedId < clusters.length && clusters[requestedId] != null) { throw new OConfigurationException("Requested cluster ID [" + requestedId + "] is occupied by cluster with name [" + clusters[requestedId].getName() + "]"); } return addClusterInternal(clusterName, requestedId, location, true, parameters); } catch (Exception e) { OLogManager.instance().exception("Error in creation of new cluster '" + clusterName + "' of type: " + clusterType, e, OStorageException.class); } finally { lock.releaseExclusiveLock(); } return -1; } private int addClusterInternal(String clusterName, int clusterPos, String location, boolean fullCheckPoint, Object... parameters) throws IOException { final OCluster cluster; if (clusterName != null) { clusterName = clusterName.toLowerCase(); cluster = OPaginatedClusterFactory.INSTANCE.createCluster(configuration.version); cluster.configure(this, clusterPos, clusterName, location, -1, parameters); if (clusterName.equals(OMVRBTreeRIDProvider.PERSISTENT_CLASS_NAME.toLowerCase())) { cluster.set(OCluster.ATTRIBUTES.USE_WAL, false); cluster.set(OCluster.ATTRIBUTES.RECORD_GROW_FACTOR, 5); cluster.set(OCluster.ATTRIBUTES.RECORD_OVERFLOW_GROW_FACTOR, 2); } } else { cluster = null; } final int createdClusterId = registerCluster(cluster); if (cluster != null) { if (!cluster.exists()) { cluster.create(-1); if (makeFullCheckPointAfterClusterCreate && fullCheckPoint) makeFullCheckpoint(); } else { cluster.open(); } configuration.update(); } return createdClusterId; } public boolean dropCluster(final int iClusterId, final boolean iTruncate) { lock.acquireExclusiveLock(); try { if (iClusterId < 0 || iClusterId >= clusters.length) throw new IllegalArgumentException("Cluster id '" + iClusterId + "' is outside the of range of configured clusters (0-" + (clusters.length - 1) + ") in database '" + name + "'"); final OCluster cluster = clusters[iClusterId]; if (cluster == null) return false; getLevel2Cache().freeCluster(iClusterId); if (iTruncate) cluster.truncate(); cluster.delete(); clusterMap.remove(cluster.getName()); clusters[iClusterId] = null; // UPDATE CONFIGURATION configuration.dropCluster(iClusterId); makeFullCheckpoint(); return true; } catch (Exception e) { OLogManager.instance().exception("Error while removing cluster '" + iClusterId + "'", e, OStorageException.class); } finally { lock.releaseExclusiveLock(); } return false; } public boolean dropDataSegment(final String iName) { throw new UnsupportedOperationException("dropDataSegment"); } public long count(final int iClusterId) { return count(iClusterId, false); } @Override public long count(int iClusterId, boolean countTombstones) { if (iClusterId == -1) throw new OStorageException("Cluster Id " + iClusterId + " is invalid in database '" + name + "'"); // COUNT PHYSICAL CLUSTER IF ANY checkOpeness(); lock.acquireSharedLock(); try { final OCluster cluster = clusters[iClusterId]; if (cluster == null) return 0; if (countTombstones) return cluster.getEntries(); return cluster.getEntries() - cluster.getTombstonesCount(); } finally { lock.releaseSharedLock(); } } public OClusterPosition[] getClusterDataRange(final int iClusterId) { if (iClusterId == -1) return new OClusterPosition[] { OClusterPosition.INVALID_POSITION, OClusterPosition.INVALID_POSITION }; checkOpeness(); lock.acquireSharedLock(); try { return clusters[iClusterId] != null ? 
new OClusterPosition[] { clusters[iClusterId].getFirstPosition(), clusters[iClusterId].getLastPosition() } : new OClusterPosition[0]; } catch (IOException ioe) { throw new OStorageException("Can not retrieve information about data range", ioe); } finally { lock.releaseSharedLock(); } } public long count(final int[] iClusterIds) { return count(iClusterIds, false); } @Override public long count(int[] iClusterIds, boolean countTombstones) { checkOpeness(); lock.acquireSharedLock(); try { long tot = 0; for (int iClusterId : iClusterIds) { if (iClusterId >= clusters.length) throw new OConfigurationException("Cluster id " + iClusterId + " was not found in database '" + name + "'"); if (iClusterId > -1) { final OCluster c = clusters[iClusterId]; if (c != null) tot += c.getEntries() - (countTombstones ? 0L : c.getTombstonesCount()); } } return tot; } finally { lock.releaseSharedLock(); } } public OStorageOperationResult<OPhysicalPosition> createRecord(final int dataSegmentId, final ORecordId rid, final byte[] content, ORecordVersion recordVersion, final byte recordType, final int mode, final ORecordCallback<OClusterPosition> callback) { checkOpeness(); final long timer = Orient.instance().getProfiler().startChrono(); final OCluster cluster = getClusterById(rid.clusterId); cluster.getExternalModificationLock().requestModificationLock(); try { modificationLock.requestModificationLock(); try { checkOpeness(); if (content == null) throw new IllegalArgumentException("Record is null"); OPhysicalPosition ppos = new OPhysicalPosition(-1, -1, recordType); try { lock.acquireSharedLock(); try { if (recordVersion.getCounter() > -1) recordVersion.increment(); else recordVersion = OVersionFactory.instance().createVersion(); ppos = cluster.createRecord(content, recordVersion, recordType); rid.clusterPosition = ppos.clusterPosition; if (callback != null) callback.call(rid, ppos.clusterPosition); return new OStorageOperationResult<OPhysicalPosition>(ppos); } finally { lock.releaseSharedLock(); } } catch (IOException ioe) { try { if (ppos.clusterPosition != null && ppos.clusterPosition.compareTo(OClusterPosition.INVALID_POSITION) != 0) cluster.deleteRecord(ppos.clusterPosition); } catch (IOException e) { OLogManager.instance().error(this, "Error on removing record in cluster: " + cluster, e); } OLogManager.instance().error(this, "Error on creating record in cluster: " + cluster, ioe); return null; } } finally { modificationLock.releaseModificationLock(); } } finally { cluster.getExternalModificationLock().releaseModificationLock(); Orient.instance().getProfiler().stopChrono(PROFILER_CREATE_RECORD, "Create a record in database", timer, "db.*.createRecord"); } } @Override public ORecordMetadata getRecordMetadata(ORID rid) { if (rid.isNew()) throw new OStorageException("Passed record with id " + rid + " is new and can not be stored."); checkOpeness(); final OCluster cluster = getClusterById(rid.getClusterId()); lock.acquireSharedLock(); try { lockManager.acquireLock(Thread.currentThread(), rid, OLockManager.LOCK.SHARED); try { final OPhysicalPosition ppos = cluster.getPhysicalPosition(new OPhysicalPosition(rid.getClusterPosition())); if (ppos == null) return null; return new ORecordMetadata(rid, ppos.recordVersion); } finally { lockManager.releaseLock(Thread.currentThread(), rid, OLockManager.LOCK.SHARED); } } catch (IOException ioe) { OLogManager.instance().error(this, "Retrieval of record '" + rid + "' cause: " + ioe.getMessage(), ioe); } finally { lock.releaseSharedLock(); } return null; } @Override public 
OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRid, final String iFetchPlan, boolean iIgnoreCache, ORecordCallback<ORawBuffer> iCallback, boolean loadTombstones) { checkOpeness(); return new OStorageOperationResult<ORawBuffer>(readRecord(getClusterById(iRid.clusterId), iRid, true, loadTombstones)); } @Override protected ORawBuffer readRecord(final OCluster clusterSegment, final ORecordId rid, boolean atomicLock, boolean loadTombstones) { checkOpeness(); if (!rid.isPersistent()) throw new IllegalArgumentException("Cannot read record " + rid + " since the position is invalid in database '" + name + '\''); final long timer = Orient.instance().getProfiler().startChrono(); clusterSegment.getExternalModificationLock().requestModificationLock(); try { if (atomicLock) lock.acquireSharedLock(); try { lockManager.acquireLock(Thread.currentThread(), rid, OLockManager.LOCK.SHARED); try { return clusterSegment.readRecord(rid.clusterPosition); } finally { lockManager.releaseLock(Thread.currentThread(), rid, OLockManager.LOCK.SHARED); } } catch (IOException e) { OLogManager.instance().error(this, "Error on reading record " + rid + " (cluster: " + clusterSegment + ')', e); return null; } finally { if (atomicLock) lock.releaseSharedLock(); } } finally { clusterSegment.getExternalModificationLock().releaseModificationLock(); Orient.instance().getProfiler().stopChrono(PROFILER_READ_RECORD, "Read a record from database", timer, "db.*.readRecord"); } } public OStorageOperationResult<ORecordVersion> updateRecord(final ORecordId rid, final byte[] content, final ORecordVersion version, final byte recordType, final int mode, ORecordCallback<ORecordVersion> callback) { checkOpeness(); final long timer = Orient.instance().getProfiler().startChrono(); final OCluster cluster = getClusterById(rid.clusterId); cluster.getExternalModificationLock().requestModificationLock(); try { modificationLock.requestModificationLock(); try { lock.acquireSharedLock(); try { // GET THE SHARED LOCK AND GET AN EXCLUSIVE LOCK AGAINST THE RECORD lockManager.acquireLock(Thread.currentThread(), rid, OLockManager.LOCK.EXCLUSIVE); try { // UPDATE IT final OPhysicalPosition ppos = cluster.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition)); if (!checkForRecordValidity(ppos)) { final ORecordVersion recordVersion = OVersionFactory.instance().createUntrackedVersion(); if (callback != null) callback.call(rid, recordVersion); return new OStorageOperationResult<ORecordVersion>(recordVersion); } // VERSION CONTROL CHECK switch (version.getCounter()) { // DOCUMENT UPDATE, NO VERSION CONTROL case -1: ppos.recordVersion.increment(); break; // DOCUMENT UPDATE, NO VERSION CONTROL, NO VERSION UPDATE case -2: ppos.recordVersion.setCounter(-2); break; default: // MVCC CONTROL AND RECORD UPDATE OR WRONG VERSION VALUE // MVCC TRANSACTION: CHECK IF VERSION IS THE SAME if (!version.equals(ppos.recordVersion)) if (OFastConcurrentModificationException.enabled()) throw OFastConcurrentModificationException.instance(); else throw new OConcurrentModificationException(rid, ppos.recordVersion, version, ORecordOperation.UPDATED); ppos.recordVersion.increment(); } cluster.updateRecord(rid.clusterPosition, content, ppos.recordVersion, recordType); if (callback != null) callback.call(rid, ppos.recordVersion); return new OStorageOperationResult<ORecordVersion>(ppos.recordVersion); } finally { lockManager.releaseLock(Thread.currentThread(), rid, OLockManager.LOCK.EXCLUSIVE); } } catch (IOException e) { OLogManager.instance().error(this, "Error on 
updating record " + rid + " (cluster: " + cluster + ")", e); final ORecordVersion recordVersion = OVersionFactory.instance().createUntrackedVersion(); if (callback != null) callback.call(rid, recordVersion); return new OStorageOperationResult<ORecordVersion>(recordVersion); } finally { lock.releaseSharedLock(); } } finally { modificationLock.releaseModificationLock(); } } finally { cluster.getExternalModificationLock().releaseModificationLock(); Orient.instance().getProfiler().stopChrono(PROFILER_UPDATE_RECORD, "Update a record to database", timer, "db.*.updateRecord"); } } @Override public OStorageOperationResult<Boolean> deleteRecord(final ORecordId rid, final ORecordVersion version, final int mode, ORecordCallback<Boolean> callback) { checkOpeness(); final long timer = Orient.instance().getProfiler().startChrono(); final OCluster cluster = getClusterById(rid.clusterId); cluster.getExternalModificationLock().requestModificationLock(); try { modificationLock.requestModificationLock(); try { lock.acquireSharedLock(); try { lockManager.acquireLock(Thread.currentThread(), rid, OLockManager.LOCK.EXCLUSIVE); try { final OPhysicalPosition ppos = cluster.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition)); if (ppos == null) // ALREADY DELETED return new OStorageOperationResult<Boolean>(false); // MVCC TRANSACTION: CHECK IF VERSION IS THE SAME if (version.getCounter() > -1 && !ppos.recordVersion.equals(version)) if (OFastConcurrentModificationException.enabled()) throw OFastConcurrentModificationException.instance(); else throw new OConcurrentModificationException(rid, ppos.recordVersion, version, ORecordOperation.DELETED); cluster.deleteRecord(ppos.clusterPosition); return new OStorageOperationResult<Boolean>(true); } finally { lockManager.releaseLock(Thread.currentThread(), rid, OLockManager.LOCK.EXCLUSIVE); } } finally { lock.releaseSharedLock(); } } catch (IOException e) { OLogManager.instance().error(this, "Error on deleting record " + rid + "( cluster: " + cluster + ")", e); } finally { modificationLock.releaseModificationLock(); } } finally { cluster.getExternalModificationLock().releaseModificationLock(); Orient.instance().getProfiler() .stopChrono(PROFILER_DELETE_RECORD, "Delete a record from database", timer, "db.*.deleteRecord"); } return new OStorageOperationResult<Boolean>(false); } public boolean updateReplica(final int dataSegmentId, final ORecordId rid, final byte[] content, final ORecordVersion recordVersion, final byte recordType) throws IOException { throw new OStorageException("Support of hash based clusters is required."); } @Override public <V> V callInLock(Callable<V> iCallable, boolean iExclusiveLock) { if (iExclusiveLock) { modificationLock.requestModificationLock(); try { return super.callInLock(iCallable, iExclusiveLock); } finally { modificationLock.releaseModificationLock(); } } else { return super.callInLock(iCallable, iExclusiveLock); } } @Override public <V> V callInRecordLock(Callable<V> callable, ORID rid, boolean exclusiveLock) { if (exclusiveLock) modificationLock.requestModificationLock(); try { if (exclusiveLock) { lock.acquireExclusiveLock(); } else lock.acquireSharedLock(); try { lockManager .acquireLock(Thread.currentThread(), rid, exclusiveLock ? OLockManager.LOCK.EXCLUSIVE : OLockManager.LOCK.SHARED); try { return callable.call(); } finally { lockManager.releaseLock(Thread.currentThread(), rid, exclusiveLock ? 
OLockManager.LOCK.EXCLUSIVE : OLockManager.LOCK.SHARED); } } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new OException("Error on nested call in lock", e); } finally { if (exclusiveLock) { lock.releaseExclusiveLock(); } else lock.releaseSharedLock(); } } finally { if (exclusiveLock) modificationLock.releaseModificationLock(); } } public Set<String> getClusterNames() { checkOpeness(); lock.acquireSharedLock(); try { return clusterMap.keySet(); } finally { lock.releaseSharedLock(); } } public int getClusterIdByName(final String iClusterName) { checkOpeness(); if (iClusterName == null) throw new IllegalArgumentException("Cluster name is null"); if (iClusterName.length() == 0) throw new IllegalArgumentException("Cluster name is empty"); if (Character.isDigit(iClusterName.charAt(0))) return Integer.parseInt(iClusterName); // SEARCH IT BETWEEN PHYSICAL CLUSTERS lock.acquireSharedLock(); try { final OCluster segment = clusterMap.get(iClusterName.toLowerCase()); if (segment != null) return segment.getId(); } finally { lock.releaseSharedLock(); } return -1; } public String getClusterTypeByName(final String iClusterName) { checkOpeness(); if (iClusterName == null) throw new IllegalArgumentException("Cluster name is null"); // SEARCH IT BETWEEN PHYSICAL CLUSTERS lock.acquireSharedLock(); try { final OCluster segment = clusterMap.get(iClusterName.toLowerCase()); if (segment != null) return segment.getType(); } finally { lock.releaseSharedLock(); } return null; } public void commit(final OTransaction clientTx, Runnable callback) { modificationLock.requestModificationLock(); try { lock.acquireExclusiveLock(); try { if (writeAheadLog == null) throw new OStorageException("WAL mode is not active. Transactions are not supported in given mode"); startStorageTx(clientTx); final List<ORecordOperation> tmpEntries = new ArrayList<ORecordOperation>(); while (clientTx.getCurrentRecordEntries().iterator().hasNext()) { for (ORecordOperation txEntry : clientTx.getCurrentRecordEntries()) tmpEntries.add(txEntry); clientTx.clearRecordEntries(); for (ORecordOperation txEntry : tmpEntries) // COMMIT ALL THE SINGLE ENTRIES ONE BY ONE commitEntry(clientTx, txEntry); } if (callback != null) callback.run(); endStorageTx(); OTransactionAbstract.updateCacheFromEntries(clientTx, clientTx.getAllRecordEntries(), false); } catch (Exception e) { // WE NEED TO CALL ROLLBACK HERE, IN THE LOCK OLogManager.instance().debug(this, "Error during transaction commit, transaction will be rolled back (tx-id=%d)", e, clientTx.getId()); rollback(clientTx); if (e instanceof OException) throw ((OException) e); else throw new OStorageException("Error during transaction commit.", e); } finally { transaction = null; lock.releaseExclusiveLock(); } } finally { modificationLock.releaseModificationLock(); } } private void commitEntry(final OTransaction clientTx, final ORecordOperation txEntry) throws IOException { if (txEntry.type != ORecordOperation.DELETED && !txEntry.getRecord().isDirty()) return; final ORecordId rid = (ORecordId) txEntry.getRecord().getIdentity(); if (rid.clusterId == ORID.CLUSTER_ID_INVALID && txEntry.getRecord() instanceof ODocument && ((ODocument) txEntry.getRecord()).getSchemaClass() != null) { // TRY TO FIX CLUSTER ID TO THE DEFAULT CLUSTER ID DEFINED IN SCHEMA CLASS rid.clusterId = ((ODocument) txEntry.getRecord()).getSchemaClass().getDefaultClusterId(); } final OCluster cluster = getClusterById(rid.clusterId); if (cluster.getName().equals(OMetadataDefault.CLUSTER_INDEX_NAME) || 
cluster.getName().equals(OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME)) // AVOID TO COMMIT INDEX STUFF return; if (txEntry.getRecord() instanceof OTxListener) ((OTxListener) txEntry.getRecord()).onEvent(txEntry, OTxListener.EVENT.BEFORE_COMMIT); switch (txEntry.type) { case ORecordOperation.LOADED: break; case ORecordOperation.CREATED: { // CHECK 2 TIMES TO ASSURE THAT IT'S A CREATE OR AN UPDATE BASED ON RECURSIVE TO-STREAM METHOD byte[] stream = txEntry.getRecord().toStream(); if (stream == null) { OLogManager.instance().warn(this, "Null serialization on committing new record %s in transaction", rid); break; } final ORecordId oldRID = rid.isNew() ? rid.copy() : rid; if (rid.isNew()) { rid.clusterId = cluster.getId(); final OPhysicalPosition ppos; ppos = createRecord(-1, rid, stream, txEntry.getRecord().getRecordVersion(), txEntry.getRecord().getRecordType(), -1, null) .getResult(); rid.clusterPosition = ppos.clusterPosition; txEntry.getRecord().getRecordVersion().copyFrom(ppos.recordVersion); clientTx.updateIdentityAfterCommit(oldRID, rid); } else { txEntry .getRecord() .getRecordVersion() .copyFrom( updateRecord(rid, stream, txEntry.getRecord().getRecordVersion(), txEntry.getRecord().getRecordType(), -1, null) .getResult()); } break; } case ORecordOperation.UPDATED: { byte[] stream = txEntry.getRecord().toStream(); if (stream == null) { OLogManager.instance().warn(this, "Null serialization on committing updated record %s in transaction", rid); break; } txEntry .getRecord() .getRecordVersion() .copyFrom( updateRecord(rid, stream, txEntry.getRecord().getRecordVersion(), txEntry.getRecord().getRecordType(), -1, null) .getResult()); break; } case ORecordOperation.DELETED: { deleteRecord(rid, txEntry.getRecord().getRecordVersion(), -1, null); break; } } txEntry.getRecord().unsetDirty(); if (txEntry.getRecord() instanceof OTxListener) ((OTxListener) txEntry.getRecord()).onEvent(txEntry, OTxListener.EVENT.AFTER_COMMIT); } public void rollback(final OTransaction clientTx) { checkOpeness(); modificationLock.requestModificationLock(); try { lock.acquireExclusiveLock(); try { if (transaction == null) return; if (writeAheadLog == null) throw new OStorageException("WAL mode is not active. Transactions are not supported in given mode"); if (transaction.getClientTx().getId() != clientTx.getId()) throw new OStorageException( "Passed in and active transaction are different transactions. Passed in transaction can not be rolled back."); rollbackStorageTx(); OTransactionAbstract.updateCacheFromEntries(clientTx, clientTx.getAllRecordEntries(), false); } catch (IOException e) { throw new OStorageException("Error during transaction rollback.", e); } finally { transaction = null; lock.releaseExclusiveLock(); } } finally { modificationLock.releaseModificationLock(); } } @Override public boolean checkForRecordValidity(final OPhysicalPosition ppos) { return ppos != null && !ppos.recordVersion.isTombstone(); } public void synch() { checkOpeness(); final long timer = Orient.instance().getProfiler().startChrono(); lock.acquireExclusiveLock(); try { if (writeAheadLog != null) { makeFullCheckpoint(); return; } diskCache.flushBuffer(); if (configuration != null) configuration.synch(); } catch (IOException e) { throw new OStorageException("Error on synch storage '" + name + "'", e); } finally { lock.releaseExclusiveLock(); Orient.instance().getProfiler().stopChrono("db." 
+ name + ".synch", "Synch a database", timer, "db.*.synch"); } } public void setDefaultClusterId(final int defaultClusterId) { this.defaultClusterId = defaultClusterId; } public String getPhysicalClusterNameById(final int iClusterId) { checkOpeness(); lock.acquireSharedLock(); try { if (iClusterId >= clusters.length) return null; return clusters[iClusterId] != null ? clusters[iClusterId].getName() : null; } finally { lock.releaseSharedLock(); } } @Override public OStorageConfiguration getConfiguration() { return configuration; } public int getDefaultClusterId() { return defaultClusterId; } public OCluster getClusterById(int iClusterId) { lock.acquireSharedLock(); try { if (iClusterId == ORID.CLUSTER_ID_INVALID) // GET THE DEFAULT CLUSTER iClusterId = defaultClusterId; checkClusterSegmentIndexRange(iClusterId); final OCluster cluster = clusters[iClusterId]; if (cluster == null) throw new IllegalArgumentException("Cluster " + iClusterId + " is null"); return cluster; } finally { lock.releaseSharedLock(); } } private void checkClusterSegmentIndexRange(final int iClusterId) { if (iClusterId > clusters.length - 1) throw new IllegalArgumentException("Cluster segment #" + iClusterId + " does not exist in database '" + name + "'"); } @Override public OCluster getClusterByName(final String iClusterName) { lock.acquireSharedLock(); try { final OCluster cluster = clusterMap.get(iClusterName.toLowerCase()); if (cluster == null) throw new IllegalArgumentException("Cluster " + iClusterName + " does not exist in database '" + name + "'"); return cluster; } finally { lock.releaseSharedLock(); } } @Override public String getURL() { return OEngineLocalPaginated.NAME + ":" + url; } public long getSize() { lock.acquireSharedLock(); try { long size = 0; for (OCluster c : clusters) if (c != null) size += c.getRecordsSize(); return size; } catch (IOException ioe) { throw new OStorageException("Can not calculate records size"); } finally { lock.releaseSharedLock(); } } public String getStoragePath() { return storagePath; } @Override protected OPhysicalPosition updateRecord(OCluster cluster, ORecordId rid, byte[] recordContent, ORecordVersion recordVersion, byte recordType) { throw new UnsupportedOperationException("updateRecord"); } @Override protected OPhysicalPosition createRecord(ODataLocal dataSegment, OCluster cluster, byte[] recordContent, byte recordType, ORecordId rid, ORecordVersion recordVersion) { throw new UnsupportedOperationException("createRecord"); } public String getMode() { return mode; } public OStorageVariableParser getVariableParser() { return variableParser; } public int getClusters() { lock.acquireSharedLock(); try { return clusterMap.size(); } finally { lock.releaseSharedLock(); } } public Set<OCluster> getClusterInstances() { final Set<OCluster> result = new HashSet<OCluster>(); lock.acquireSharedLock(); try { // ADD ALL THE CLUSTERS for (OCluster c : clusters) if (c != null) result.add(c); } finally { lock.releaseSharedLock(); } return result; } /** * Method that completes the cluster rename operation. 
<strong>IT WILL NOT RENAME A CLUSTER, IT JUST CHANGES THE NAME IN THE * INTERNAL MAPPING</strong> */ public void renameCluster(final String iOldName, final String iNewName) { clusterMap.put(iNewName, clusterMap.remove(iOldName)); } @Override public boolean cleanOutRecord(ORecordId recordId, ORecordVersion recordVersion, int iMode, ORecordCallback<Boolean> callback) { return deleteRecord(recordId, recordVersion, iMode, callback).getResult(); } public void freeze(boolean throwException) { modificationLock.prohibitModifications(throwException); synch(); try { diskCache.setSoftlyClosed(true); if (configuration != null) configuration.setSoftlyClosed(true); } catch (IOException e) { throw new OStorageException("Error on freeze of storage '" + name + "'", e); } } public void release() { try { diskCache.setSoftlyClosed(false); if (configuration != null) configuration.setSoftlyClosed(false); } catch (IOException e) { throw new OStorageException("Error on release of storage '" + name + "'", e); } modificationLock.allowModifications(); } public boolean wasClusterSoftlyClosed(String clusterName) { lock.acquireSharedLock(); try { final OCluster indexCluster = clusterMap.get(clusterName); return indexCluster.wasSoftlyClosed(); } catch (IOException ioe) { throw new OStorageException("Error during index consistency check", ioe); } finally { lock.releaseSharedLock(); } } public void makeFuzzyCheckpoint() { // if (writeAheadLog == null) // return; // // try { // lock.acquireExclusiveLock(); // try { // writeAheadLog.flush(); // // writeAheadLog.logFuzzyCheckPointStart(); // // diskCache.forceSyncStoredChanges(); // diskCache.logDirtyPagesTable(); // // writeAheadLog.logFuzzyCheckPointEnd(); // // writeAheadLog.flush(); // } finally { // lock.releaseExclusiveLock(); // } // } catch (IOException ioe) { // throw new OStorageException("Error during fuzzy checkpoint creation for storage " + name, ioe); // } } public void makeFullCheckpoint() { if (writeAheadLog == null) return; lock.acquireExclusiveLock(); try { writeAheadLog.flush(); if (configuration != null) configuration.synch(); writeAheadLog.logFullCheckpointStart(); diskCache.flushBuffer(); writeAheadLog.logFullCheckpointEnd(); writeAheadLog.flush(); } catch (IOException ioe) { throw new OStorageException("Error during checkpoint creation for storage " + name, ioe); } finally { lock.releaseExclusiveLock(); } } public void scheduleFullCheckpoint() { if (checkpointExecutor != null) checkpointExecutor.execute(new Runnable() { @Override public void run() { try { makeFullCheckpoint(); } catch (Throwable t) { OLogManager.instance().error(this, "Error during background checkpoint creation for storage " + name, t); } } }); } @Override public String getType() { return OEngineLocalPaginated.NAME; } private int createClusterFromConfig(final OStorageClusterConfiguration iConfig) throws IOException { OCluster cluster = clusterMap.get(iConfig.getName()); if (cluster != null) { cluster.configure(this, iConfig); return -1; } cluster = OPaginatedClusterFactory.INSTANCE.createCluster(configuration.version); cluster.configure(this, iConfig); return registerCluster(cluster); } /** * Register the cluster internally. * * @param cluster * OCluster implementation * @return The id (physical position into the array) of the new cluster just created. First is 0. 
* @throws IOException */ private int registerCluster(final OCluster cluster) throws IOException { final int id; if (cluster != null) { // CHECK FOR DUPLICATION OF NAMES if (clusterMap.containsKey(cluster.getName())) throw new OConfigurationException("Cannot add segment '" + cluster.getName() + "' because it is already registered in database '" + name + "'"); // CREATE AND ADD THE NEW REF SEGMENT clusterMap.put(cluster.getName(), cluster); id = cluster.getId(); } else { id = clusters.length; } if (id >= clusters.length) { clusters = OArrays.copyOf(clusters, id + 1); } clusters[id] = cluster; return id; } private void addDefaultClusters() throws IOException { configuration.load(); final String storageCompression = OGlobalConfiguration.STORAGE_COMPRESSION_METHOD.getValueAsString(); createClusterFromConfig(new OStoragePaginatedClusterConfiguration(configuration, clusters.length, OMetadataDefault.CLUSTER_INTERNAL_NAME, null, true, 20, 4, storageCompression)); createClusterFromConfig(new OStoragePaginatedClusterConfiguration(configuration, clusters.length, OMetadataDefault.CLUSTER_INDEX_NAME, null, false, OStoragePaginatedClusterConfiguration.DEFAULT_GROW_FACTOR, OStoragePaginatedClusterConfiguration.DEFAULT_GROW_FACTOR, storageCompression)); createClusterFromConfig(new OStoragePaginatedClusterConfiguration(configuration, clusters.length, OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME, null, false, 1, 1, storageCompression)); defaultClusterId = createClusterFromConfig(new OStoragePaginatedClusterConfiguration(configuration, clusters.length, CLUSTER_DEFAULT_NAME, null, true, OStoragePaginatedClusterConfiguration.DEFAULT_GROW_FACTOR, OStoragePaginatedClusterConfiguration.DEFAULT_GROW_FACTOR, storageCompression)); } public ODiskCache getDiskCache() { return diskCache; } public void freeze(boolean throwException, int clusterId) { final OCluster cluster = getClusterById(clusterId); final String name = cluster.getName(); if (OMetadataDefault.CLUSTER_INDEX_NAME.equals(name) || OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME.equals(name)) { throw new IllegalArgumentException("It is impossible to freeze and release index or manual index cluster!"); } cluster.getExternalModificationLock().prohibitModifications(throwException); try { cluster.synch(); cluster.setSoftlyClosed(true); } catch (IOException e) { throw new OStorageException("Error on synch cluster '" + name + "'", e); } } public void release(int clusterId) { final OCluster cluster = getClusterById(clusterId); final String name = cluster.getName(); if (OMetadataDefault.CLUSTER_INDEX_NAME.equals(name) || OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME.equals(name)) { throw new IllegalArgumentException("It is impossible to freeze and release index or manualindex cluster!"); } try { cluster.setSoftlyClosed(false); } catch (IOException e) { throw new OStorageException("Error on unfreeze storage '" + name + "'", e); } cluster.getExternalModificationLock().allowModifications(); } }
1 (no label)
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_OLocalPaginatedStorage.java
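The updateRecord body in the record above guards each write with a per-record exclusive lock and an MVCC version check: a counter of -1 skips the check, a mismatch raises a concurrent-modification error, and the stored version is incremented only after the check passes. Below is a minimal, JDK-only sketch of that shape; MiniStore and VersionedRecord are made-up names, not OrientDB API.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.locks.ReentrantLock;

    // Hypothetical types -- only the optimistic version-check pattern is taken from updateRecord above.
    class VersionedRecord {
        byte[] content;
        int version;
    }

    class MiniStore {
        private final Map<Long, VersionedRecord> records = new ConcurrentHashMap<>();
        private final ReentrantLock recordLock = new ReentrantLock(); // stand-in for the per-rid lock manager

        void put(long rid, VersionedRecord rec) {
            records.put(rid, rec);
        }

        int update(long rid, byte[] newContent, int expectedVersion) {
            recordLock.lock();
            try {
                VersionedRecord rec = records.get(rid);
                if (rec == null)
                    throw new IllegalArgumentException("record " + rid + " not found");
                // -1 means "no version control", mirroring the switch on version.getCounter() above
                if (expectedVersion >= 0 && rec.version != expectedVersion)
                    throw new IllegalStateException(
                        "stored version " + rec.version + " != expected " + expectedVersion);
                rec.content = newContent;
                return ++rec.version; // bumped only after the check passes
            } finally {
                recordLock.unlock();
            }
        }
    }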
908
Thread t = new Thread(new Runnable() { public void run() { ILock lock = instance2.getLock(key); ICondition condition = lock.newCondition(conditionName); lock.lock(); try { condition.await(); } catch (InterruptedException e) { e.printStackTrace(); } finally { lock.unlock(); } signalCounter.incrementAndGet(); } });
0 (true)
hazelcast_src_test_java_com_hazelcast_concurrent_lock_ConditionTest.java
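The test thread in the record above parks on a distributed ICondition and bumps a counter once it is signalled. The same lock/await/signal choreography with plain java.util.concurrent primitives (not the Hazelcast API) looks like this:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;

    public class ConditionSketch {
        public static void main(String[] args) throws InterruptedException {
            Lock lock = new ReentrantLock();
            Condition condition = lock.newCondition();
            AtomicInteger signalCounter = new AtomicInteger();

            Thread waiter = new Thread(() -> {
                lock.lock();
                try {
                    condition.await();            // lock is released while waiting, reacquired on signal
                    signalCounter.incrementAndGet();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    lock.unlock();
                }
            });
            waiter.start();

            Thread.sleep(100);                    // crude way to let the waiter park first
            lock.lock();
            try {
                condition.signal();               // must hold the lock when signalling
            } finally {
                lock.unlock();
            }
            waiter.join();
            System.out.println("signals observed: " + signalCounter.get());
        }
    }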
651
public class CategoryDaoDataProvider { @DataProvider(name = "basicCategory") public static Object[][] provideBasicCategory() { Category category = new CategoryImpl(); category.setName("Yuban"); category.setDescription("Yuban"); category.setId(1001L); return new Object[][] { { category } }; } }
0 (true)
integration_src_test_java_org_broadleafcommerce_core_catalog_CategoryDaoDataProvider.java
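A TestNG test consumes the provider above by naming it in the @Test annotation. A minimal pairing might look like the following; the assertion bodies and the Category import are assumptions based on the provider's contents, not code from the project.

    import org.broadleafcommerce.core.catalog.domain.Category; // package assumed from the provider above
    import org.testng.Assert;
    import org.testng.annotations.Test;

    public class CategoryDaoTest {
        // dataProvider must match @DataProvider(name = "basicCategory");
        // dataProviderClass points at the class declaring the provider.
        @Test(dataProvider = "basicCategory", dataProviderClass = CategoryDaoDataProvider.class)
        public void testBasicCategory(Category category) {
            Assert.assertEquals(category.getName(), "Yuban");
            Assert.assertEquals(category.getId(), Long.valueOf(1001L));
        }
    }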
1,096
public class OSQLFunctionRuntime extends OSQLFilterItemAbstract { public OSQLFunction function; public Object[] configuredParameters; public Object[] runtimeParameters; public OSQLFunctionRuntime(final OBaseParser iQueryToParse, final String iText) { super(iQueryToParse, iText); } public boolean aggregateResults() { return function.aggregateResults(); } public boolean filterResult() { return function.filterResult(); } /** * Execute a function. * * @param iCurrentRecord * Current record * @param iCurrentResult * TODO * @param iRequester * @return */ public Object execute(final OIdentifiable iCurrentRecord, final Object iCurrentResult, final OCommandContext iContext) { // RESOLVE VALUES USING THE CURRENT RECORD for (int i = 0; i < configuredParameters.length; ++i) { if (configuredParameters[i] instanceof OSQLFilterItemField) { runtimeParameters[i] = ((OSQLFilterItemField) configuredParameters[i]).getValue(iCurrentRecord, iContext); if (runtimeParameters[i] == null && iCurrentResult instanceof OIdentifiable) // LOOK INTO THE CURRENT RESULT runtimeParameters[i] = ((OSQLFilterItemField) configuredParameters[i]).getValue((OIdentifiable) iCurrentResult, iContext); } else if (configuredParameters[i] instanceof OSQLFunctionRuntime) runtimeParameters[i] = ((OSQLFunctionRuntime) configuredParameters[i]).execute(iCurrentRecord, iCurrentResult, iContext); else if (configuredParameters[i] instanceof OSQLFilterItemVariable) { runtimeParameters[i] = ((OSQLFilterItemVariable) configuredParameters[i]).getValue(iCurrentRecord, iContext); if (runtimeParameters[i] == null && iCurrentResult instanceof OIdentifiable) // LOOK INTO THE CURRENT RESULT runtimeParameters[i] = ((OSQLFilterItemVariable) configuredParameters[i]).getValue((OIdentifiable) iCurrentResult, iContext); } else if (configuredParameters[i] instanceof OCommandSQL) { try { runtimeParameters[i] = ((OCommandSQL) configuredParameters[i]).setContext(iContext).execute(); } catch (OCommandExecutorNotFoundException e) { // TRY WITH SIMPLE CONDITION final String text = ((OCommandSQL) configuredParameters[i]).getText(); final OSQLPredicate pred = new OSQLPredicate(text); runtimeParameters[i] = pred.evaluate(iCurrentRecord instanceof ORecord<?> ? (ORecord<?>) iCurrentRecord : null, (ODocument) iCurrentResult, iContext); // REPLACE ORIGINAL PARAM configuredParameters[i] = pred; } } else if (configuredParameters[i] instanceof OSQLPredicate) { runtimeParameters[i] = ((OSQLPredicate) configuredParameters[i]).evaluate(iCurrentRecord.getRecord(), (iCurrentRecord instanceof ODocument ? (ODocument) iCurrentResult : null), iContext); } else { // plain value runtimeParameters[i] = configuredParameters[i]; } } final Object functionResult = function.execute(iCurrentRecord, iCurrentResult, runtimeParameters, iContext); return transformValue(iCurrentRecord, iContext, functionResult); } public Object getResult() { return transformValue(null, null, function.getResult()); } public void setResult(final Object iValue) { function.setResult(iValue); } @Override public Object getValue(final OIdentifiable iRecord, OCommandContext iContext) { return execute(iRecord != null ? 
(ORecordSchemaAware<?>) iRecord.getRecord() : null, null, iContext); } @Override public String getRoot() { return function.getName(); } @Override protected void setRoot(final OBaseParser iQueryToParse, final String iText) { final int beginParenthesis = iText.indexOf('('); // SEARCH FOR THE FUNCTION final String funcName = iText.substring(0, beginParenthesis); final List<String> funcParamsText = OStringSerializerHelper.getParameters(iText); function = OSQLEngine.getInstance().getFunction(funcName); if (function == null) throw new OCommandSQLParsingException("Unknow function " + funcName + "()"); // STRICT CHECK ON PARAMETERS // if (function.getMinParams() > -1 && funcParamsText.size() < function.getMinParams() || function.getMaxParams() > -1 // && funcParamsText.size() > function.getMaxParams()) // throw new IllegalArgumentException("Syntax error. Expected: " + function.getSyntax()); // PARSE PARAMETERS this.configuredParameters = new Object[funcParamsText.size()]; for (int i = 0; i < funcParamsText.size(); ++i) { this.configuredParameters[i] = OSQLHelper.parseValue(null, iQueryToParse, funcParamsText.get(i), null); } function.config(configuredParameters); // COPY STATIC VALUES this.runtimeParameters = new Object[configuredParameters.length]; for (int i = 0; i < configuredParameters.length; ++i) { if (!(configuredParameters[i] instanceof OSQLFilterItemField) && !(configuredParameters[i] instanceof OSQLFunctionRuntime)) runtimeParameters[i] = configuredParameters[i]; } } public OSQLFunction getFunction() { return function; } public Object[] getConfiguredParameters() { return configuredParameters; } public Object[] getRuntimeParameters() { return runtimeParameters; } }
1 (no label)
core_src_main_java_com_orientechnologies_orient_core_sql_functions_OSQLFunctionRuntime.java
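setRoot in the record above splits the raw text "name(params...)" into a function name and a parameter list before resolving the function. A naive, JDK-only sketch of that split follows; unlike OrientDB's OStringSerializerHelper.getParameters it does not handle quoted strings or nested parentheses.

    import java.util.ArrayList;
    import java.util.List;

    public class FunctionTextParser {
        public static void main(String[] args) {
            String text = "max(price, 100)";
            int open = text.indexOf('(');
            int close = text.lastIndexOf(')');
            String name = text.substring(0, open);
            List<String> params = new ArrayList<>();
            for (String p : text.substring(open + 1, close).split(",")) {
                if (!p.trim().isEmpty()) params.add(p.trim());
            }
            System.out.println(name + " -> " + params);   // max -> [price, 100]
        }
    }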
1,676
runnable = new Runnable() { public void run() { map.getAll(keys); } };
0 (true)
hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java
41
static class BaseIterator<K,V> extends Traverser<K,V> { final ConcurrentHashMapV8<K,V> map; Node<K,V> lastReturned; BaseIterator(Node<K,V>[] tab, int size, int index, int limit, ConcurrentHashMapV8<K,V> map) { super(tab, size, index, limit); this.map = map; advance(); } public final boolean hasNext() { return next != null; } public final boolean hasMoreElements() { return next != null; } public final void remove() { Node<K,V> p; if ((p = lastReturned) == null) throw new IllegalStateException(); lastReturned = null; map.replaceNode(p.key, null, null); } }
0 (true)
src_main_java_jsr166e_ConcurrentHashMapV8.java
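BaseIterator above implements remove() by remembering the node last returned from next() and clearing it afterwards; that remove-after-next contract is the general java.util.Iterator rule. A small demonstration with a plain HashMap:

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    public class IteratorRemoveSketch {
        public static void main(String[] args) {
            Map<String, Integer> map = new HashMap<>();
            map.put("a", 1);
            map.put("b", 2);
            map.put("c", 3);

            Iterator<Map.Entry<String, Integer>> it = map.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, Integer> e = it.next();   // next() establishes the "last returned" entry
                if (e.getValue() % 2 == 0) {
                    it.remove();                            // legal only after next(), like lastReturned above
                }
            }
            System.out.println(map);                        // {a=1, c=3}
        }
    }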
1,374
public abstract class CatalogEndpoint extends BaseEndpoint { @Resource(name="blCatalogService") protected CatalogService catalogService; @Resource(name = "blSearchService") protected SearchService searchService; @Resource(name = "blSearchFacetDTOService") protected SearchFacetDTOService facetService; @Resource(name = "blExploitProtectionService") protected ExploitProtectionService exploitProtectionService; //We don't inject this here because of a few dependency issues. Instead, we look this up dynamically //using the ApplicationContext protected StaticAssetService staticAssetService; /** * Search for {@code Product} by product id * * @param id the product id * @return the product instance with the given product id */ public ProductWrapper findProductById(HttpServletRequest request, Long id) { Product product = catalogService.findProductById(id); if (product != null) { ProductWrapper wrapper; wrapper = (ProductWrapper) context.getBean(ProductWrapper.class.getName()); wrapper.wrapDetails(product, request); return wrapper; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Product with Id " + id + " could not be found").build()); } /** * This uses Broadleaf's search service to search for products within a category. * @param request * @param q * @param categoryId * @param pageSize * @param page * @return */ public SearchResultsWrapper findProductsByCategoryAndQuery(HttpServletRequest request, Long categoryId, String q, Integer pageSize, Integer page) { try { if (StringUtils.isNotEmpty(q)) { q = StringUtils.trim(q); q = exploitProtectionService.cleanString(q); } else { throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST) .type(MediaType.TEXT_PLAIN).entity("Search query was empty. Set parameter 'q' to query for a product. (e.g. q=My Product Name).").build()); } } catch (ServiceException e) { throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST) .type(MediaType.TEXT_PLAIN).entity("The search query: " + q + " was incorrect or malformed.").build()); } if (categoryId == null) { throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST) .type(MediaType.TEXT_PLAIN).entity("The categoryId was null.").build()); } Category category = null; category = catalogService.findCategoryById(categoryId); if (category == null) { throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST) .type(MediaType.TEXT_PLAIN).entity("Category ID, " + categoryId + ", was not associated with a category.").build()); } List<SearchFacetDTO> availableFacets = searchService.getSearchFacets(); ProductSearchCriteria searchCriteria = facetService.buildSearchCriteria(request, availableFacets); try { ProductSearchResult result = null; result = searchService.findProductsByCategoryAndQuery(category, q, searchCriteria); facetService.setActiveFacetResults(result.getFacets(), request); SearchResultsWrapper wrapper = (SearchResultsWrapper) context.getBean(SearchResultsWrapper.class.getName()); wrapper.wrapDetails(result, request); return wrapper; } catch (ServiceException e) { throw new WebApplicationException(Response.status(Response.Status.INTERNAL_SERVER_ERROR) .type(MediaType.TEXT_PLAIN).entity("Problem occured executing search.").build()); } } /** * Queries the products. The parameter q, which represents the query, is required. It can be any * string, but is typically a name or keyword, similar to a search engine search. 
* @param request * @param q * @param pageSize * @param page * @return */ public SearchResultsWrapper findProductsByQuery(HttpServletRequest request, String q, Integer pageSize, Integer page) { try { if (StringUtils.isNotEmpty(q)) { q = StringUtils.trim(q); q = exploitProtectionService.cleanString(q); } else { throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST) .type(MediaType.TEXT_PLAIN).entity("Search query was empty. Set parameter 'q' to query for a product. (e.g. q=My Product Name).").build()); } } catch (ServiceException e) { throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST) .type(MediaType.TEXT_PLAIN).entity("The search query: " + q + " was incorrect or malformed.").build()); } List<SearchFacetDTO> availableFacets = searchService.getSearchFacets(); ProductSearchCriteria searchCriteria = facetService.buildSearchCriteria(request, availableFacets); try { ProductSearchResult result = null; result = searchService.findProductsByQuery(q, searchCriteria); facetService.setActiveFacetResults(result.getFacets(), request); SearchResultsWrapper wrapper = (SearchResultsWrapper) context.getBean(SearchResultsWrapper.class.getName()); wrapper.wrapDetails(result, request); return wrapper; } catch (ServiceException e) { throw new WebApplicationException(Response.status(Response.Status.INTERNAL_SERVER_ERROR) .type(MediaType.TEXT_PLAIN).entity("Problem occured executing search.").build()); } } /** * Search for {@code Sku} instances for a given product * * @param id * @return the list of sku instances for the product */ public List<SkuWrapper> findSkusByProductById(HttpServletRequest request, Long id) { Product product = catalogService.findProductById(id); if (product != null) { List<Sku> skus = product.getAllSkus(); List<SkuWrapper> out = new ArrayList<SkuWrapper>(); if (skus != null) { for (Sku sku : skus) { SkuWrapper wrapper = (SkuWrapper)context.getBean(SkuWrapper.class.getName()); wrapper.wrapSummary(sku, request); out.add(wrapper); } return out; } } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Product with Id " + id + " could not be found").build()); } public SkuWrapper findDefaultSkuByProductId(HttpServletRequest request, Long id) { Product product = catalogService.findProductById(id); if (product != null && product.getDefaultSku() != null) { SkuWrapper wrapper = (SkuWrapper)context.getBean(SkuWrapper.class.getName()); wrapper.wrapDetails(product.getDefaultSku(), request); return wrapper; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Product with Id " + id + " could not be found").build()); } public CategoriesWrapper findAllCategories(HttpServletRequest request, String name, int limit, int offset) { List<Category> categories; if (name != null) { categories = catalogService.findCategoriesByName(name, limit, offset); } else { categories = catalogService.findAllCategories(limit, offset); } CategoriesWrapper wrapper = (CategoriesWrapper)context.getBean(CategoriesWrapper.class.getName()); wrapper.wrapDetails(categories, request); return wrapper; } public CategoriesWrapper findSubCategories(HttpServletRequest request, Long id, int limit, int offset, boolean active) { Category category = catalogService.findCategoryById(id); if (category != null) { List<Category> categories; CategoriesWrapper wrapper = (CategoriesWrapper)context.getBean(CategoriesWrapper.class.getName()); if (active) { categories = 
catalogService.findActiveSubCategoriesByCategory(category, limit, offset); } else { categories = catalogService.findAllSubCategories(category, limit, offset); } wrapper.wrapDetails(categories, request); return wrapper; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Category with Id " + id + " could not be found").build()); } public CategoriesWrapper findActiveSubCategories(HttpServletRequest request, Long id, int limit, int offset) { return findSubCategories(request, id, limit, offset, true); } public CategoryWrapper findCategoryById(HttpServletRequest request, Long id, int productLimit, int productOffset, int subcategoryLimit, int subcategoryOffset) { Category cat = catalogService.findCategoryById(id); if (cat != null) { //Explicitly setting these request attributes because the CategoryWrapper.wrap() method needs them request.setAttribute("productLimit", productLimit); request.setAttribute("productOffset", productOffset); request.setAttribute("subcategoryLimit", subcategoryLimit); request.setAttribute("subcategoryOffset", subcategoryOffset); CategoryWrapper wrapper = (CategoryWrapper)context.getBean(CategoryWrapper.class.getName()); wrapper.wrapDetails(cat, request); return wrapper; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Category with Id " + id + " could not be found").build()); } /** * Allows you to search for a category by ID or by name. * @param request * @param searchParameter * @param productLimit * @param productOffset * @param subcategoryLimit * @param subcategoryOffset * @return */ public CategoryWrapper findCategoryByIdOrName(HttpServletRequest request, String searchParameter, int productLimit, int productOffset, int subcategoryLimit, int subcategoryOffset) { Category cat = null; if (searchParameter != null) { try { cat = catalogService.findCategoryById(Long.parseLong(searchParameter)); } catch (NumberFormatException e) { List<Category> categories = catalogService.findCategoriesByName(searchParameter); if (categories != null && !categories.isEmpty()) { cat = categories.get(0); } } } if (cat != null) { //Explicitly setting these request attributes because the CategoryWrapper.wrap() method needs them request.setAttribute("productLimit", productLimit); request.setAttribute("productOffset", productOffset); request.setAttribute("subcategoryLimit", subcategoryLimit); request.setAttribute("subcategoryOffset", subcategoryOffset); CategoryWrapper wrapper = (CategoryWrapper) context.getBean(CategoryWrapper.class.getName()); wrapper.wrapDetails(cat, request); return wrapper; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND) .type(MediaType.TEXT_PLAIN).entity("Category with Id or name of " + searchParameter + " could not be found").build()); } public List<CategoryAttributeWrapper> findCategoryAttributesForCategory(HttpServletRequest request, Long id) { Category category = catalogService.findCategoryById(id); if (category != null) { ArrayList<CategoryAttributeWrapper> out = new ArrayList<CategoryAttributeWrapper>(); if (category.getCategoryAttributes() != null) { for (CategoryAttribute attribute : category.getCategoryAttributes()) { CategoryAttributeWrapper wrapper = (CategoryAttributeWrapper)context.getBean(CategoryAttributeWrapper.class.getName()); wrapper.wrapSummary(attribute, request); out.add(wrapper); } } return out; } throw new 
WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Category with Id " + id + " could not be found").build()); } public List<RelatedProductWrapper> findUpSaleProductsByProduct(HttpServletRequest request, Long id, int limit, int offset) { Product product = catalogService.findProductById(id); if (product != null) { List<RelatedProductWrapper> out = new ArrayList<RelatedProductWrapper>(); //TODO: Write a service method that accepts offset and limit List<RelatedProduct> relatedProds = product.getUpSaleProducts(); if (relatedProds != null) { for (RelatedProduct prod : relatedProds) { RelatedProductWrapper wrapper = (RelatedProductWrapper)context.getBean(RelatedProductWrapper.class.getName()); wrapper.wrapSummary(prod, request); out.add(wrapper); } } return out; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Product with Id " + id + " could not be found").build()); } public List<RelatedProductWrapper> findCrossSaleProductsByProduct(HttpServletRequest request, Long id, int limit, int offset) { Product product = catalogService.findProductById(id); if (product != null) { List<RelatedProductWrapper> out = new ArrayList<RelatedProductWrapper>(); //TODO: Write a service method that accepts offset and limit List<RelatedProduct> xSellProds = product.getCrossSaleProducts(); if (xSellProds != null) { for (RelatedProduct prod : xSellProds) { RelatedProductWrapper wrapper = (RelatedProductWrapper)context.getBean(RelatedProductWrapper.class.getName()); wrapper.wrapSummary(prod, request); out.add(wrapper); } } return out; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Product with Id " + id + " could not be found").build()); } public List<ProductAttributeWrapper> findProductAttributesForProduct(HttpServletRequest request, Long id) { Product product = catalogService.findProductById(id); if (product != null) { ArrayList<ProductAttributeWrapper> out = new ArrayList<ProductAttributeWrapper>(); if (product.getProductAttributes() != null) { for (Map.Entry<String, ProductAttribute> entry : product.getProductAttributes().entrySet()) { ProductAttributeWrapper wrapper = (ProductAttributeWrapper)context.getBean(ProductAttributeWrapper.class.getName()); wrapper.wrapSummary(entry.getValue(), request); out.add(wrapper); } } return out; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Product with Id " + id + " could not be found").build()); } public List<SkuAttributeWrapper> findSkuAttributesForSku(HttpServletRequest request, Long id) { Sku sku = catalogService.findSkuById(id); if (sku != null) { ArrayList<SkuAttributeWrapper> out = new ArrayList<SkuAttributeWrapper>(); if (sku.getSkuAttributes() != null) { for (Map.Entry<String, SkuAttribute> entry : sku.getSkuAttributes().entrySet()) { SkuAttributeWrapper wrapper = (SkuAttributeWrapper)context.getBean(SkuAttributeWrapper.class.getName()); wrapper.wrapSummary(entry.getValue(), request); out.add(wrapper); } } return out; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Sku with Id " + id + " could not be found").build()); } public List<MediaWrapper> findMediaForSku(HttpServletRequest request, Long id) { Sku sku = catalogService.findSkuById(id); if (sku != null) { List<MediaWrapper> medias = new ArrayList<MediaWrapper>(); if (sku.getSkuMedia() != null && ! 
sku.getSkuMedia().isEmpty()) { for (Media media : sku.getSkuMedia().values()) { MediaWrapper wrapper = (MediaWrapper)context.getBean(MediaWrapper.class.getName()); wrapper.wrapSummary(media, request); if (wrapper.isAllowOverrideUrl()){ wrapper.setUrl(getStaticAssetService().convertAssetPath(media.getUrl(), request.getContextPath(), request.isSecure())); } medias.add(wrapper); } } return medias; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Sku with Id " + id + " could not be found").build()); } public SkuWrapper findSkuById(HttpServletRequest request, Long id) { Sku sku = catalogService.findSkuById(id); if (sku != null) { SkuWrapper wrapper = (SkuWrapper)context.getBean(SkuWrapper.class.getName()); wrapper.wrapDetails(sku, request); return wrapper; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Sku with Id " + id + " could not be found").build()); } public List<MediaWrapper> findMediaForProduct(HttpServletRequest request, Long id) { Product product = catalogService.findProductById(id); if (product != null) { ArrayList<MediaWrapper> out = new ArrayList<MediaWrapper>(); Map<String, Media> media = product.getMedia(); if (media != null) { for (Media med : media.values()) { MediaWrapper wrapper = (MediaWrapper)context.getBean(MediaWrapper.class.getName()); wrapper.wrapSummary(med, request); if (wrapper.isAllowOverrideUrl()){ wrapper.setUrl(getStaticAssetService().convertAssetPath(med.getUrl(), request.getContextPath(), request.isSecure())); } out.add(wrapper); } } return out; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Product with Id " + id + " could not be found").build()); } public List<MediaWrapper> findMediaForCategory(HttpServletRequest request, Long id) { Category category = catalogService.findCategoryById(id); if (category != null) { ArrayList<MediaWrapper> out = new ArrayList<MediaWrapper>(); Map<String, Media> media = category.getCategoryMedia(); for (Media med : media.values()) { MediaWrapper wrapper = (MediaWrapper)context.getBean(MediaWrapper.class.getName()); wrapper.wrapSummary(med, request); out.add(wrapper); } return out; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Category with Id " + id + " could not be found").build()); } public CategoriesWrapper findParentCategoriesForProduct(HttpServletRequest request, Long id) { Product product = catalogService.findProductById(id); if (product != null) { CategoriesWrapper wrapper = (CategoriesWrapper)context.getBean(CategoriesWrapper.class.getName()); List<Category> categories = new ArrayList<Category>(); for (CategoryProductXref categoryXref : product.getAllParentCategoryXrefs()) { categories.add(categoryXref.getCategory()); } wrapper.wrapDetails(categories, request); return wrapper; } throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).type(MediaType.TEXT_PLAIN).entity("Product with Id " + id + " could not be found").build()); } protected StaticAssetService getStaticAssetService() { if (staticAssetService == null) { staticAssetService = (StaticAssetService)this.context.getBean("blStaticAssetService"); } return staticAssetService; } public static void main(String[] args) { System.out.println(StringUtils.isNotEmpty(null)); } }
0 (true)
core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_endpoint_catalog_CatalogEndpoint.java
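The endpoint above reports missing entities by throwing WebApplicationException built from a JAX-RS Response. A small helper capturing that repeated pattern could look like this; RestErrors is a hypothetical name, while the Response and MediaType calls are standard JAX-RS.

    import javax.ws.rs.WebApplicationException;
    import javax.ws.rs.core.MediaType;
    import javax.ws.rs.core.Response;

    // Hypothetical helper mirroring the repeated "not found" blocks in CatalogEndpoint.
    public final class RestErrors {
        private RestErrors() { }

        public static WebApplicationException notFound(String entity, Object id) {
            return new WebApplicationException(
                    Response.status(Response.Status.NOT_FOUND)
                            .type(MediaType.TEXT_PLAIN)
                            .entity(entity + " with Id " + id + " could not be found")
                            .build());
        }
    }

    // Usage inside an endpoint method:
    //     if (product == null) throw RestErrors.notFound("Product", id);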
407
@Component("blEntityConfiguration") public class EntityConfiguration implements ApplicationContextAware { private static final Log LOG = LogFactory.getLog(EntityConfiguration.class); private ApplicationContext webApplicationContext; private final HashMap<String, Class<?>> entityMap = new HashMap<String, Class<?>>(50); private ApplicationContext applicationcontext; private Resource[] entityContexts; @javax.annotation.Resource(name="blMergedEntityContexts") protected Set<String> mergedEntityContexts; @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.webApplicationContext = applicationContext; } @PostConstruct public void configureMergedItems() { Set<Resource> temp = new LinkedHashSet<Resource>(); if (mergedEntityContexts != null && !mergedEntityContexts.isEmpty()) { for (String location : mergedEntityContexts) { temp.add(webApplicationContext.getResource(location)); } } if (entityContexts != null) { for (Resource resource : entityContexts) { temp.add(resource); } } entityContexts = temp.toArray(new Resource[temp.size()]); applicationcontext = new GenericXmlApplicationContext(entityContexts); } public Class<?> lookupEntityClass(String beanId) { Class<?> clazz; if (entityMap.containsKey(beanId)) { clazz = entityMap.get(beanId); } else { Object object = applicationcontext.getBean(beanId); clazz = object.getClass(); entityMap.put(beanId, clazz); } if (LOG.isDebugEnabled()) { LOG.debug("Returning class (" + clazz.getName() + ") configured with bean id (" + beanId + ')'); } return clazz; } public String[] getEntityBeanNames() { return applicationcontext.getBeanDefinitionNames(); } public <T> Class<T> lookupEntityClass(String beanId, Class<T> resultClass) { Class<T> clazz; if (entityMap.containsKey(beanId)) { clazz = (Class<T>) entityMap.get(beanId); } else { Object object = applicationcontext.getBean(beanId); clazz = (Class<T>) object.getClass(); entityMap.put(beanId, clazz); } if (LOG.isDebugEnabled()) { LOG.debug("Returning class (" + clazz.getName() + ") configured with bean id (" + beanId + ')'); } return clazz; } public Object createEntityInstance(String beanId) { Object bean = applicationcontext.getBean(beanId); if (LOG.isDebugEnabled()) { LOG.debug("Returning instance of class (" + bean.getClass().getName() + ") configured with bean id (" + beanId + ')'); } return bean; } public <T> T createEntityInstance(String beanId, Class<T> resultClass) { T bean = (T) applicationcontext.getBean(beanId); if (LOG.isDebugEnabled()) { LOG.debug("Returning instance of class (" + bean.getClass().getName() + ") configured with bean id (" + beanId + ')'); } return bean; } public Resource[] getEntityContexts() { return entityContexts; } public void setEntityContexts(Resource[] entityContexts) { this.entityContexts = entityContexts; } }
0 (true)
common_src_main_java_org_broadleafcommerce_common_persistence_EntityConfiguration.java
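lookupEntityClass above caches the bean-id to class mapping in a HashMap before falling back to the application context. A thread-safe sketch of the same cache-then-resolve idea, with a plain function standing in for applicationcontext.getBean(beanId).getClass():

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    // Illustrative only; the resolver replaces the Spring context lookup used above.
    public class EntityClassCache {
        private final ConcurrentHashMap<String, Class<?>> cache = new ConcurrentHashMap<>();
        private final Function<String, Class<?>> resolver;

        public EntityClassCache(Function<String, Class<?>> resolver) {
            this.resolver = resolver;
        }

        public Class<?> lookup(String beanId) {
            // computeIfAbsent resolves the class at most once per beanId, even under contention
            return cache.computeIfAbsent(beanId, resolver);
        }
    }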
183
static final class Node { final boolean isData; // false if this is a request node volatile Object item; // initially non-null if isData; CASed to match volatile Node next; volatile Thread waiter; // null until waiting // CAS methods for fields final boolean casNext(Node cmp, Node val) { return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val); } final boolean casItem(Object cmp, Object val) { // assert cmp == null || cmp.getClass() != Node.class; return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val); } /** * Constructs a new node. Uses relaxed write because item can * only be seen after publication via casNext. */ Node(Object item, boolean isData) { UNSAFE.putObject(this, itemOffset, item); // relaxed write this.isData = isData; } /** * Links node to itself to avoid garbage retention. Called * only after CASing head field, so uses relaxed write. */ final void forgetNext() { UNSAFE.putObject(this, nextOffset, this); } /** * Sets item to self and waiter to null, to avoid garbage * retention after matching or cancelling. Uses relaxed writes * because order is already constrained in the only calling * contexts: item is forgotten only after volatile/atomic * mechanics that extract items. Similarly, clearing waiter * follows either CAS or return from park (if ever parked; * else we don't care). */ final void forgetContents() { UNSAFE.putObject(this, itemOffset, this); UNSAFE.putObject(this, waiterOffset, null); } /** * Returns true if this node has been matched, including the * case of artificial matches due to cancellation. */ final boolean isMatched() { Object x = item; return (x == this) || ((x == null) == isData); } /** * Returns true if this is an unmatched request node. */ final boolean isUnmatchedRequest() { return !isData && item == null; } /** * Returns true if a node with the given mode cannot be * appended to this node because this node is unmatched and * has opposite data mode. */ final boolean cannotPrecede(boolean haveData) { boolean d = isData; Object x; return d != haveData && (x = item) != this && (x != null) == d; } /** * Tries to artificially match a data node -- used by remove. */ final boolean tryMatchData() { // assert isData; Object x = item; if (x != null && x != this && casItem(x, null)) { LockSupport.unpark(waiter); return true; } return false; } private static final long serialVersionUID = -3375979862319811754L; // Unsafe mechanics private static final sun.misc.Unsafe UNSAFE; private static final long itemOffset; private static final long nextOffset; private static final long waiterOffset; static { try { UNSAFE = getUnsafe(); Class<?> k = Node.class; itemOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("item")); nextOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("next")); waiterOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("waiter")); } catch (Exception e) { throw new Error(e); } } }
0 (true)
src_main_java_jsr166y_LinkedTransferQueue.java
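The Node class above wires up sun.misc.Unsafe field offsets to CAS its item and next fields. The supported JDK equivalent of casItem/casNext is AtomicReferenceFieldUpdater over volatile fields; a sketch of the same pattern, not the jsr166 implementation:

    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    class CasNode {
        volatile Object item;
        volatile CasNode next;

        private static final AtomicReferenceFieldUpdater<CasNode, Object> ITEM =
                AtomicReferenceFieldUpdater.newUpdater(CasNode.class, Object.class, "item");
        private static final AtomicReferenceFieldUpdater<CasNode, CasNode> NEXT =
                AtomicReferenceFieldUpdater.newUpdater(CasNode.class, CasNode.class, "next");

        boolean casItem(Object expect, Object update) {
            return ITEM.compareAndSet(this, expect, update);
        }

        boolean casNext(CasNode expect, CasNode update) {
            return NEXT.compareAndSet(this, expect, update);
        }
    }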
2,359
class KeyValueSourceFacade<K, V> extends KeyValueSource<K, V> { private static final int UPDATE_PROCESSED_RECORDS_INTERVAL = 1000; private final ILogger logger; private final KeyValueSource<K, V> keyValueSource; private final JobSupervisor supervisor; private int processedRecords; KeyValueSourceFacade(KeyValueSource<K, V> keyValueSource, JobSupervisor supervisor) { this.keyValueSource = keyValueSource; this.supervisor = supervisor; this.logger = supervisor.getMapReduceService().getNodeEngine().getLogger(KeyValueSourceFacade.class); } @Override public boolean open(NodeEngine nodeEngine) { return keyValueSource.open(nodeEngine); } @Override public boolean hasNext() { return keyValueSource.hasNext(); } @Override public K key() { K key = keyValueSource.key(); processedRecords++; if (processedRecords == UPDATE_PROCESSED_RECORDS_INTERVAL) { notifyProcessStats(); processedRecords = 0; } return key; } @Override public Map.Entry<K, V> element() { return keyValueSource.element(); } @Override public boolean reset() { processedRecords = 0; return keyValueSource.reset(); } @Override public boolean isAllKeysSupported() { return keyValueSource.isAllKeysSupported(); } @Override protected Collection<K> getAllKeys0() { return keyValueSource.getAllKeys(); } @Override public void close() throws IOException { notifyProcessStats(); keyValueSource.close(); } private void notifyProcessStats() { if (processedRecords > 0) { try { MapReduceService mapReduceService = supervisor.getMapReduceService(); String name = supervisor.getConfiguration().getName(); String jobId = supervisor.getConfiguration().getJobId(); Address jobOwner = supervisor.getJobOwner(); mapReduceService.processRequest(jobOwner, new ProcessStatsUpdateOperation(name, jobId, processedRecords), name); } catch (Exception ignore) { // Don't care if wasn't executed properly logger.finest("ProcessedRecords update couldn't be executed", ignore); } } } }
1 (no label)
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_KeyValueSourceFacade.java
639
public class CollectionAddAllBackupOperation extends CollectionOperation implements BackupOperation { protected Map<Long, Data> valueMap; public CollectionAddAllBackupOperation() { } public CollectionAddAllBackupOperation(String name, Map<Long, Data> valueMap) { super(name); this.valueMap = valueMap; } @Override public int getId() { return CollectionDataSerializerHook.COLLECTION_ADD_ALL_BACKUP; } @Override public void beforeRun() throws Exception { } @Override public void run() throws Exception { getOrCreateContainer().addAllBackup(valueMap); } @Override public void afterRun() throws Exception { } @Override protected void writeInternal(ObjectDataOutput out) throws IOException { super.writeInternal(out); out.writeInt(valueMap.size()); for (Map.Entry<Long, Data> entry : valueMap.entrySet()) { out.writeLong(entry.getKey()); entry.getValue().writeData(out); } } @Override protected void readInternal(ObjectDataInput in) throws IOException { super.readInternal(in); final int size = in.readInt(); valueMap = new HashMap<Long, Data>(size); for (int i = 0; i < size; i++) { final long itemId = in.readLong(); final Data value = new Data(); value.readData(in); valueMap.put(itemId, value); } } }
0 (true)
hazelcast_src_main_java_com_hazelcast_collection_CollectionAddAllBackupOperation.java
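writeInternal/readInternal above serialize the backup map as a size prefix followed by key/value pairs. The same length-prefixed layout with DataOutputStream/DataInputStream, using byte[] in place of Hazelcast's Data type:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class MapSerializationSketch {
        static byte[] write(Map<Long, byte[]> valueMap) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bos);
            out.writeInt(valueMap.size());                       // size prefix, as in writeInternal
            for (Map.Entry<Long, byte[]> e : valueMap.entrySet()) {
                out.writeLong(e.getKey());
                out.writeInt(e.getValue().length);               // raw byte[] needs its own length prefix
                out.write(e.getValue());
            }
            return bos.toByteArray();
        }

        static Map<Long, byte[]> read(byte[] bytes) throws IOException {
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
            int size = in.readInt();
            Map<Long, byte[]> valueMap = new HashMap<>(size);
            for (int i = 0; i < size; i++) {
                long itemId = in.readLong();
                byte[] value = new byte[in.readInt()];
                in.readFully(value);
                valueMap.put(itemId, value);
            }
            return valueMap;
        }
    }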
234
highlighter = new XPostingsHighlighter() { Iterator<String> valuesIterator = Arrays.asList(firstValue, secondValue, thirdValue).iterator(); Iterator<Integer> offsetsIterator = Arrays.asList(0, firstValue.length() + 1, firstValue.length() + secondValue.length() + 2).iterator(); @Override protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException { return new String[][]{new String[]{valuesIterator.next()}}; } @Override protected int getOffsetForCurrentValue(String field, int docId) { return offsetsIterator.next(); } @Override protected BreakIterator getBreakIterator(String field) { return new WholeBreakIterator(); } };
0 (true)
src_test_java_org_apache_lucene_search_postingshighlight_XPostingsHighlighterTests.java
1,257
METRIC_TYPE.COUNTER, new OProfilerHookValue() { public Object getValue() { return metricReusedPages; } });
0 (true)
core_src_main_java_com_orientechnologies_orient_core_storage_fs_OMMapManagerOld.java
878
@RunWith(HazelcastParallelClassRunner.class) @Category(QuickTest.class) public class CountDownLatchTest extends HazelcastTestSupport { @Test @ClientCompatibleTest public void testSimpleUsage() throws InterruptedException { final int k = 5; final Config config = new Config(); TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(k); final HazelcastInstance[] instances = factory.newInstances(config); ICountDownLatch latch = instances[0].getCountDownLatch("test"); latch.trySetCount(k - 1); assertEquals(k - 1, latch.getCount()); new Thread() { public void run() { for (int i = 1; i < k; i++) { try { sleep(100); } catch (InterruptedException e) { e.printStackTrace(); } final ICountDownLatch l = instances[i].getCountDownLatch("test"); l.countDown(); assertEquals(k - 1 - i, l.getCount()); } } }.start(); assertTrue(latch.await(5000, TimeUnit.MILLISECONDS)); assertEquals(0, latch.getCount()); } @Test @ClientCompatibleTest public void testAwaitFail() throws InterruptedException { final int k = 3; final Config config = new Config(); TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(k); final HazelcastInstance[] instances = factory.newInstances(config); ICountDownLatch latch = instances[0].getCountDownLatch("test"); latch.trySetCount(k - 1); long t = System.currentTimeMillis(); assertFalse(latch.await(100, TimeUnit.MILLISECONDS)); final long elapsed = System.currentTimeMillis() - t; assertTrue(elapsed >= 100); } @Test(expected = DistributedObjectDestroyedException.class) @ClientCompatibleTest public void testLatchDestroyed() throws Exception { TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2); HazelcastInstance hz1 = factory.newHazelcastInstance(); HazelcastInstance hz2 = factory.newHazelcastInstance(); final ICountDownLatch latch = hz1.getCountDownLatch("test"); latch.trySetCount(2); new Thread() { public void run() { try { sleep(1000); } catch (InterruptedException e) { return; } latch.destroy(); } }.start(); hz2.getCountDownLatch("test").await(5, TimeUnit.SECONDS); } @Test @ClientCompatibleTest public void testLatchMigration() throws InterruptedException { TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(5); HazelcastInstance hz1 = factory.newHazelcastInstance(); HazelcastInstance hz2 = factory.newHazelcastInstance(); warmUpPartitions(hz2, hz1); ICountDownLatch latch1 = hz1.getCountDownLatch("test"); latch1.trySetCount(10); Thread.sleep(500); ICountDownLatch latch2 = hz2.getCountDownLatch("test"); assertEquals(10, latch2.getCount()); latch2.countDown(); assertEquals(9, latch1.getCount()); hz1.shutdown(); assertEquals(9, latch2.getCount()); HazelcastInstance hz3 = factory.newHazelcastInstance(); warmUpPartitions(hz3); ICountDownLatch latch3 = hz3.getCountDownLatch("test"); latch3.countDown(); assertEquals(8, latch3.getCount()); hz2.shutdown(); latch3.countDown(); assertEquals(7, latch3.getCount()); HazelcastInstance hz4 = factory.newHazelcastInstance(); HazelcastInstance hz5 = factory.newHazelcastInstance(); warmUpPartitions(hz5, hz4); Thread.sleep(250); hz3.shutdown(); ICountDownLatch latch4 = hz4.getCountDownLatch("test"); assertEquals(7, latch4.getCount()); ICountDownLatch latch5 = hz5.getCountDownLatch("test"); latch5.countDown(); assertEquals(6, latch5.getCount()); latch5.countDown(); assertEquals(5, latch4.getCount()); assertEquals(5, latch5.getCount()); } }
0 (true)
hazelcast_src_test_java_com_hazelcast_concurrent_countdownlatch_CountDownLatchTest.java
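The Hazelcast ICountDownLatch exercised above mirrors java.util.concurrent.CountDownLatch. A single-JVM version of the testSimpleUsage flow:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    public class LatchSketch {
        public static void main(String[] args) throws InterruptedException {
            final int k = 5;
            final CountDownLatch latch = new CountDownLatch(k - 1);

            new Thread(() -> {
                for (int i = 1; i < k; i++) {
                    latch.countDown();            // each countDown lowers getCount() by one
                }
            }).start();

            boolean reachedZero = latch.await(5000, TimeUnit.MILLISECONDS);
            System.out.println("reached zero: " + reachedZero + ", remaining: " + latch.getCount());
        }
    }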
1,142
private static class TestInitializingObjectService implements RemoteService { static final String NAME = "TestInitializingObjectService"; public DistributedObject createDistributedObject(final String objectName) { return new TestInitializingObject(objectName); } public void destroyDistributedObject(final String objectName) { } }
0 (true)
hazelcast_src_test_java_com_hazelcast_core_DistributedObjectTest.java
5,440
public class ScriptDoubleValues extends DoubleValues implements ScriptValues { final SearchScript script; private Object value; private double[] values = new double[4]; private int valueCount; private int valueOffset; public ScriptDoubleValues(SearchScript script) { super(true); // assume multi-valued this.script = script; } @Override public SearchScript script() { return script; } @Override public int setDocument(int docId) { this.docId = docId; script.setNextDocId(docId); value = script.run(); if (value == null) { valueCount = 0; } else if (value instanceof Number) { valueCount = 1; values[0] = ((Number) value).doubleValue(); } else if (value.getClass().isArray()) { valueCount = Array.getLength(value); values = ArrayUtil.grow(values, valueCount); for (int i = 0; i < valueCount; ++i) { values[i] = ((Number) Array.get(value, i)).doubleValue(); } } else if (value instanceof Collection) { valueCount = ((Collection<?>) value).size(); int i = 0; for (Iterator<?> it = ((Collection<?>) value).iterator(); it.hasNext(); ++i) { values[i] = ((Number) it.next()).doubleValue(); } assert i == valueCount; } else { throw new AggregationExecutionException("Unsupported script value [" + value + "]"); } valueOffset = 0; return valueCount; } @Override public double nextValue() { assert valueOffset < valueCount; return values[valueOffset++]; } }
1 (no label)
src_main_java_org_elasticsearch_search_aggregations_support_numeric_ScriptDoubleValues.java
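setDocument above normalizes a script result that may be null, a single Number, an array, or a Collection into a double buffer. The branching in isolation, using only the JDK (names here are illustrative, not the Elasticsearch API):

    import java.lang.reflect.Array;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.Iterator;

    public class ScriptValueSketch {
        // Mirrors the type dispatch in setDocument: null, Number, array, Collection, otherwise error.
        static double[] toDoubles(Object value) {
            if (value == null) {
                return new double[0];
            } else if (value instanceof Number) {
                return new double[] { ((Number) value).doubleValue() };
            } else if (value.getClass().isArray()) {
                int n = Array.getLength(value);
                double[] out = new double[n];
                for (int i = 0; i < n; i++) out[i] = ((Number) Array.get(value, i)).doubleValue();
                return out;
            } else if (value instanceof Collection) {
                Collection<?> c = (Collection<?>) value;
                double[] out = new double[c.size()];
                int i = 0;
                for (Iterator<?> it = c.iterator(); it.hasNext(); ++i) out[i] = ((Number) it.next()).doubleValue();
                return out;
            }
            throw new IllegalArgumentException("Unsupported script value [" + value + "]");
        }

        public static void main(String[] args) {
            System.out.println(Arrays.toString(toDoubles(Arrays.asList(1, 2.5, 3L))));  // [1.0, 2.5, 3.0]
        }
    }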
1,124
public class OSQLFunctionIf extends OSQLFunctionAbstract { public static final String NAME = "if"; public OSQLFunctionIf() { super(NAME, 2, 3); } @Override public Object execute(final OIdentifiable iCurrentRecord, final Object iCurrentResult, final Object[] iFuncParams, final OCommandContext iContext) { boolean result; try { Object condition = iFuncParams[0]; if (condition instanceof Boolean) result = (Boolean) condition; else if (condition instanceof String) result = Boolean.parseBoolean(condition.toString()); else if (condition instanceof Number) result = ((Number) condition).intValue() > 0; else return null; return result ? iFuncParams[1] : iFuncParams[2]; } catch (Exception e) { return null; } } @Override public String getSyntax() { return "Syntax error: if(<field|value|expression>, <return_value_if_true> [,<return_value_if_false>])"; } }
1no label
core_src_main_java_com_orientechnologies_orient_core_sql_functions_misc_OSQLFunctionIf.java
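The function above accepts its condition as a Boolean, a String, or a Number and treats anything else as null; a standalone sketch of just that coercion, with illustrative names and no OrientDB types, is:
// Mirrors the condition handling in OSQLFunctionIf.execute() above; illustrative only.
final class ConditionCoercion {
    static Boolean toBoolean(Object condition) {
        if (condition instanceof Boolean) {
            return (Boolean) condition;
        } else if (condition instanceof String) {
            return Boolean.parseBoolean((String) condition);
        } else if (condition instanceof Number) {
            return ((Number) condition).intValue() > 0; // positive numbers count as true
        }
        return null; // unsupported type, like the function's fallback
    }
}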
2,361
public class MapCombineTask<KeyIn, ValueIn, KeyOut, ValueOut, Chunk> { private final AtomicBoolean cancelled = new AtomicBoolean(); private final Mapper<KeyIn, ValueIn, KeyOut, ValueOut> mapper; private final MappingPhase<KeyIn, ValueIn, KeyOut, ValueOut> mappingPhase; private final KeyValueSource<KeyIn, ValueIn> keyValueSource; private final MapReduceService mapReduceService; private final JobSupervisor supervisor; private final NodeEngine nodeEngine; private final String name; private final String jobId; private final int chunkSize; public MapCombineTask(JobTaskConfiguration configuration, JobSupervisor supervisor, MappingPhase<KeyIn, ValueIn, KeyOut, ValueOut> mappingPhase) { this.mappingPhase = mappingPhase; this.supervisor = supervisor; this.mapper = configuration.getMapper(); this.name = configuration.getName(); this.jobId = configuration.getJobId(); this.chunkSize = configuration.getChunkSize(); this.nodeEngine = configuration.getNodeEngine(); this.mapReduceService = supervisor.getMapReduceService(); this.keyValueSource = configuration.getKeyValueSource(); } public String getName() { return name; } public String getJobId() { return jobId; } public int getChunkSize() { return chunkSize; } public void cancel() { cancelled.set(true); mappingPhase.cancel(); } public void process() { ExecutorService es = mapReduceService.getExecutorService(name); if (keyValueSource instanceof PartitionIdAware) { es.submit(new PartitionProcessor()); } else { es.submit(new SingleExecutionProcessor()); } } public final void processMapping(int partitionId, DefaultContext<KeyOut, ValueOut> context, KeyValueSource<KeyIn, ValueIn> keyValueSource) throws Exception { context.setPartitionId(partitionId); if (mapper instanceof LifecycleMapper) { ((LifecycleMapper) mapper).initialize(context); } mappingPhase.executeMappingPhase(keyValueSource, mapper, context); if (mapper instanceof LifecycleMapper) { ((LifecycleMapper) mapper).finalized(context); } if (cancelled.get()) { return; } } void onEmit(DefaultContext<KeyOut, ValueOut> context, int partitionId) { // If we have a reducer let's test for chunk size otherwise // we need to collect all values locally and wait for final request if (supervisor.getConfiguration().getReducerFactory() != null) { if (context.getCollected() == chunkSize) { Map<KeyOut, Chunk> chunkMap = context.requestChunk(); // Wrap into IntermediateChunkNotification object Map<Address, Map<KeyOut, Chunk>> mapping = mapResultToMember(supervisor, chunkMap); // Register remote addresses and partitionId for receiving reducer events supervisor.registerReducerEventInterests(partitionId, mapping.keySet()); for (Map.Entry<Address, Map<KeyOut, Chunk>> entry : mapping.entrySet()) { mapReduceService.sendNotification(entry.getKey(), new IntermediateChunkNotification(entry.getKey(), name, jobId, entry.getValue(), partitionId)); } } } } private void finalizeMapping(int partitionId, DefaultContext<KeyOut, ValueOut> context) throws Exception { RequestPartitionResult result = mapReduceService .processRequest(supervisor.getJobOwner(), new RequestPartitionReducing(name, jobId, partitionId), name); if (result.getResultState() == SUCCESSFUL) { // If we have a reducer defined just send it over if (supervisor.getConfiguration().getReducerFactory() != null) { Map<KeyOut, Chunk> chunkMap = context.finish(); if (chunkMap.size() > 0) { sendLastChunkToAssignedReducers(partitionId, chunkMap); } else { finalizeProcessing(partitionId); } } } } private void finalizeProcessing(int partitionId) throws Exception { // If nothing to 
reduce we just set partition to processed RequestPartitionResult result = mapReduceService .processRequest(supervisor.getJobOwner(), new RequestPartitionProcessed(name, jobId, partitionId, REDUCING), name); if (result.getResultState() != SUCCESSFUL) { throw new RuntimeException("Could not finalize processing for partitionId " + partitionId); } } private void sendLastChunkToAssignedReducers(int partitionId, Map<KeyOut, Chunk> chunkMap) { Address sender = mapReduceService.getLocalAddress(); // Wrap into LastChunkNotification object Map<Address, Map<KeyOut, Chunk>> mapping = mapResultToMember(supervisor, chunkMap); // Register remote addresses and partitionId for receiving reducer events supervisor.registerReducerEventInterests(partitionId, mapping.keySet()); // Send LastChunk notifications for (Map.Entry<Address, Map<KeyOut, Chunk>> entry : mapping.entrySet()) { Address receiver = entry.getKey(); Map<KeyOut, Chunk> chunk = entry.getValue(); mapReduceService .sendNotification(receiver, new LastChunkNotification(receiver, name, jobId, sender, partitionId, chunk)); } // Send LastChunk notification to notify reducers that received at least one chunk Set<Address> addresses = mapping.keySet(); Collection<Address> reducerInterests = supervisor.getReducerEventInterests(partitionId); if (reducerInterests != null) { for (Address address : reducerInterests) { if (!addresses.contains(address)) { mapReduceService.sendNotification(address, new LastChunkNotification(address, name, jobId, sender, partitionId, Collections.emptyMap())); } } } } private void postponePartitionProcessing(int partitionId) throws Exception { RequestPartitionResult result = mapReduceService .processRequest(supervisor.getJobOwner(), new PostPonePartitionProcessing(name, jobId, partitionId), name); if (result.getResultState() != SUCCESSFUL) { throw new RuntimeException("Could not postpone processing for partitionId " + partitionId + " -> " + result.getResultState()); } } private void handleProcessorThrowable(Throwable t) { notifyRemoteException(supervisor, t); if (t instanceof Error) { ExceptionUtil.sneakyThrow(t); } } /** * This class implements the partitionId based mapping phase */ private class PartitionProcessor implements Runnable { @Override public void run() { KeyValueSource<KeyIn, ValueIn> delegate = keyValueSource; if (supervisor.getConfiguration().isCommunicateStats()) { delegate = new KeyValueSourceFacade<KeyIn, ValueIn>(keyValueSource, supervisor); } while (true) { if (cancelled.get()) { return; } Integer partitionId = findNewPartitionProcessing(); if (partitionId == null) { // Job's done return; } // Migration event occurred, just retry if (partitionId == -1) { continue; } try { // This call cannot be delegated ((PartitionIdAware) keyValueSource).setPartitionId(partitionId); delegate.reset(); if (delegate.open(nodeEngine)) { DefaultContext<KeyOut, ValueOut> context = supervisor.getOrCreateContext(MapCombineTask.this); processMapping(partitionId, context, delegate); delegate.close(); finalizeMapping(partitionId, context); } else { // Partition assignment might not be ready yet, postpone the processing and retry later postponePartitionProcessing(partitionId); } } catch (Throwable t) { handleProcessorThrowable(t); } } } private Integer findNewPartitionProcessing() { try { RequestPartitionResult result = mapReduceService .processRequest(supervisor.getJobOwner(), new RequestPartitionMapping(name, jobId), name); // JobSupervisor doesn't exist anymore on jobOwner, job done? if (result.getResultState() == NO_SUPERVISOR) { return null; } else if (result.getResultState() == CHECK_STATE_FAILED) { // retry return -1; } else if (result.getResultState() == NO_MORE_PARTITIONS) { return null; } else { return result.getPartitionId(); } } catch (Exception e) { throw new RuntimeException(e); } } } /** * This class implements the non partitionId based mapping phase */ private class SingleExecutionProcessor implements Runnable { @Override public void run() { try { RequestPartitionResult result = mapReduceService .processRequest(supervisor.getJobOwner(), new RequestMemberIdAssignment(name, jobId), name); // JobSupervisor doesn't exist anymore on jobOwner, job done? if (result.getResultState() == NO_SUPERVISOR) { return; } else if (result.getResultState() == NO_MORE_PARTITIONS) { return; } int partitionId = result.getPartitionId(); KeyValueSource<KeyIn, ValueIn> delegate = keyValueSource; if (supervisor.getConfiguration().isCommunicateStats()) { delegate = new KeyValueSourceFacade<KeyIn, ValueIn>(keyValueSource, supervisor); } delegate.reset(); if (delegate.open(nodeEngine)) { DefaultContext<KeyOut, ValueOut> context = supervisor.getOrCreateContext(MapCombineTask.this); processMapping(partitionId, context, delegate); delegate.close(); finalizeMapping(partitionId, context); } else { // Partition assignment might not be ready yet, postpone the processing and retry later postponePartitionProcessing(partitionId); } } catch (Throwable t) { handleProcessorThrowable(t); } } } }
1no label
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_MapCombineTask.java
2,157
static class IteratorBasedIterator extends DocIdSetIterator { int lastReturn = -1; private DocIdSetIterator[] iterators = null; private final long cost; IteratorBasedIterator(DocIdSet[] sets) throws IOException { iterators = new DocIdSetIterator[sets.length]; int j = 0; long cost = Integer.MAX_VALUE; for (DocIdSet set : sets) { if (set == null) { lastReturn = DocIdSetIterator.NO_MORE_DOCS; // non matching break; } else { DocIdSetIterator dcit = set.iterator(); if (dcit == null) { lastReturn = DocIdSetIterator.NO_MORE_DOCS; // non matching break; } iterators[j++] = dcit; cost = Math.min(cost, dcit.cost()); } } this.cost = cost; if (lastReturn != DocIdSetIterator.NO_MORE_DOCS) { lastReturn = (iterators.length > 0 ? -1 : DocIdSetIterator.NO_MORE_DOCS); } } @Override public final int docID() { return lastReturn; } @Override public final int nextDoc() throws IOException { if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS; DocIdSetIterator dcit = iterators[0]; int target = dcit.nextDoc(); int size = iterators.length; int skip = 0; int i = 1; while (i < size) { if (i != skip) { dcit = iterators[i]; int docid = dcit.advance(target); if (docid > target) { target = docid; if (i != 0) { skip = i; i = 0; continue; } else skip = 0; } } i++; } return (lastReturn = target); } @Override public final int advance(int target) throws IOException { if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS; DocIdSetIterator dcit = iterators[0]; target = dcit.advance(target); int size = iterators.length; int skip = 0; int i = 1; while (i < size) { if (i != skip) { dcit = iterators[i]; int docid = dcit.advance(target); if (docid > target) { target = docid; if (i != 0) { skip = i; i = 0; continue; } else { skip = 0; } } } i++; } return (lastReturn = target); } @Override public long cost() { return cost; } }
1no label
src_main_java_org_elasticsearch_common_lucene_docset_AndDocIdSet.java
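The iterator above intersects several sorted doc-id streams by advancing each stream to the largest candidate seen so far; the same leapfrog idea, reduced to two sorted int arrays with illustrative names, looks roughly like:
// Leapfrog intersection of two sorted, duplicate-free int arrays; illustrative only.
import java.util.ArrayList;
import java.util.List;

final class SortedIntersection {
    static List<Integer> intersect(int[] a, int[] b) {
        List<Integer> out = new ArrayList<Integer>();
        int i = 0, j = 0;
        while (i < a.length && j < b.length) {
            if (a[i] == b[j]) {       // both streams agree on this id: emit it
                out.add(a[i]);
                i++;
                j++;
            } else if (a[i] < b[j]) { // advance the stream that is behind
                i++;
            } else {
                j++;
            }
        }
        return out;
    }
}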
53
public class SchemaViolationException extends TitanException { public SchemaViolationException(String msg) { super(msg); } public SchemaViolationException(String msg, Object... args) { super(String.format(msg,args)); } }
0true
titan-core_src_main_java_com_thinkaurelius_titan_core_SchemaViolationException.java
1,544
public class OObjectSerializerManager implements OObjectSerializerHelperInterface { private static final OObjectSerializerManager instance = new OObjectSerializerManager(); public ODocument toStream(Object iPojo, ODocument iRecord, OEntityManager iEntityManager, OClass schemaClass, OUserObject2RecordHandler iObj2RecHandler, ODatabaseObject db, boolean iSaveOnlyDirty) { return OObjectSerializerHelper.toStream(iPojo, iRecord, iEntityManager, schemaClass, iObj2RecHandler, db, iSaveOnlyDirty); } public String getDocumentBoundField(Class<?> iClass) { return OObjectSerializerHelper.getDocumentBoundField(iClass); } public Object getFieldValue(Object iPojo, String iProperty) { return OObjectSerializerHelper.getFieldValue(iPojo, iProperty); } public void invokeCallback(Object iPojo, ODocument iDocument, Class<?> iAnnotation) { OObjectSerializerHelper.invokeCallback(iPojo, iDocument, iAnnotation); } public static OObjectSerializerManager getInstance() { return instance; } }
0true
object_src_main_java_com_orientechnologies_orient_object_serialization_OObjectSerializerManager.java
2,113
public static final DurationFieldType Quarters = new DurationFieldType("quarters") { private static final long serialVersionUID = -8167713675442491871L; public DurationField getField(Chronology chronology) { return new ScaledDurationField(chronology.months(), Quarters, 3); } };
0true
src_main_java_org_elasticsearch_common_joda_Joda.java
236
public class CassandraTransactionTest { /* testRead/WriteConsistencyLevel have unnecessary code duplication * that could be avoided by creating a common helper method that takes * a ConfigOption parameter and a function that converts a * CassandraTransaction to a consistency level by calling either * ct.getReadConsistencyLevel() or .getWriteConsistencyLevel(), * but it doesn't seem worth the complexity. */ @Test public void testWriteConsistencyLevel() { int levelsChecked = 0; // Test whether CassandraTransaction honors the write consistency level option for (CLevel writeLevel : CLevel.values()) { StandardBaseTransactionConfig.Builder b = new StandardBaseTransactionConfig.Builder(); ModifiableConfiguration mc = GraphDatabaseConfiguration.buildConfiguration(); mc.set(CASSANDRA_WRITE_CONSISTENCY, writeLevel.name()); b.customOptions(mc); b.timestampProvider(Timestamps.MICRO); CassandraTransaction ct = new CassandraTransaction(b.build()); assertEquals(writeLevel, ct.getWriteConsistencyLevel()); levelsChecked++; } // Sanity check: if CLevel.values was empty, something is wrong with the test Preconditions.checkState(0 < levelsChecked); } @Test public void testReadConsistencyLevel() { int levelsChecked = 0; // Test whether CassandraTransaction honors the read consistency level option for (CLevel readLevel : CLevel.values()) { StandardBaseTransactionConfig.Builder b = new StandardBaseTransactionConfig.Builder(); ModifiableConfiguration mc = GraphDatabaseConfiguration.buildConfiguration(); mc.set(CASSANDRA_READ_CONSISTENCY, readLevel.name()); b.timestampProvider(Timestamps.MICRO); b.customOptions(mc); CassandraTransaction ct = new CassandraTransaction(b.build()); assertEquals(readLevel, ct.getReadConsistencyLevel()); levelsChecked++; } // Sanity check: if CLevel.values was empty, something is wrong with the test Preconditions.checkState(0 < levelsChecked); } @Test public void testTimestampProvider() { BaseTransactionConfig txcfg = StandardBaseTransactionConfig.of(Timestamps.NANO); CassandraTransaction ct = new CassandraTransaction(txcfg); assertEquals(Timestamps.NANO, ct.getConfiguration().getTimestampProvider()); txcfg = StandardBaseTransactionConfig.of(Timestamps.MICRO); ct = new CassandraTransaction(txcfg); assertEquals(Timestamps.MICRO, ct.getConfiguration().getTimestampProvider()); txcfg = StandardBaseTransactionConfig.of(Timestamps.MILLI); ct = new CassandraTransaction(txcfg); assertEquals(Timestamps.MILLI, ct.getConfiguration().getTimestampProvider()); } }
0true
titan-cassandra_src_test_java_com_thinkaurelius_titan_diskstorage_cassandra_CassandraTransactionTest.java
2,048
public interface ProviderBinding<T extends Provider<?>> extends Binding<T> { /** * Returns the key whose binding is used to {@link Provider#get provide instances}. That binding * can be retrieved from an injector using {@link org.elasticsearch.common.inject.Injector#getBinding(Key) * Injector.getBinding(providedKey)} */ Key<?> getProvidedKey(); }
0true
src_main_java_org_elasticsearch_common_inject_spi_ProviderBinding.java
1,548
CollectionUtils.collect(activities, new Transformer() { @Override public Object transform(Object input) { return ((Activity) input).getBeanName(); } }, activityNames);
0true
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_workflow_BaseProcessor.java
1,140
public class ValidateAddRequestActivity extends BaseActivity<CartOperationContext> { @Resource(name = "blOrderService") protected OrderService orderService; @Resource(name = "blCatalogService") protected CatalogService catalogService; @Resource(name = "blProductOptionValidationService") protected ProductOptionValidationService productOptionValidationService; @Override public CartOperationContext execute(CartOperationContext context) throws Exception { CartOperationRequest request = context.getSeedData(); OrderItemRequestDTO orderItemRequestDTO = request.getItemRequest(); // Quantity was not specified or was equal to zero. We will not throw an exception, // but we will prevent the workflow from continuing to execute if (orderItemRequestDTO.getQuantity() == null || orderItemRequestDTO.getQuantity() == 0) { context.stopProcess(); return null; } // Throw an exception if the user tried to add a negative quantity of something if (orderItemRequestDTO.getQuantity() < 0) { throw new IllegalArgumentException("Quantity cannot be negative"); } // Throw an exception if the user did not specify an order to add the item to if (request.getOrder() == null) { throw new IllegalArgumentException("Order is required when adding item to order"); } // Validate that if the user specified a productId, it is a legitimate productId Product product = null; if (orderItemRequestDTO.getProductId() != null) { product = catalogService.findProductById(orderItemRequestDTO.getProductId()); if (product == null) { throw new IllegalArgumentException("Product was specified but no matching product was found for productId " + orderItemRequestDTO.getProductId()); } } Sku sku = determineSku(product, orderItemRequestDTO.getSkuId(), orderItemRequestDTO.getItemAttributes()); // If we couldn't find a sku, then we're unable to add to cart. if (sku == null && !(orderItemRequestDTO instanceof NonDiscreteOrderItemRequestDTO)) { StringBuilder sb = new StringBuilder(); for (Entry<String, String> entry : orderItemRequestDTO.getItemAttributes().entrySet()) { sb.append(entry.toString()); } throw new IllegalArgumentException("Could not find SKU for :" + " productId: " + (product == null ? "null" : product.getId()) + " skuId: " + orderItemRequestDTO.getSkuId() + " attributes: " + sb.toString()); } else if (sku == null) { NonDiscreteOrderItemRequestDTO ndr = (NonDiscreteOrderItemRequestDTO) orderItemRequestDTO; if (StringUtils.isBlank(ndr.getItemName())) { throw new IllegalArgumentException("Item name is required for non discrete order item add requests"); } if (ndr.getOverrideRetailPrice() == null && ndr.getOverrideSalePrice() == null) { throw new IllegalArgumentException("At least one override price is required for non discrete order item add requests"); } } else if (!sku.isActive()) { throw new IllegalArgumentException("The requested skuId of " + sku.getId() + " is no longer active"); } else { // We know definitively which sku we're going to add, so we can set this // value with certainty request.getItemRequest().setSkuId(sku.getId()); } if (!(orderItemRequestDTO instanceof NonDiscreteOrderItemRequestDTO) && request.getOrder().getCurrency() != null && sku.getCurrency() != null && !request.getOrder().getCurrency().equals(sku.getCurrency())) { throw new IllegalArgumentException("Cannot have items with differing currencies in one cart"); } return context; } protected Sku determineSku(Product product, Long skuId, Map<String,String> attributeValues) { // Check whether the sku is correct given the product options. Sku sku = findMatchingSku(product, attributeValues); if (sku == null && skuId != null) { sku = catalogService.findSkuById(skuId); } if (sku == null && product != null) { // Set to the default sku if (product.getAdditionalSkus() != null && product.getAdditionalSkus().size() > 0 && !product.getCanSellWithoutOptions()) { throw new RequiredAttributeNotProvidedException("Unable to find non-default sku matching given options and cannot sell default sku"); } sku = product.getDefaultSku(); } return sku; } protected Sku findMatchingSku(Product product, Map<String,String> attributeValues) { Map<String, String> attributeValuesForSku = new HashMap<String,String>(); // Verify that required product-option values were set. if (product != null && product.getProductOptions() != null && product.getProductOptions().size() > 0) { for (ProductOption productOption : product.getProductOptions()) { if (productOption.getRequired()) { if (StringUtils.isEmpty(attributeValues.get(productOption.getAttributeName()))) { throw new RequiredAttributeNotProvidedException("Unable to add to product ("+ product.getId() +") cart. Required attribute was not provided: " + productOption.getAttributeName()); } else if (productOption.getUseInSkuGeneration()) { attributeValuesForSku.put(productOption.getAttributeName(), attributeValues.get(productOption.getAttributeName())); } } if (!productOption.getRequired() && StringUtils.isEmpty(attributeValues.get(productOption.getAttributeName()))) { // if the product option is not required and the user has not set the optional value, we don't need to validate } else if (productOption.getProductOptionValidationType() != null) { productOptionValidationService.validate(productOption, attributeValues.get(productOption.getAttributeName())); } } if (product != null && product.getSkus() != null) { for (Sku sku : product.getSkus()) { if (checkSkuForMatch(sku, attributeValuesForSku)) { return sku; } } } } return null; } protected boolean checkSkuForMatch(Sku sku, Map<String,String> attributeValues) { if (attributeValues == null || attributeValues.size() == 0) { return false; } for (String attributeName : attributeValues.keySet()) { boolean optionValueMatchFound = false; for (ProductOptionValue productOptionValue : sku.getProductOptionValues()) { if (productOptionValue.getProductOption().getAttributeName().equals(attributeName)) { if (productOptionValue.getAttributeValue().equals(attributeValues.get(attributeName))) { optionValueMatchFound = true; break; } else { return false; } } } if (optionValueMatchFound) { continue; } else { return false; } } return true; } }
0true
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_workflow_add_ValidateAddRequestActivity.java
1,303
@Test public class LocalPaginatedStorageUpdateCrashRestore { private ODatabaseDocumentTx baseDocumentTx; private ODatabaseDocumentTx testDocumentTx; private File buildDir; private int idGen = 0; private OLockManager<Integer, Thread> idLockManager = new OLockManager<Integer, Thread>(true, 1000); private ExecutorService executorService = Executors.newCachedThreadPool(); private Process process; @BeforeClass public void beforeClass() throws Exception { OGlobalConfiguration.CACHE_LEVEL1_ENABLED.setValue(false); OGlobalConfiguration.CACHE_LEVEL1_SIZE.setValue(0); OGlobalConfiguration.CACHE_LEVEL2_ENABLED.setValue(false); OGlobalConfiguration.CACHE_LEVEL2_SIZE.setValue(0); String buildDirectory = System.getProperty("buildDirectory", "."); buildDirectory += "/localPaginatedStorageUpdateCrashRestore"; buildDir = new File(buildDirectory); if (buildDir.exists()) buildDir.delete(); buildDir.mkdir(); String javaExec = System.getProperty("java.home") + "/bin/java"; System.setProperty("ORIENTDB_HOME", buildDirectory); ProcessBuilder processBuilder = new ProcessBuilder(javaExec, "-Xmx2048m", "-classpath", System.getProperty("java.class.path"), "-DORIENTDB_HOME=" + buildDirectory, RemoteDBRunner.class.getName()); processBuilder.inheritIO(); process = processBuilder.start(); Thread.sleep(5000); } public static final class RemoteDBRunner { public static void main(String[] args) throws Exception { OGlobalConfiguration.CACHE_LEVEL1_ENABLED.setValue(false); OGlobalConfiguration.CACHE_LEVEL1_SIZE.setValue(0); OGlobalConfiguration.CACHE_LEVEL2_ENABLED.setValue(false); OGlobalConfiguration.CACHE_LEVEL2_SIZE.setValue(0); OServer server = OServerMain.create(); server.startup(RemoteDBRunner.class .getResourceAsStream("/com/orientechnologies/orient/core/storage/impl/local/paginated/db-update-config.xml")); server.activate(); while (true) ; } } @AfterClass public void afterClass() { testDocumentTx.drop(); baseDocumentTx.drop(); Assert.assertTrue(buildDir.delete()); } @BeforeMethod public void beforeMethod() { baseDocumentTx = new ODatabaseDocumentTx("plocal:" + buildDir.getAbsolutePath() + "/baseLocalPaginatedStorageUpdateCrashRestore"); if (baseDocumentTx.exists()) { baseDocumentTx.open("admin", "admin"); baseDocumentTx.drop(); } baseDocumentTx.create(); testDocumentTx = new ODatabaseDocumentTx("remote:localhost:3500/testLocalPaginatedStorageUpdateCrashRestore"); testDocumentTx.open("admin", "admin"); } public void testDocumentUpdate() throws Exception { createSchema(baseDocumentTx); createSchema(testDocumentTx); System.out.println("Schema was created."); System.out.println("Document creation was started."); createDocuments(); System.out.println("Document creation was finished."); System.out.println("Start documents update."); List<Future> futures = new ArrayList<Future>(); for (int i = 0; i < 5; i++) { futures.add(executorService.submit(new DataUpdateTask(baseDocumentTx, testDocumentTx))); } Thread.sleep(150000); long lastTs = System.currentTimeMillis(); process.destroy(); for (Future future : futures) { try { future.get(); } catch (Exception e) { e.printStackTrace(); } } System.out.println("Documents update was stopped."); testDocumentTx = new ODatabaseDocumentTx("plocal:" + buildDir.getAbsolutePath() + "/testLocalPaginatedStorageUpdateCrashRestore"); testDocumentTx.open("admin", "admin"); testDocumentTx.close(); testDocumentTx.open("admin", "admin"); System.out.println("Start documents comparison."); compareDocuments(lastTs); } private void createSchema(ODatabaseDocumentTx dbDocumentTx) { 
ODatabaseRecordThreadLocal.INSTANCE.set(dbDocumentTx); OSchema schema = dbDocumentTx.getMetadata().getSchema(); if (!schema.existsClass("TestClass")) { OClass testClass = schema.createClass("TestClass"); testClass.createProperty("id", OType.LONG); testClass.createProperty("timestamp", OType.LONG); testClass.createProperty("stringValue", OType.STRING); testClass.createIndex("idIndex", OClass.INDEX_TYPE.UNIQUE, "id"); schema.save(); } } private void createDocuments() { Random random = new Random(); for (int i = 0; i < 1000000; i++) { final ODocument document = new ODocument("TestClass"); document.field("id", idGen++); document.field("timestamp", System.currentTimeMillis()); document.field("stringValue", "sfe" + random.nextLong()); saveDoc(document, baseDocumentTx, testDocumentTx); if (i % 10000 == 0) System.out.println(i + " documents were created."); } } private void saveDoc(ODocument document, ODatabaseDocumentTx baseDB, ODatabaseDocumentTx testDB) { ODatabaseRecordThreadLocal.INSTANCE.set(baseDB); ODocument testDoc = new ODocument(); document.copyTo(testDoc); document.save(); ODatabaseRecordThreadLocal.INSTANCE.set(testDB); testDoc.save(); ODatabaseRecordThreadLocal.INSTANCE.set(baseDB); } private void compareDocuments(long lastTs) { long minTs = Long.MAX_VALUE; int clusterId = baseDocumentTx.getClusterIdByName("TestClass"); OStorage baseStorage = baseDocumentTx.getStorage(); OPhysicalPosition[] physicalPositions = baseStorage.ceilingPhysicalPositions(clusterId, new OPhysicalPosition( OClusterPositionFactory.INSTANCE.valueOf(0))); int recordsRestored = 0; int recordsTested = 0; while (physicalPositions.length > 0) { final ORecordId rid = new ORecordId(clusterId); for (OPhysicalPosition physicalPosition : physicalPositions) { rid.clusterPosition = physicalPosition.clusterPosition; ODatabaseRecordThreadLocal.INSTANCE.set(baseDocumentTx); ODocument baseDocument = baseDocumentTx.load(rid); ODatabaseRecordThreadLocal.INSTANCE.set(testDocumentTx); List<ODocument> testDocuments = testDocumentTx.query(new OSQLSynchQuery<ODocument>("select from TestClass where id = " + baseDocument.field("id"))); Assert.assertTrue(!testDocuments.isEmpty()); ODocument testDocument = testDocuments.get(0); if (testDocument.field("timestamp").equals(baseDocument.field("timestamp")) && testDocument.field("stringValue").equals(baseDocument.field("stringValue"))) { recordsRestored++; } else { if (((Long) baseDocument.field("timestamp")) < minTs) minTs = baseDocument.field("timestamp"); } recordsTested++; if (recordsTested % 10000 == 0) System.out.println(recordsTested + " were tested, " + recordsRestored + " were restored ..."); } physicalPositions = baseStorage.higherPhysicalPositions(clusterId, physicalPositions[physicalPositions.length - 1]); } System.out.println(recordsRestored + " records were restored. Total records " + recordsTested + ". Max interval for lost records " + (lastTs - minTs)); } public class DataUpdateTask implements Callable<Void> { private ODatabaseDocumentTx baseDB; private ODatabaseDocumentTx testDB; public DataUpdateTask(ODatabaseDocumentTx baseDB, ODatabaseDocumentTx testDocumentTx) { this.baseDB = new ODatabaseDocumentTx(baseDB.getURL()); this.testDB = new ODatabaseDocumentTx(testDocumentTx.getURL()); } @Override public Void call() throws Exception { Random random = new Random(); baseDB.open("admin", "admin"); testDB.open("admin", "admin"); int counter = 0; try { while (true) { final int idToUpdate = random.nextInt(idGen); idLockManager.acquireLock(Thread.currentThread(), idToUpdate, OLockManager.LOCK.EXCLUSIVE); try { OSQLSynchQuery<ODocument> query = new OSQLSynchQuery<ODocument>("select from TestClass where id = " + idToUpdate); final List<ODocument> result = baseDB.query(query); Assert.assertTrue(!result.isEmpty()); final ODocument document = result.get(0); document.field("timestamp", System.currentTimeMillis()); document.field("stringValue", "vde" + random.nextLong()); saveDoc(document, baseDB, testDB); counter++; if (counter % 50000 == 0) System.out.println(counter + " records were updated."); } finally { idLockManager.releaseLock(Thread.currentThread(), idToUpdate, OLockManager.LOCK.EXCLUSIVE); } } } finally { baseDB.close(); testDB.close(); } } } }
1no label
server_src_test_java_com_orientechnologies_orient_core_storage_impl_local_paginated_LocalPaginatedStorageUpdateCrashRestore.java