Unnamed: 0
int64 0
6.45k
| func
stringlengths 29
253k
| target
class label 2
classes | project
stringlengths 36
167
|
---|---|---|---|
87 | {
@Override
public void run()
{
dbr.getGraphDatabaseService().getNodeById( node.getId() );
dbr.getGraphDatabaseService().getRelationshipById( relationship.getId() );
}
}; | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_ReadTransactionLogWritingTest.java |
1,789 | imap.addEntryListener(new EntryAdapter<Integer, Integer>() {
@Override
public void entryAdded(EntryEvent<Integer, Integer> event) {
events1.add(event);
}
}, true); | 0true
| hazelcast_src_test_java_com_hazelcast_map_IssuesTest.java |
508 | public class StressThread extends TestThread {
// Expected per-reference totals accumulated locally by this thread.
private final int[] increments = new int[REFERENCE_COUNT];
@Override
public void doRun() throws Exception {
// Repeatedly bump a random IAtomicLong by a random amount, recording the
// same delta locally so the test can later verify the distributed totals.
while (!isStopped()) {
int index = random.nextInt(REFERENCE_COUNT);
int increment = random.nextInt(100);
increments[index] += increment;
IAtomicLong reference = references[index];
reference.addAndGet(increment);
}
}
// Folds this thread's expected totals into the caller-supplied array.
// NOTE(review): the parameter deliberately shadows the field; the right-hand
// side reads this.increments while the left-hand side mutates the argument.
public void addIncrements(int[] increments) {
for (int k = 0; k < increments.length; k++) {
increments[k] += this.increments[k];
}
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_stress_AtomicLongUpdateStressTest.java |
1,178 | public static class Order {
// Ordering constants for admin presentation groups; the gaps of 1000
// leave room for extensions to insert groups between the existing ones.
public static final int Address = 2000;
public static final int Log = 4000;
public static final int Advanced = 5000;
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_domain_PaymentInfoImpl.java |
1,661 | private enum NodeState {
// Lifecycle states of a local DHT node. NOTE(review): semantics inferred
// from the names (JOIN = joining the ring, MERGING = reconciling data,
// STABLE = steady state) — confirm against the enclosing class's usage.
JOIN, MERGING, STABLE
} | 0true
| distributed_src_main_java_com_orientechnologies_orient_server_hazelcast_oldsharding_distributed_OLocalDHTNode.java |
917 | public final class LockProxySupport {
private final ObjectNamespace namespace;
public LockProxySupport(ObjectNamespace namespace) {
this.namespace = namespace;
}
public boolean isLocked(NodeEngine nodeEngine, Data key) {
IsLockedOperation operation = new IsLockedOperation(namespace, key);
InternalCompletableFuture<Boolean> f = invoke(nodeEngine, operation, key);
return f.getSafely();
}
private InternalCompletableFuture invoke(NodeEngine nodeEngine, Operation operation, Data key) {
int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
return nodeEngine.getOperationService().invokeOnPartition(SERVICE_NAME, operation, partitionId);
}
public boolean isLockedByCurrentThread(NodeEngine nodeEngine, Data key) {
IsLockedOperation operation = new IsLockedOperation(namespace, key, getThreadId());
InternalCompletableFuture<Boolean> f = invoke(nodeEngine, operation, key);
return f.getSafely();
}
public int getLockCount(NodeEngine nodeEngine, Data key) {
Operation operation = new GetLockCountOperation(namespace, key);
InternalCompletableFuture<Number> f = invoke(nodeEngine, operation, key);
return f.getSafely().intValue();
}
public long getRemainingLeaseTime(NodeEngine nodeEngine, Data key) {
Operation operation = new GetRemainingLeaseTimeOperation(namespace, key);
InternalCompletableFuture<Number> f = invoke(nodeEngine, operation, key);
return f.getSafely().longValue();
}
public void lock(NodeEngine nodeEngine, Data key) {
lock(nodeEngine, key, -1);
}
public void lock(NodeEngine nodeEngine, Data key, long ttl) {
LockOperation operation = new LockOperation(namespace, key, getThreadId(), ttl, -1);
InternalCompletableFuture<Boolean> f = invoke(nodeEngine, operation, key);
if (!f.getSafely()) {
throw new IllegalStateException();
}
}
public boolean tryLock(NodeEngine nodeEngine, Data key) {
try {
return tryLock(nodeEngine, key, 0, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
return false;
}
}
public boolean tryLock(NodeEngine nodeEngine, Data key, long timeout, TimeUnit timeunit)
throws InterruptedException {
LockOperation operation = new LockOperation(namespace, key, getThreadId(),
getTimeInMillis(timeout, timeunit));
InternalCompletableFuture<Boolean> f = invoke(nodeEngine, operation, key);
try {
return f.get();
} catch (Throwable t) {
throw rethrowAllowInterrupted(t);
}
}
private long getTimeInMillis(final long time, final TimeUnit timeunit) {
return timeunit != null ? timeunit.toMillis(time) : time;
}
public void unlock(NodeEngine nodeEngine, Data key) {
UnlockOperation operation = new UnlockOperation(namespace, key, getThreadId());
InternalCompletableFuture<Number> f = invoke(nodeEngine, operation, key);
f.getSafely();
}
public void forceUnlock(NodeEngine nodeEngine, Data key) {
UnlockOperation operation = new UnlockOperation(namespace, key, -1, true);
InternalCompletableFuture<Number> f = invoke(nodeEngine, operation, key);
f.getSafely();
}
public ObjectNamespace getNamespace() {
return namespace;
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_concurrent_lock_LockProxySupport.java |
1,313 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_SEARCH_FACET_RANGE")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "blStandardElements")
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE)
@AdminPresentationOverrides({
@AdminPresentationOverride(name = "priceList.friendlyName", value = @AdminPresentation(excluded = false, friendlyName = "PriceListImpl_Friendly_Name", order=1, group = "SearchFacetRangeImpl_Description", prominent=true, visibility = VisibilityEnum.FORM_HIDDEN))
})
// JPA entity mapping a [minValue, maxValue] numeric range belonging to a
// search facet. maxValue is nullable (open-ended range); minValue is not.
public class SearchFacetRangeImpl implements SearchFacetRange,Serializable {
private static final long serialVersionUID = 1L;
// Primary key generated via the framework's table-based id generator.
@Id
@GeneratedValue(generator = "SearchFacetRangeId")
@GenericGenerator(
name="SearchFacetRangeId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="SearchFacetRangeImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.search.domain.SearchFacetRangeImpl")
}
)
@Column(name = "SEARCH_FACET_RANGE_ID")
protected Long id;
// Owning facet; eagerly initialized to a new instance rather than null.
@ManyToOne(targetEntity = SearchFacetImpl.class)
@JoinColumn(name = "SEARCH_FACET_ID")
@Index(name="SEARCH_FACET_INDEX", columnNames={"SEARCH_FACET_ID"})
@AdminPresentation(excluded = true, visibility = VisibilityEnum.HIDDEN_ALL)
protected SearchFacet searchFacet = new SearchFacetImpl();
// Inclusive lower bound of the range (required, 5 decimal places).
@Column(name = "MIN_VALUE", precision=19, scale=5, nullable = false)
@AdminPresentation(friendlyName = "SearchFacetRangeImpl_minValue", order=2, group = "SearchFacetRangeImpl_Description", prominent=true)
protected BigDecimal minValue;
// Upper bound of the range; nullable to allow open-ended ranges.
@Column(name = "MAX_VALUE", precision=19, scale=5)
@AdminPresentation(friendlyName = "SearchFacetRangeImpl_maxValue", order=3, group = "SearchFacetRangeImpl_Description", prominent=true)
protected BigDecimal maxValue;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public SearchFacet getSearchFacet() {
return searchFacet;
}
@Override
public void setSearchFacet(SearchFacet searchFacet) {
this.searchFacet = searchFacet;
}
@Override
public BigDecimal getMinValue() {
return minValue;
}
@Override
public void setMinValue(BigDecimal minValue) {
this.minValue = minValue;
}
@Override
public BigDecimal getMaxValue() {
return maxValue;
}
@Override
public void setMaxValue(BigDecimal maxValue) {
this.maxValue = maxValue;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_domain_SearchFacetRangeImpl.java |
821 | public class MultiSearchRequestBuilder extends ActionRequestBuilder<MultiSearchRequest, MultiSearchResponse, MultiSearchRequestBuilder> {
public MultiSearchRequestBuilder(Client client) {
super((InternalClient) client, new MultiSearchRequest());
}
/**
 * Add a search request to execute. Note, the order is important, the search response will be returned in the
 * same order as the search requests.
 * <p/>
 * If ignoreIndices has been set on the search request, then the indicesOptions of the multi search request
 * will not be used (if set).
 */
public MultiSearchRequestBuilder add(SearchRequest request) {
// NOTE(review): identity comparison (==) against IndicesOptions.strict()
// assumes strict() returns a canonical shared instance — confirm.
if (request.indicesOptions() == IndicesOptions.strict() && request().indicesOptions() != IndicesOptions.strict()) {
request.indicesOptions(request().indicesOptions());
}
super.request.add(request);
return this;
}
/**
 * Add a search request to execute. Note, the order is important, the search response will be returned in the
 * same order as the search requests.
 */
public MultiSearchRequestBuilder add(SearchRequestBuilder request) {
// Same propagation logic as add(SearchRequest), for the builder variant.
if (request.request().indicesOptions() == IndicesOptions.strict() && request().indicesOptions() != IndicesOptions.strict()) {
request.request().indicesOptions(request().indicesOptions());
}
super.request.add(request);
return this;
}
/**
 * Specifies what type of requested indices to ignore and how to deal with wildcard indices expressions.
 * For example indices that don't exist.
 *
 * Invoke this method before invoking {@link #add(SearchRequestBuilder)}.
 */
public MultiSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request().indicesOptions(indicesOptions);
return this;
}
@Override
protected void doExecute(ActionListener<MultiSearchResponse> listener) {
// Delegates to the client's multiSearch API with the accumulated requests.
((Client) client).multiSearch(request, listener);
}
} | 1no label
| src_main_java_org_elasticsearch_action_search_MultiSearchRequestBuilder.java |
3,295 | public class BinaryDVIndexFieldData extends DocValuesIndexFieldData implements IndexFieldData<BinaryDVAtomicFieldData> {
// Field data backed by Lucene binary doc values; per-segment loading is
// cheap, so loadDirect simply delegates to load.
public BinaryDVIndexFieldData(Index index, Names fieldNames) {
super(index, fieldNames);
}
@Override
public boolean valuesOrdered() {
// Binary doc values carry no ordering guarantee.
return false;
}
@Override
public BinaryDVAtomicFieldData load(AtomicReaderContext context) {
return new BinaryDVAtomicFieldData(context.reader(), fieldNames.indexName());
}
@Override
public BinaryDVAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
return load(context);
}
@Override
public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, SortMode sortMode) {
return new BytesRefFieldComparatorSource(this, missingValue, sortMode);
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_BinaryDVIndexFieldData.java |
69 | @RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class CloudyUtilityTest {
String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n" +
"<DescribeInstancesResponse xmlns=\"http://ec2.amazonaws.com/doc/2011-05-15/\">\n" +
" <requestId>c0f82bf8-b7f5-4cf1-bbfa-b95ea4bd38da</requestId>\n" +
" <reservationSet>\n" +
" <item>\n" +
" <reservationId>r-48ff3826</reservationId>\n" +
" <ownerId>665466731577</ownerId>\n" +
" <groupSet>\n" +
" <item>\n" +
" <groupId>sg-b67baddf</groupId>\n" +
" <groupName>hazelcast</groupName>\n" +
" </item>\n" +
" </groupSet>\n" +
" <instancesSet>\n" +
" <item>\n" +
" <instanceId>i-0a0c616a</instanceId>\n" +
" <imageId>ami-7f418316</imageId>\n" +
" <instanceState>\n" +
" <code>16</code>\n" +
" <name>running</name>\n" +
" </instanceState>\n" +
" <privateDnsName>domU-12-31-39-07-C5-C4.compute-1.internal</privateDnsName>\n" +
" <dnsName>ec2-50-17-19-37.compute-1.amazonaws.com</dnsName>\n" +
" <reason/>\n" +
" <keyName>hazelcast_key_pair</keyName>\n" +
" <amiLaunchIndex>0</amiLaunchIndex>\n" +
" <productCodes/>\n" +
" <instanceType>t1.micro</instanceType>\n" +
" <launchTime>2011-09-27T11:37:35.000Z</launchTime>\n" +
" <placement>\n" +
" <availabilityZone>us-east-1a</availabilityZone>\n" +
" <groupName/>\n" +
" <tenancy>default</tenancy>\n" +
" </placement>\n" +
" <kernelId>aki-805ea7e9</kernelId>\n" +
" <monitoring>\n" +
" <state>disabled</state>\n" +
" </monitoring>\n" +
" <privateIpAddress>10.209.198.50</privateIpAddress>\n" +
" <ipAddress>50.17.19.37</ipAddress>\n" +
" <groupSet>\n" +
" <item>\n" +
" <groupId>sg-b67baddf</groupId>\n" +
" <groupName>hazelcast</groupName>\n" +
" </item>\n" +
" </groupSet>\n" +
" <architecture>i386</architecture>\n" +
" <rootDeviceType>ebs</rootDeviceType>\n" +
" <rootDeviceName>/dev/sda1</rootDeviceName>\n" +
" <blockDeviceMapping>\n" +
" <item>\n" +
" <deviceName>/dev/sda1</deviceName>\n" +
" <ebs>\n" +
" <volumeId>vol-d5bdffbf</volumeId>\n" +
" <status>attached</status>\n" +
" <attachTime>2011-09-27T11:37:56.000Z</attachTime>\n" +
" <deleteOnTermination>true</deleteOnTermination>\n" +
" </ebs>\n" +
" </item>\n" +
" </blockDeviceMapping>\n" +
" <virtualizationType>paravirtual</virtualizationType>\n" +
" <clientToken/>\n" +
" <tagSet>\n" +
" <item>\n" +
" <key>name2</key>\n" +
" <value>value2</value>\n" +
" </item>\n" +
" <item>\n" +
" <key>Name1</key>\n" +
" <value>value1</value>\n" +
" </item>\n" +
" <item>\n" +
" <key>name</key>\n" +
" <value/>\n" +
" </item>\n" +
" </tagSet>\n" +
" <hypervisor>xen</hypervisor>\n" +
" </item>\n" +
" <item>\n" +
" <instanceId>i-0c0c616c</instanceId>\n" +
" <imageId>ami-7f418316</imageId>\n" +
" <instanceState>\n" +
" <code>16</code>\n" +
" <name>running</name>\n" +
" </instanceState>\n" +
" <privateDnsName>domU-12-31-39-07-C2-60.compute-1.internal</privateDnsName>\n" +
" <dnsName>ec2-50-16-102-143.compute-1.amazonaws.com</dnsName>\n" +
" <reason/>\n" +
" <keyName>hazelcast_key_pair</keyName>\n" +
" <amiLaunchIndex>1</amiLaunchIndex>\n" +
" <productCodes/>\n" +
" <instanceType>t1.micro</instanceType>\n" +
" <launchTime>2011-09-27T11:37:35.000Z</launchTime>\n" +
" <placement>\n" +
" <availabilityZone>us-east-1a</availabilityZone>\n" +
" <groupName/>\n" +
" <tenancy>default</tenancy>\n" +
" </placement>\n" +
" <kernelId>aki-805ea7e9</kernelId>\n" +
" <monitoring>\n" +
" <state>disabled</state>\n" +
" </monitoring>\n" +
" <privateIpAddress>10.209.193.170</privateIpAddress>\n" +
" <ipAddress>50.16.102.143</ipAddress>\n" +
" <groupSet>\n" +
" <item>\n" +
" <groupId>sg-b67baddf</groupId>\n" +
" <groupName>hazelcast</groupName>\n" +
" </item>\n" +
" </groupSet>\n" +
" <architecture>i386</architecture>\n" +
" <rootDeviceType>ebs</rootDeviceType>\n" +
" <rootDeviceName>/dev/sda1</rootDeviceName>\n" +
" <blockDeviceMapping>\n" +
" <item>\n" +
" <deviceName>/dev/sda1</deviceName>\n" +
" <ebs>\n" +
" <volumeId>vol-abbdffc1</volumeId>\n" +
" <status>attached</status>\n" +
" <attachTime>2011-09-27T11:37:57.000Z</attachTime>\n" +
" <deleteOnTermination>true</deleteOnTermination>\n" +
" </ebs>\n" +
" </item>\n" +
" </blockDeviceMapping>\n" +
" <virtualizationType>paravirtual</virtualizationType>\n" +
" <clientToken/>\n" +
" <tagSet>\n" +
" <item>\n" +
" <key>Name1</key>\n" +
" <value>value1</value>\n" +
" </item>\n" +
" <item>\n" +
" <key>name2</key>\n" +
" <value>value2</value>\n" +
" </item>\n" +
" </tagSet>\n" +
" <hypervisor>xen</hypervisor>\n" +
" </item>\n" +
" </instancesSet>\n" +
" <requesterId>058890971305</requesterId>\n" +
" </item>\n" +
" </reservationSet>\n" +
"</DescribeInstancesResponse>";
@Test
public void testNoTags() throws IOException {
InputStream is = new ByteArrayInputStream(xml.getBytes());
AwsConfig awsConfig = new AwsConfig();
awsConfig.setAccessKey("some-access-key");
awsConfig.setSecretKey("some-secret-key");
awsConfig.setSecurityGroupName("hazelcast");
List<String> result = (List<String>) CloudyUtility.unmarshalTheResponse(is, awsConfig);
assertEquals(2, result.size());
}
@Test
public void testTagsBothNodeHave() throws IOException {
InputStream is = new ByteArrayInputStream(xml.getBytes());
AwsConfig awsConfig = new AwsConfig();
awsConfig.setAccessKey("some-access-key");
awsConfig.setSecretKey("some-secret-key");
awsConfig.setSecurityGroupName("hazelcast");
awsConfig.setTagKey("Name1");
awsConfig.setTagValue("value1");
List<String> result = (List<String>) CloudyUtility.unmarshalTheResponse(is, awsConfig);
assertEquals(2, result.size());
}
@Test
public void testTagOnlyOneNodeHave() throws IOException {
InputStream is = new ByteArrayInputStream(xml.getBytes());
AwsConfig awsConfig = new AwsConfig();
awsConfig.setAccessKey("some-access-key");
awsConfig.setSecretKey("some-secret-key");
awsConfig.setSecurityGroupName("hazelcast");
awsConfig.setTagKey("name");
awsConfig.setTagValue("");
List<String> result = (List<String>) CloudyUtility.unmarshalTheResponse(is, awsConfig);
assertEquals(1, result.size());
}
} | 0true
| hazelcast-cloud_src_test_java_com_hazelcast_aws_utility_CloudyUtilityTest.java |
346 | static abstract class TestHelper extends Thread {
// Test worker that repeatedly runs doRun() and can move value between two
// map keys via work().
protected static final int ITERATIONS = 1000*10;
protected final Random random = new Random();
protected final IMap<String, Integer> map;
protected final String upKey;
protected final String downKey;
public TestHelper(IMap map, String upKey, String downKey){
this.map = map;
this.upKey = upKey;
this.downKey = downKey;
}
public void run() {
try{
for ( int i=0; i < ITERATIONS; i++ ) {
doRun();
}
}catch(Exception e){
// Surface any test failure instead of letting the thread die silently.
throw new RuntimeException("Test Thread crashed with ", e);
}
}
// One iteration of the concrete test's workload.
abstract void doRun()throws Exception;
// Transfers a random amount from downKey to upKey.
// NOTE(review): the get/put pairs are not atomic and unboxing throws NPE
// if a key is absent — presumably subclasses lock the keys and the test
// pre-populates them; confirm against the enclosing test class.
public void work(){
int upTotal = map.get(upKey);
int downTotal = map.get(downKey);
int dif = random.nextInt(1000);
upTotal += dif;
downTotal -= dif;
map.put(upKey, upTotal);
map.put(downKey, downTotal);
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTryLockConcurrentTests.java |
0 | public final class StandaloneRandomizedContext {
// Utility class: private constructor prevents instantiation.
private StandaloneRandomizedContext() {
}
/**
 * Creates a new {@link RandomizedContext} associated to the current thread
 */
public static void createRandomizedContext(Class<?> testClass, Randomness runnerRandomness) {
//the randomized runner is passed in as null, which is fine as long as we don't try to access it afterwards
RandomizedContext randomizedContext = RandomizedContext.create(Thread.currentThread().getThreadGroup(), testClass, null);
randomizedContext.push(runnerRandomness.clone(Thread.currentThread()));
}
/**
 * Destroys the {@link RandomizedContext} associated to the current thread
 */
public static void disposeRandomizedContext() {
RandomizedContext.current().dispose();
}
/**
 * Pushes the given {@link Randomness} onto the current thread's context stack.
 */
public static void pushRandomness(Randomness randomness) {
RandomizedContext.current().push(randomness);
}
/**
 * Pops the top {@link Randomness} off the current thread's context stack and destroys it.
 */
public static void popAndDestroy() {
RandomizedContext.current().popAndDestroy();
}
/**
 * Returns the string formatted seed associated to the current thread's randomized context
 */
public static String getSeedAsString() {
return SeedUtils.formatSeed(RandomizedContext.current().getRandomness().getSeed());
}
/**
 * Util method to extract the seed out of a {@link Randomness} instance
 */
public static long getSeed(Randomness randomness) {
return randomness.getSeed();
}
} | 0true
| src_test_java_com_carrotsearch_randomizedtesting_StandaloneRandomizedContext.java |
3,498 | public interface Mapper extends ToXContent {
// Core mapping abstraction: a Mapper parses document fields and can merge
// with, traverse, and serialize its mapping definition.
public static final Mapper[] EMPTY_ARRAY = new Mapper[0];
// Context handed to builders: index settings plus the current content path.
public static class BuilderContext {
private final Settings indexSettings;
private final ContentPath contentPath;
public BuilderContext(@Nullable Settings indexSettings, ContentPath contentPath) {
this.contentPath = contentPath;
this.indexSettings = indexSettings;
}
public ContentPath path() {
return this.contentPath;
}
@Nullable
public Settings indexSettings() {
return this.indexSettings;
}
// Returns the version the index was created with, or null if the
// settings are absent or do not contain the version.
@Nullable
public Version indexCreatedVersion() {
if (indexSettings == null) {
return null;
}
return indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null);
}
}
// Fluent builder base; subclasses return themselves via the T parameter.
public static abstract class Builder<T extends Builder, Y extends Mapper> {
public String name;
protected T builder;
protected Builder(String name) {
this.name = name;
}
public String name() {
return this.name;
}
// Builds the mapper from the accumulated state and the given context.
public abstract Y build(BuilderContext context);
}
// Parses a mapping definition (as a name/settings map) into a Builder.
public interface TypeParser {
// Services available while parsing a mapping definition.
public static class ParserContext {
private final PostingsFormatService postingsFormatService;
private final DocValuesFormatService docValuesFormatService;
private final AnalysisService analysisService;
private final SimilarityLookupService similarityLookupService;
private final ImmutableMap<String, TypeParser> typeParsers;
private final Version indexVersionCreated;
public ParserContext(PostingsFormatService postingsFormatService, DocValuesFormatService docValuesFormatService,
AnalysisService analysisService, SimilarityLookupService similarityLookupService,
ImmutableMap<String, TypeParser> typeParsers, Version indexVersionCreated) {
this.postingsFormatService = postingsFormatService;
this.docValuesFormatService = docValuesFormatService;
this.analysisService = analysisService;
this.similarityLookupService = similarityLookupService;
this.typeParsers = typeParsers;
this.indexVersionCreated = indexVersionCreated;
}
public AnalysisService analysisService() {
return analysisService;
}
public PostingsFormatService postingFormatService() {
return postingsFormatService;
}
public DocValuesFormatService docValuesFormatService() {
return docValuesFormatService;
}
public SimilarityLookupService similarityLookupService() {
return similarityLookupService;
}
// Type names are normalized to underscore case before lookup.
public TypeParser typeParser(String type) {
return typeParsers.get(Strings.toUnderscoreCase(type));
}
public Version indexVersionCreated() {
return indexVersionCreated;
}
}
Mapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException;
}
String name();
// Parses the field(s) this mapper is responsible for from the document.
void parse(ParseContext context) throws IOException;
// Merges another mapper's definition into this one.
void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException;
void traverse(FieldMapperListener fieldMapperListener);
void traverse(ObjectMapperListener objectMapperListener);
void close();
} | 0true
| src_main_java_org_elasticsearch_index_mapper_Mapper.java |
1,767 | public class ShapeBuilderTests extends ElasticsearchTestCase {
// Unit tests for ShapeBuilder: points, rectangles, polygons, line strings,
// circles, and dateline wrapping behavior.
@Test
public void testNewPoint() {
Point point = ShapeBuilder.newPoint(-100, 45).build();
assertEquals(-100D, point.getX(), 0.0d);
assertEquals(45D, point.getY(), 0.0d);
}
@Test
public void testNewRectangle() {
Rectangle rectangle = ShapeBuilder.newEnvelope().topLeft(-45, 30).bottomRight(45, -30).build();
assertEquals(-45D, rectangle.getMinX(), 0.0d);
assertEquals(-30D, rectangle.getMinY(), 0.0d);
assertEquals(45D, rectangle.getMaxX(), 0.0d);
assertEquals(30D, rectangle.getMaxY(), 0.0d);
}
@Test
public void testNewPolygon() {
Polygon polygon = ShapeBuilder.newPolygon()
.point(-45, 30)
.point(45, 30)
.point(45, -30)
.point(-45, -30)
.point(-45, 30).toPolygon();
LineString exterior = polygon.getExteriorRing();
assertEquals(exterior.getCoordinateN(0), new Coordinate(-45, 30));
assertEquals(exterior.getCoordinateN(1), new Coordinate(45, 30));
assertEquals(exterior.getCoordinateN(2), new Coordinate(45, -30));
assertEquals(exterior.getCoordinateN(3), new Coordinate(-45, -30));
}
@Test
public void testLineStringBuilder() {
// Building a simple LineString
ShapeBuilder.newLineString()
.point(-130.0, 55.0)
.point(-130.0, -40.0)
.point(-15.0, -40.0)
.point(-20.0, 50.0)
.point(-45.0, 50.0)
.point(-45.0, -15.0)
.point(-110.0, -15.0)
.point(-110.0, 55.0).build();
// Building a linestring that needs to be wrapped
ShapeBuilder.newLineString()
.point(100.0, 50.0)
.point(110.0, -40.0)
.point(240.0, -40.0)
.point(230.0, 60.0)
.point(200.0, 60.0)
.point(200.0, -30.0)
.point(130.0, -30.0)
.point(130.0, 60.0)
.build();
// Building a lineString on the dateline
ShapeBuilder.newLineString()
.point(-180.0, 80.0)
.point(-180.0, 40.0)
.point(-180.0, -40.0)
.point(-180.0, -80.0)
.build();
// Building a lineString on the dateline
ShapeBuilder.newLineString()
.point(180.0, 80.0)
.point(180.0, 40.0)
.point(180.0, -40.0)
.point(180.0, -80.0)
.build();
}
@Test
public void testMultiLineString() {
ShapeBuilder.newMultiLinestring()
.linestring()
.point(-100.0, 50.0)
.point(50.0, 50.0)
.point(50.0, 20.0)
.point(-100.0, 20.0)
.end()
.linestring()
.point(-100.0, 20.0)
.point(50.0, 20.0)
.point(50.0, 0.0)
.point(-100.0, 0.0)
.end()
.build();
// LineString that needs to be wrapped
ShapeBuilder.newMultiLinestring()
.linestring()
.point(150.0, 60.0)
.point(200.0, 60.0)
.point(200.0, 40.0)
.point(150.0, 40.0)
.end()
.linestring()
.point(150.0, 20.0)
.point(200.0, 20.0)
.point(200.0, 0.0)
.point(150.0, 0.0)
.end()
.build();
}
@Test
public void testPolygonSelfIntersection() {
try {
ShapeBuilder.newPolygon()
.point(-40.0, 50.0)
.point(40.0, 50.0)
.point(-40.0, -50.0)
.point(40.0, -50.0)
.close().build();
fail("Polygon self-intersection");
} catch (AssertionError e) {
// FIX: the original caught Throwable unconditionally, which also
// swallowed the AssertionError thrown by fail() above — the test
// could never fail. Rethrow the assertion failure explicitly.
throw e;
} catch (Throwable e) {
// expected: building a self-intersecting polygon must throw
}
}
@Test
public void testGeoCircle() {
ShapeBuilder.newCircleBuilder().center(0, 0).radius("100m").build();
ShapeBuilder.newCircleBuilder().center(+180, 0).radius("100m").build();
ShapeBuilder.newCircleBuilder().center(-180, 0).radius("100m").build();
ShapeBuilder.newCircleBuilder().center(0, 90).radius("100m").build();
ShapeBuilder.newCircleBuilder().center(0, -90).radius("100m").build();
}
@Test
public void testPolygonWrapping() {
Shape shape = ShapeBuilder.newPolygon()
.point(-150.0, 65.0)
.point(-250.0, 65.0)
.point(-250.0, -65.0)
.point(-150.0, -65.0)
.close().build();
assertMultiPolygon(shape);
}
@Test
public void testLineStringWrapping() {
Shape shape = ShapeBuilder.newLineString()
.point(-150.0, 65.0)
.point(-250.0, 65.0)
.point(-250.0, -65.0)
.point(-150.0, -65.0)
.build();
assertMultiLineString(shape);
}
} | 0true
| src_test_java_org_elasticsearch_common_geo_ShapeBuilderTests.java |
1,836 | Thread t = new Thread(new Runnable() {
public void run() {
map.lock(1);
latch.countDown();
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_MapLockTest.java |
1,171 | public interface ITopic<E> extends DistributedObject {
/**
 * Returns the name of this ITopic instance
 *
 * @return name of this instance
 */
String getName();
/**
 * Publishes the message to all subscribers of this topic
 *
 * @param message the message to publish to all subscribers of this topic
 */
void publish(E message);
/**
 * Subscribes to this topic. When someone publishes a message on this topic.
 * onMessage() function of the given MessageListener is called. More than one message listener can be
 * added on one instance.
 *
 * @param listener the MessageListener to subscribe to this topic
 *
 * @return returns registration id.
 */
String addMessageListener(MessageListener<E> listener);
/**
 * Stops receiving messages for the given message listener. If the given listener already removed,
 * this method does nothing.
 *
 * @param registrationId Id of listener registration.
 *
 * @return true if registration is removed, false otherwise
 */
boolean removeMessageListener(final String registrationId );
/**
 * Returns statistics of this topic,like total number of publishes/receives
 *
 * @return statistics
 */
LocalTopicStats getLocalTopicStats();
} | 0true
| hazelcast_src_main_java_com_hazelcast_core_ITopic.java |
487 | public class ClearIndicesCacheAction extends IndicesAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, ClearIndicesCacheRequestBuilder> {
// Singleton action descriptor for the "clear indices cache" admin API.
public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction();
public static final String NAME = "indices/cache/clear";
// Private: callers use the shared INSTANCE.
private ClearIndicesCacheAction() {
super(NAME);
}
@Override
public ClearIndicesCacheResponse newResponse() {
return new ClearIndicesCacheResponse();
}
@Override
public ClearIndicesCacheRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new ClearIndicesCacheRequestBuilder(client);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_cache_clear_ClearIndicesCacheAction.java |
1,824 | public final class Guice {
// Static entry point for building injectors; not instantiable.
private Guice() {
}
/**
 * Creates an injector for the given set of modules.
 *
 * @throws CreationException if one or more errors occur during Injector
 *                           construction
 */
public static Injector createInjector(Module... modules) {
return createInjector(Arrays.asList(modules));
}
/**
 * Creates an injector for the given set of modules.
 *
 * @throws CreationException if one or more errors occur during Injector
 *                           construction
 */
public static Injector createInjector(Iterable<? extends Module> modules) {
// Defaults to the DEVELOPMENT stage.
return createInjector(Stage.DEVELOPMENT, modules);
}
/**
 * Creates an injector for the given set of modules, in a given development
 * stage.
 *
 * @throws CreationException if one or more errors occur during Injector
 *                           construction
 */
public static Injector createInjector(Stage stage, Module... modules) {
return createInjector(stage, Arrays.asList(modules));
}
/**
 * Creates an injector for the given set of modules, in a given development
 * stage.
 *
 * @throws CreationException if one or more errors occur during Injector
 *                           construction
 */
public static Injector createInjector(Stage stage,
Iterable<? extends Module> modules) {
return new InjectorBuilder()
.stage(stage)
.addModules(modules)
.build();
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_Guice.java |
114 | HazelcastTestSupport.assertTrueEventually(new AssertTask() {
public void run() throws Exception {
final NearCacheStats stats = map.getLocalMapStats().getNearCacheStats();
assertEquals(expectedSize, stats.getOwnedEntryCount());
}
}); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_ClientNearCacheTest.java |
825 | public static class Item implements Streamable {
// One entry of a multi-search response: exactly one of response or
// failureMessage is set.
private SearchResponse response;
private String failureMessage;
// No-arg constructor for deserialization via readItem/readFrom.
Item() {
}
public Item(SearchResponse response, String failureMessage) {
this.response = response;
this.failureMessage = failureMessage;
}
/**
 * Is it a failed search?
 */
public boolean isFailure() {
return failureMessage != null;
}
/**
 * The actual failure message, null if its not a failure.
 */
@Nullable
public String getFailureMessage() {
return failureMessage;
}
/**
 * The actual search response, null if its a failure.
 */
@Nullable
public SearchResponse getResponse() {
return this.response;
}
public static Item readItem(StreamInput in) throws IOException {
Item item = new Item();
item.readFrom(in);
return item;
}
@Override
public void readFrom(StreamInput in) throws IOException {
// Leading boolean flags whether a response (true) or failure (false) follows.
if (in.readBoolean()) {
this.response = new SearchResponse();
response.readFrom(in);
} else {
failureMessage = in.readString();
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
// Mirrors readFrom: boolean flag, then either the response or the message.
if (response != null) {
out.writeBoolean(true);
response.writeTo(out);
} else {
out.writeBoolean(false);
out.writeString(failureMessage);
}
}
} | 0true
| src_main_java_org_elasticsearch_action_search_MultiSearchResponse.java |
3,355 | static class GeoPointValuesSingle extends GeoPointValues {
// Single-valued geo points stored as parallel lat/lon arrays indexed by docId.
private final BigDoubleArrayList lon;
private final BigDoubleArrayList lat;
// Reused for every nextValue() call — callers must copy, not retain it.
private final GeoPoint scratch = new GeoPoint();
GeoPointValuesSingle(BigDoubleArrayList lon, BigDoubleArrayList lat) {
super(false);
this.lon = lon;
this.lat = lat;
}
@Override
public int setDocument(int docId) {
this.docId = docId;
// Single-valued: every document reports exactly one value.
return 1;
}
@Override
public GeoPoint nextValue() {
// Returns the shared scratch instance reset to this doc's coordinates.
return scratch.reset(lat.get(docId), lon.get(docId));
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_GeoPointDoubleArrayAtomicFieldData.java |
754 | phasedUnit.getCompilationUnit().visit(new Visitor() {
@Override
public void visit(ImportMemberOrType that) {
super.visit(that);
visitIt(that.getIdentifier(), that.getDeclarationModel());
}
@Override
public void visit(BaseMemberOrTypeExpression that) {
super.visit(that);
visitIt(that.getIdentifier(), that.getDeclaration());
}
@Override
public void visit(BaseType that) {
super.visit(that);
visitIt(that.getIdentifier(), that.getDeclarationModel());
}
@Override
public void visit(ModuleDescriptor that) {
super.visit(that);
visitIt(that.getImportPath());
}
@Override
public void visit(PackageDescriptor that) {
super.visit(that);
visitIt(that.getImportPath());
}
private void visitIt(Tree.ImportPath importPath) {
if (formatPath(importPath.getIdentifiers()).equals(oldName)) {
edits.add(new ReplaceEdit(importPath.getStartIndex(),
oldName.length(), newName));
}
}
private void visitIt(Tree.Identifier id, Declaration dec) {
if (dec!=null && !declarations.contains(dec)) {
String pn = dec.getUnit().getPackage().getNameAsString();
if (pn.equals(oldName) && !pn.isEmpty() &&
!pn.equals(Module.LANGUAGE_MODULE_NAME)) {
imports.put(dec, id.getText());
}
}
}
}); | 1no label
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_refactor_CopyFileRefactoringParticipant.java |
2,761 | public class HttpServerModule extends AbstractModule implements SpawnModules {
// Guice module wiring the HTTP server; the concrete transport module is
// chosen via the "http.type" setting (defaults to the Netty transport).
private final Settings settings;
public HttpServerModule(Settings settings) {
this.settings = settings;
}
@Override
public Iterable<? extends Module> spawnModules() {
return ImmutableList.of(Modules.createModule(settings.getAsClass("http.type", NettyHttpServerTransportModule.class, "org.elasticsearch.http.", "HttpServerTransportModule"), settings));
}
@SuppressWarnings({"unchecked"})
@Override
protected void configure() {
bind(HttpServer.class).asEagerSingleton();
}
} | 0true
| src_main_java_org_elasticsearch_http_HttpServerModule.java |
579 | public final class ClusterServiceImpl implements ClusterService, ConnectionListener, ManagedService,
EventPublishingService<MembershipEvent, MembershipListener> {
    public static final String SERVICE_NAME = "hz:core:clusterService";
    private final Node node;
    private final NodeEngineImpl nodeEngine;
    private final ILogger logger;
    // Address/member of the local node, cached at construction time.
    protected final Address thisAddress;
    protected final MemberImpl thisMember;
    // Join pacing and failure-detection limits, all sourced from group properties.
    private final long waitMillisBeforeJoin;
    private final long maxWaitSecondsBeforeJoin;
    private final long maxNoHeartbeatMillis;
    private final long maxNoMasterConfirmationMillis;
    // Optional ICMP reachability probing used when heartbeats go quiet.
    private final boolean icmpEnabled;
    private final int icmpTtl;
    private final int icmpTimeout;
    // Guards all join/membership mutations below.
    private final Lock lock = new ReentrantLock();
    // Join requests accepted but not yet finalized (insertion-ordered).
    private final Set<MemberInfo> setJoins = new LinkedHashSet<MemberInfo>(100);
    // Immutable snapshots of the member list; replaced atomically while holding 'lock'.
    private final AtomicReference<Map<Address, MemberImpl>> membersMapRef
            = new AtomicReference<Map<Address, MemberImpl>>(Collections.EMPTY_MAP);
    private final AtomicReference<Set<MemberImpl>> membersRef = new AtomicReference<Set<MemberImpl>>(Collections.EMPTY_SET);
    // Set while a split-brain merge is being prepared; suppresses member removals meanwhile.
    private final AtomicBoolean preparingToMerge = new AtomicBoolean(false);
    // Join state (guarded by 'lock').
    private boolean joinInProgress = false;
    private long timeToStartJoin = 0;
    private long firstJoinRequest = 0;
    // Last master-confirmation timestamp per member (used on the master only).
    private final ConcurrentMap<MemberImpl, Long> masterConfirmationTimes = new ConcurrentHashMap<MemberImpl, Long>();
    // Offset between the master clock and the local clock; MAX_VALUE means "not yet measured".
    private volatile long clusterTimeDiff = Long.MAX_VALUE;
    /**
     * Creates the cluster service for the given node, seeds the member list with the
     * local member and registers itself for connection lifecycle callbacks.
     */
    public ClusterServiceImpl(final Node node) {
        this.node = node;
        nodeEngine = node.nodeEngine;
        logger = node.getLogger(ClusterService.class.getName());
        thisAddress = node.getThisAddress();
        thisMember = node.getLocalMember();
        setMembers(thisMember);
        // Property values are in seconds; convert the ones used as millis up front.
        waitMillisBeforeJoin = node.groupProperties.WAIT_SECONDS_BEFORE_JOIN.getInteger() * 1000L;
        maxWaitSecondsBeforeJoin = node.groupProperties.MAX_WAIT_SECONDS_BEFORE_JOIN.getInteger();
        maxNoHeartbeatMillis = node.groupProperties.MAX_NO_HEARTBEAT_SECONDS.getInteger() * 1000L;
        maxNoMasterConfirmationMillis = node.groupProperties.MAX_NO_MASTER_CONFIRMATION_SECONDS.getInteger() * 1000L;
        icmpEnabled = node.groupProperties.ICMP_ENABLED.getBoolean();
        icmpTtl = node.groupProperties.ICMP_TTL.getInteger();
        icmpTimeout = node.groupProperties.ICMP_TIMEOUT.getInteger();
        node.connectionManager.addConnectionListener(this);
    }
    /**
     * Schedules the periodic cluster maintenance tasks on the "hz:cluster" executor:
     * split-brain detection/merge, heartbeating, master confirmation and member-list
     * publication. Non-positive configured delays are clamped to small minimums.
     */
    @Override
    public void init(final NodeEngine nodeEngine, Properties properties) {
        long mergeFirstRunDelay = node.getGroupProperties().MERGE_FIRST_RUN_DELAY_SECONDS.getLong() * 1000;
        mergeFirstRunDelay = mergeFirstRunDelay <= 0 ? 100 : mergeFirstRunDelay; // milliseconds
        ExecutionService executionService = nodeEngine.getExecutionService();
        String executorName = "hz:cluster";
        executionService.register(executorName, 2, 1000, ExecutorType.CACHED);
        long mergeNextRunDelay = node.getGroupProperties().MERGE_NEXT_RUN_DELAY_SECONDS.getLong() * 1000;
        mergeNextRunDelay = mergeNextRunDelay <= 0 ? 100 : mergeNextRunDelay; // milliseconds
        executionService.scheduleWithFixedDelay(executorName, new SplitBrainHandler(node),
                mergeFirstRunDelay, mergeNextRunDelay, TimeUnit.MILLISECONDS);
        long heartbeatInterval = node.groupProperties.HEARTBEAT_INTERVAL_SECONDS.getInteger();
        heartbeatInterval = heartbeatInterval <= 0 ? 1 : heartbeatInterval;
        executionService.scheduleWithFixedDelay(executorName, new Runnable() {
            public void run() {
                heartBeater();
            }
        }, heartbeatInterval, heartbeatInterval, TimeUnit.SECONDS);
        long masterConfirmationInterval = node.groupProperties.MASTER_CONFIRMATION_INTERVAL_SECONDS.getInteger();
        masterConfirmationInterval = masterConfirmationInterval <= 0 ? 1 : masterConfirmationInterval;
        executionService.scheduleWithFixedDelay(executorName, new Runnable() {
            public void run() {
                sendMasterConfirmation();
            }
        }, masterConfirmationInterval, masterConfirmationInterval, TimeUnit.SECONDS);
        long memberListPublishInterval = node.groupProperties.MEMBER_LIST_PUBLISH_INTERVAL_SECONDS.getInteger();
        memberListPublishInterval = memberListPublishInterval <= 0 ? 1 : memberListPublishInterval;
        executionService.scheduleWithFixedDelay(executorName, new Runnable() {
            public void run() {
                sendMemberListToOthers();
            }
        }, memberListPublishInterval, memberListPublishInterval, TimeUnit.SECONDS);
    }
    /** Returns true while a join is being finalized or join requests are still queued. */
    public boolean isJoinInProgress() {
        lock.lock();
        try {
            return joinInProgress || !setJoins.isEmpty();
        } finally {
            lock.unlock();
        }
    }
    /**
     * Synchronously asks {@code target} for its join information (single try).
     * Returns null if the remote call fails.
     */
    public JoinRequest checkJoinInfo(Address target) {
        Future f = nodeEngine.getOperationService().createInvocationBuilder(SERVICE_NAME,
                new JoinCheckOperation(node.createJoinRequest()), target)
                .setTryCount(1).invoke();
        try {
            return (JoinRequest) nodeEngine.toObject(f.get());
        } catch (Exception e) {
            logger.warning("Error during join check!", e);
        }
        return null;
    }
    /**
     * Validates protocol version and config compatibility of an incoming join message.
     * Config incompatibility is logged and rethrown so callers can reject the sender.
     */
    public boolean validateJoinMessage(JoinMessage joinMessage) throws Exception {
        boolean valid = Packet.VERSION == joinMessage.getPacketVersion();
        if (valid) {
            try {
                valid = node.createConfigCheck().isCompatible(joinMessage.getConfigCheck());
            } catch (Exception e) {
                final String message = "Invalid join request from: " + joinMessage.getAddress() + ", reason:" + e.getMessage();
                logger.warning(message);
                node.getSystemLogService().logJoin(message);
                throw e;
            }
        }
        return valid;
    }
    // Logs that no live connection exists to the given member address.
    private void logMissingConnection(Address address) {
        String msg = node.getLocalMember() + " has no connection to " + address;
        logger.warning(msg);
    }
    /**
     * Periodic failure detector. On the master: checks last-read and master-confirmation
     * times of every member and removes members that exceed the configured limits.
     * On non-masters: heartbeats the master (removing it when it times out) and every
     * other member it has a connection to.
     */
    public final void heartBeater() {
        if (!node.joined() || !node.isActive()) return;
        long now = Clock.currentTimeMillis();
        final Collection<MemberImpl> members = getMemberList();
        if (node.isMaster()) {
            List<Address> deadAddresses = null;
            for (MemberImpl memberImpl : members) {
                final Address address = memberImpl.getAddress();
                if (!thisAddress.equals(address)) {
                    try {
                        Connection conn = node.connectionManager.getOrConnect(address);
                        if (conn != null && conn.live()) {
                            if ((now - memberImpl.getLastRead()) >= (maxNoHeartbeatMillis)) {
                                if (deadAddresses == null) {
                                    deadAddresses = new ArrayList<Address>();
                                }
                                logger.warning("Added " + address + " to list of dead addresses because of timeout since last read");
                                deadAddresses.add(address);
                            } else if ((now - memberImpl.getLastRead()) >= 5000 && (now - memberImpl.getLastPing()) >= 5000) {
                                // Quiet for 5s and not recently pinged: probe with ICMP before giving up.
                                // NOTE(review): the 5000/500 ms thresholds below are hard-coded, unlike
                                // the configurable maxNoHeartbeatMillis.
                                ping(memberImpl);
                            }
                            if ((now - memberImpl.getLastWrite()) > 500) {
                                // Keep traffic flowing so the peer's last-read timer stays fresh.
                                sendHeartbeat(address);
                            }
                            Long lastConfirmation = masterConfirmationTimes.get(memberImpl);
                            if (lastConfirmation == null ||
                                    (now - lastConfirmation > maxNoMasterConfirmationMillis)) {
                                if (deadAddresses == null) {
                                    deadAddresses = new ArrayList<Address>();
                                }
                                logger.warning("Added " + address +
                                        " to list of dead addresses because it has not sent a master confirmation recently");
                                deadAddresses.add(address);
                            }
                        } else if (conn == null && (now - memberImpl.getLastRead()) > 5000) {
                            logMissingConnection(address);
                            // Reset the read timer so the warning is not repeated on every tick.
                            memberImpl.didRead();
                        }
                    } catch (Exception e) {
                        logger.severe(e);
                    }
                }
            }
            if (deadAddresses != null) {
                for (Address address : deadAddresses) {
                    if (logger.isFinestEnabled()) {
                        logger.finest("No heartbeat should remove " + address);
                    }
                    removeAddress(address);
                }
            }
        } else {
            // send heartbeat to master
            Address masterAddress = node.getMasterAddress();
            if (masterAddress != null) {
                node.connectionManager.getOrConnect(masterAddress);
                MemberImpl masterMember = getMember(masterAddress);
                boolean removed = false;
                if (masterMember != null) {
                    if ((now - masterMember.getLastRead()) >= (maxNoHeartbeatMillis)) {
                        logger.warning("Master node has timed out its heartbeat and will be removed");
                        removeAddress(masterAddress);
                        removed = true;
                    } else if ((now - masterMember.getLastRead()) >= 5000 && (now - masterMember.getLastPing()) >= 5000) {
                        ping(masterMember);
                    }
                }
                if (!removed) {
                    sendHeartbeat(masterAddress);
                }
            }
            // Also heartbeat every other (non-local) member we can reach.
            for (MemberImpl member : members) {
                if (!member.localMember()) {
                    Address address = member.getAddress();
                    Connection conn = node.connectionManager.getOrConnect(address);
                    if (conn != null) {
                        sendHeartbeat(address);
                    } else {
                        if (logger.isFinestEnabled()) {
                            logger.finest("Could not connect to " + address + " to send heartbeat");
                        }
                    }
                }
            }
        }
    }
    /**
     * Asynchronously ICMP-pings a silent member (up to 5 attempts); if unreachable,
     * removes it from the cluster. No-op unless ICMP probing is enabled.
     */
    private void ping(final MemberImpl memberImpl) {
        memberImpl.didPing();
        if (!icmpEnabled) return;
        nodeEngine.getExecutionService().execute(ExecutionService.SYSTEM_EXECUTOR, new Runnable() {
            public void run() {
                try {
                    final Address address = memberImpl.getAddress();
                    logger.warning(thisAddress + " will ping " + address);
                    for (int i = 0; i < 5; i++) {
                        try {
                            if (address.getInetAddress().isReachable(null, icmpTtl, icmpTimeout)) {
                                logger.info(thisAddress + " pings successfully. Target: " + address);
                                return;
                            }
                        } catch (ConnectException ignored) {
                            // no route to host
                            // means we cannot connect anymore
                        }
                    }
                    logger.warning(thisAddress + " couldn't ping " + address);
                    // not reachable.
                    removeAddress(address);
                } catch (Throwable ignored) {
                }
            }
        });
    }
    /** Sends a best-effort heartbeat operation to {@code target}; failures are only logged. */
    private void sendHeartbeat(Address target) {
        if (target == null) return;
        try {
            node.nodeEngine.getOperationService().send(new HeartbeatOperation(), target);
        } catch (Exception e) {
            if (logger.isFinestEnabled()) {
                logger.finest("Error while sending heartbeat -> "
                        + e.getClass().getName() + "[" + e.getMessage() + "]");
            }
        }
    }
private void sendMasterConfirmation() {
if (!node.joined() || !node.isActive() || isMaster()) {
return;
}
final Address masterAddress = getMasterAddress();
if (masterAddress == null) {
logger.finest("Could not send MasterConfirmation, master is null!");
return;
}
final MemberImpl masterMember = getMember(masterAddress);
if (masterMember == null) {
logger.finest("Could not send MasterConfirmation, master is null!");
return;
}
if (logger.isFinestEnabled()) {
logger.finest("Sending MasterConfirmation to " + masterMember);
}
nodeEngine.getOperationService().send(new MasterConfirmationOperation(), masterAddress);
}
    // Will be called just before this node becomes the master
    /** Reseeds the confirmation timestamps so existing members are not instantly flagged dead. */
    private void resetMemberMasterConfirmations() {
        final Collection<MemberImpl> memberList = getMemberList();
        for (MemberImpl member : memberList) {
            masterConfirmationTimes.put(member, Clock.currentTimeMillis());
        }
    }
    /** Master-only: broadcasts the current member list to every other member. */
    private void sendMemberListToOthers() {
        if (!isMaster()) {
            return;
        }
        final Collection<MemberImpl> members = getMemberList();
        MemberInfoUpdateOperation op = new MemberInfoUpdateOperation(createMemberInfos(members, false), getClusterTime(), false);
        for (MemberImpl member : members) {
            if (member.equals(thisMember)) {
                continue;
            }
            nodeEngine.getOperationService().send(op, member.getAddress());
        }
    }
    /** Removes a dead member, destroying its connection. */
    public void removeAddress(Address deadAddress) {
        doRemoveAddress(deadAddress, true);
    }
    /**
     * Core member-removal path. Skipped entirely while a split-brain merge is being
     * prepared. If the dead address is the master, a new master is elected first.
     *
     * @param destroyConnection whether to also tear down the TCP connection
     */
    private void doRemoveAddress(Address deadAddress, boolean destroyConnection) {
        if (preparingToMerge.get()) {
            logger.warning("Cluster-merge process is ongoing, won't process member removal: " + deadAddress);
            return;
        }
        if (!node.joined()) {
            // Not part of a cluster yet: just report the failed connection attempt.
            node.failedConnection(deadAddress);
            return;
        }
        if (deadAddress.equals(thisAddress)) {
            return;
        }
        lock.lock();
        try {
            if (deadAddress.equals(node.getMasterAddress())) {
                assignNewMaster();
            }
            if (node.isMaster()) {
                setJoins.remove(new MemberInfo(deadAddress));
                resetMemberMasterConfirmations();
            }
            final Connection conn = node.connectionManager.getConnection(deadAddress);
            if (destroyConnection && conn != null) {
                node.connectionManager.destroyConnection(conn);
            }
            MemberImpl deadMember = getMember(deadAddress);
            if (deadMember != null) {
                removeMember(deadMember);
                logger.info(membersString());
            }
        } finally {
            lock.unlock();
        }
    }
    /**
     * Elects a new master after the current one died: the next member in the (ordered)
     * member list becomes master. Sets the master address to null when no candidate
     * exists or this node never joined.
     */
    private void assignNewMaster() {
        final Address oldMasterAddress = node.getMasterAddress();
        if (node.joined()) {
            final Collection<MemberImpl> members = getMemberList();
            MemberImpl newMaster = null;
            final int size = members.size();
            if (size > 1) {
                final Iterator<MemberImpl> iter = members.iterator();
                final MemberImpl member = iter.next();
                if (member.getAddress().equals(oldMasterAddress)) {
                    // Normal case: old master heads the list, promote the second entry.
                    newMaster = iter.next();
                } else {
                    // List head is unexpectedly not the old master; promote it anyway.
                    logger.severe("Old master " + oldMasterAddress
                            + " is dead but the first of member list is a different member " +
                            member + "!");
                    newMaster = member;
                }
            } else {
                logger.warning("Old master is dead and this node is not master " +
                        "but member list contains only " + size + " members! -> " + members);
            }
            logger.info("Master " + oldMasterAddress + " left the cluster. Assigning new master " + newMaster);
            if (newMaster != null) {
                node.setMasterAddress(newMaster.getAddress());
            } else {
                node.setMasterAddress(null);
            }
        } else {
            node.setMasterAddress(null);
        }
        if (logger.isFinestEnabled()) {
            logger.finest("Now Master " + node.getMasterAddress());
        }
    }
    /**
     * Handles an incoming join request. Validates the message, deals with re-joining
     * endpoints, optionally authenticates via the security context, queues the joiner
     * and starts the batched join once the pacing window elapses. Invalid requests get
     * their connection closed.
     */
    void handleJoinRequest(JoinRequestOperation joinRequest) {
        lock.lock();
        try {
            final JoinRequest joinMessage = joinRequest.getMessage();
            final long now = Clock.currentTimeMillis();
            if (logger.isFinestEnabled()) {
                String msg = "Handling join from " + joinMessage.getAddress() + ", inProgress: " + joinInProgress
                        + (timeToStartJoin > 0 ? ", timeToStart: " + (timeToStartJoin - now) : "");
                logger.finest(msg);
            }
            boolean validJoinRequest;
            try {
                validJoinRequest = validateJoinMessage(joinMessage);
            } catch (Exception e) {
                validJoinRequest = false;
            }
            final Connection conn = joinRequest.getConnection();
            if (validJoinRequest) {
                final MemberImpl member = getMember(joinMessage.getAddress());
                if (member != null) {
                    if (joinMessage.getUuid().equals(member.getUuid())) {
                        // Same node (same uuid) asking again: just resend the member list.
                        if (logger.isFinestEnabled()) {
                            String message = "Ignoring join request, member already exists.. => " + joinMessage;
                            logger.finest(message);
                        }
                        // send members update back to node trying to join again...
                        nodeEngine.getOperationService().send(new MemberInfoUpdateOperation(createMemberInfos(getMemberList(), true), getClusterTime(), false),
                                member.getAddress());
                        return;
                    }
                    // If this node is master then remove old member and process join request.
                    // If requesting address is equal to master node's address, that means master node
                    // somehow disconnected and wants to join back.
                    // So drop old member and process join request if this node becomes master.
                    if (node.isMaster() || member.getAddress().equals(node.getMasterAddress())) {
                        logger.warning("New join request has been received from an existing endpoint! => " + member
                                + " Removing old member and processing join request...");
                        // If existing connection of endpoint is different from current connection
                        // destroy it, otherwise keep it.
//                    final Connection existingConnection = node.connectionManager.getConnection(joinMessage.address);
//                    final boolean destroyExistingConnection = existingConnection != conn;
                        doRemoveAddress(member.getAddress(), false);
                    }
                }
                final boolean multicastEnabled = node.getConfig().getNetworkConfig().getJoin().getMulticastConfig().isEnabled();
                if (!multicastEnabled && node.isActive() && node.joined() && node.getMasterAddress() != null && !node.isMaster()) {
                    // TCP/IP discovery: redirect the joiner to the actual master.
                    sendMasterAnswer(joinMessage);
                }
                if (node.isMaster() && node.joined() && node.isActive()) {
                    final MemberInfo newMemberInfo = new MemberInfo(joinMessage.getAddress(), joinMessage.getUuid(), joinMessage.getAttributes());
                    if (node.securityContext != null && !setJoins.contains(newMemberInfo)) {
                        // Security enabled: authenticate the joiner's credentials before queueing.
                        final Credentials cr = joinMessage.getCredentials();
                        ILogger securityLogger = node.loggingService.getLogger("com.hazelcast.security");
                        if (cr == null) {
                            securityLogger.severe("Expecting security credentials " +
                                    "but credentials could not be found in JoinRequest!");
                            nodeEngine.getOperationService().send(new AuthenticationFailureOperation(), joinMessage.getAddress());
                            return;
                        } else {
                            try {
                                LoginContext lc = node.securityContext.createMemberLoginContext(cr);
                                lc.login();
                            } catch (LoginException e) {
                                securityLogger.severe("Authentication has failed for " + cr.getPrincipal()
                                        + '@' + cr.getEndpoint() + " => (" + e.getMessage() +
                                        ")");
                                securityLogger.finest(e);
                                nodeEngine.getOperationService().send(new AuthenticationFailureOperation(), joinMessage.getAddress());
                                return;
                            }
                        }
                    }
                    if (!joinInProgress) {
                        // Batch joiners: wait up to maxWaitSecondsBeforeJoin after the first
                        // request so simultaneous joiners are finalized together.
                        if (firstJoinRequest != 0 && now - firstJoinRequest >= maxWaitSecondsBeforeJoin * 1000) {
                            startJoin();
                        } else {
                            if (setJoins.add(newMemberInfo)) {
                                sendMasterAnswer(joinMessage);
                                if (firstJoinRequest == 0) {
                                    firstJoinRequest = now;
                                }
                                if (now - firstJoinRequest < maxWaitSecondsBeforeJoin * 1000) {
                                    timeToStartJoin = now + waitMillisBeforeJoin;
                                }
                            }
                            if (now > timeToStartJoin) {
                                startJoin();
                            }
                        }
                    }
                }
            } else {
                // Invalid request: drop the connection.
                // NOTE(review): assumes getConnection() is non-null here — verify.
                conn.close();
            }
        } finally {
            lock.unlock();
        }
    }
    /** Tells the joining node who the current master is. */
    private void sendMasterAnswer(final JoinRequest joinRequest) {
        nodeEngine.getOperationService().send(new SetMasterOperation(node.getMasterAddress()), joinRequest.getAddress());
    }
    /**
     * Handles a SetMaster answer while not yet joined: adopts the announced master
     * (unless a different live master connection already exists) and sends it a join
     * request.
     */
    void handleMaster(Address masterAddress) {
        lock.lock();
        try {
            if (!node.joined() && !node.getThisAddress().equals(masterAddress)) {
                if (logger.isFinestEnabled()) {
                    logger.finest("Handling master response: " + this);
                }
                final Address currentMaster = node.getMasterAddress();
                if (currentMaster != null && !currentMaster.equals(masterAddress)) {
                    final Connection conn = node.connectionManager.getConnection(currentMaster);
                    if (conn != null && conn.live()) {
                        logger.warning("Ignoring master response from " + masterAddress +
                                ", since this node has an active master: " + currentMaster);
                        return;
                    }
                }
                node.setMasterAddress(masterAddress);
                node.connectionManager.getOrConnect(masterAddress);
                if (!sendJoinRequest(masterAddress, true)) {
                    logger.warning("Could not create connection to possible master " + masterAddress);
                }
            }
        } finally {
            lock.unlock();
        }
    }
    /** Master side: records the time a member last confirmed this node as its master. */
    void acceptMasterConfirmation(MemberImpl member) {
        if (member != null) {
            if (logger.isFinestEnabled()) {
                logger.finest("MasterConfirmation has been received from " + member);
            }
            masterConfirmationTimes.put(member, Clock.currentTimeMillis());
        }
    }
    /**
     * Arms the split-brain merge: blocks member removals, remembers the target cluster
     * address and schedules the actual merge 10 seconds later.
     */
    void prepareToMerge(final Address newTargetAddress) {
        preparingToMerge.set(true);
        node.getJoiner().setTargetAddress(newTargetAddress);
        nodeEngine.getExecutionService().schedule(new Runnable() {
            public void run() {
                merge(newTargetAddress);
            }
        }, 10, TimeUnit.SECONDS);
    }
    /**
     * Performs the split-brain merge into the cluster at {@code newTargetAddress}:
     * fires MERGING, collects per-service merge tasks, resets all managed services,
     * restarts connections, rejoins, runs the merge tasks and fires MERGED.
     * Only runs if {@link #prepareToMerge} armed the flag (compareAndSet guard).
     */
    void merge(Address newTargetAddress) {
        if (preparingToMerge.compareAndSet(true, false)) {
            node.getJoiner().setTargetAddress(newTargetAddress);
            final LifecycleServiceImpl lifecycleService = node.hazelcastInstance.getLifecycleService();
            lifecycleService.runUnderLifecycleLock(new Runnable() {
                public void run() {
                    lifecycleService.fireLifecycleEvent(MERGING);
                    final NodeEngineImpl nodeEngine = node.nodeEngine;
                    final Collection<SplitBrainHandlerService> services = nodeEngine.getServices(SplitBrainHandlerService.class);
                    final Collection<Runnable> tasks = new LinkedList<Runnable>();
                    for (SplitBrainHandlerService service : services) {
                        // Snapshot merge work BEFORE services are reset below.
                        final Runnable runnable = service.prepareMergeRunnable();
                        if (runnable != null) {
                            tasks.add(runnable);
                        }
                    }
                    final Collection<ManagedService> managedServices = nodeEngine.getServices(ManagedService.class);
                    for (ManagedService service : managedServices) {
                        service.reset();
                    }
                    node.onRestart();
                    node.connectionManager.restart();
                    node.rejoin();
                    final Collection<Future> futures = new LinkedList<Future>();
                    for (Runnable task : tasks) {
                        Future f = nodeEngine.getExecutionService().submit("hz:system", task);
                        futures.add(f);
                    }
                    long callTimeout = node.groupProperties.OPERATION_CALL_TIMEOUT_MILLIS.getLong();
                    for (Future f : futures) {
                        try {
                            waitOnFutureInterruptible(f, callTimeout, TimeUnit.MILLISECONDS);
                        } catch (Exception e) {
                            logger.severe("While merging...", e);
                        }
                    }
                    lifecycleService.fireLifecycleEvent(MERGED);
                }
            });
        }
    }
private <V> V waitOnFutureInterruptible(Future<V> future, long timeout, TimeUnit timeUnit)
throws ExecutionException, InterruptedException, TimeoutException {
ValidationUtil.isNotNull(timeUnit, "timeUnit");
long deadline = Clock.currentTimeMillis() + timeUnit.toMillis(timeout);
while (true) {
long localTimeout = Math.min(1000 * 10, deadline);
try {
return future.get(localTimeout, TimeUnit.MILLISECONDS);
} catch (TimeoutException te) {
deadline -= localTimeout;
if (deadline <= 0) {
throw te;
}
if (!node.isActive()) {
future.cancel(true);
throw new HazelcastInstanceNotActiveException();
}
}
}
}
    /** Clears join bookkeeping and re-arms the join pacing window. */
    private void joinReset() {
        lock.lock();
        try {
            joinInProgress = false;
            setJoins.clear();
            timeToStartJoin = Clock.currentTimeMillis() + waitMillisBeforeJoin;
            firstJoinRequest = 0;
        } finally {
            lock.unlock();
        }
    }
    /** Full reset: drops all remote members (keeping only the local one) and all join state. */
    @Override
    public void reset() {
        lock.lock();
        try {
            joinInProgress = false;
            setJoins.clear();
            timeToStartJoin = 0;
            setMembersRef(Collections.singletonMap(thisAddress, thisMember));
            masterConfirmationTimes.clear();
        } finally {
            lock.unlock();
        }
    }
    /**
     * Master-only: finalizes the batched join. Pauses migrations, builds the combined
     * member list (current members + queued joiners), sends FinalizeJoin to joiners and
     * member updates to existing members, applies the list locally and waits (max 10s
     * each) for the remote calls before resuming migrations.
     */
    private void startJoin() {
        logger.finest("Starting Join.");
        lock.lock();
        try {
            try {
                joinInProgress = true;
                // pause migrations until join, member-update and post-join operations are completed.
                node.getPartitionService().pauseMigration();
                final Collection<MemberImpl> members = getMemberList();
                final Collection<MemberInfo> memberInfos = createMemberInfos(members, true);
                for (MemberInfo memberJoining : setJoins) {
                    memberInfos.add(memberJoining);
                }
                final long time = getClusterTime();
                // Post join operations must be lock free; means no locks at all;
                // no partition locks, no key-based locks, no service level locks!
                final Operation[] postJoinOps = nodeEngine.getPostJoinOperations();
                final PostJoinOperation postJoinOp = postJoinOps != null && postJoinOps.length > 0
                        ? new PostJoinOperation(postJoinOps) : null;
                final int count = members.size() - 1 + setJoins.size();
                final List<Future> calls = new ArrayList<Future>(count);
                for (MemberInfo member : setJoins) {
                    calls.add(invokeClusterOperation(new FinalizeJoinOperation(memberInfos, postJoinOp, time), member.getAddress()));
                }
                for (MemberImpl member : members) {
                    if (!member.getAddress().equals(thisAddress)) {
                        calls.add(invokeClusterOperation(new MemberInfoUpdateOperation(memberInfos, time, true), member.getAddress()));
                    }
                }
                updateMembers(memberInfos);
                for (Future future : calls) {
                    try {
                        future.get(10, TimeUnit.SECONDS);
                    } catch (TimeoutException ignored) {
                        // Best effort: the member update will be re-published periodically.
                        if (logger.isFinestEnabled()) {
                            logger.finest("Finalize join call timed-out: " + future);
                        }
                    } catch (Exception e) {
                        logger.warning("While waiting finalize join calls...", e);
                    }
                }
            } finally {
                node.getPartitionService().resumeMigration();
            }
        } finally {
            lock.unlock();
        }
    }
private static Collection<MemberInfo> createMemberInfos(Collection<MemberImpl> members, boolean joinOperation) {
final Collection<MemberInfo> memberInfos = new LinkedList<MemberInfo>();
for (MemberImpl member : members) {
if (joinOperation) {
memberInfos.add(new MemberInfo(member));
} else {
memberInfos.add(new MemberInfo(member.getAddress(), member.getUuid(), member.getAttributes()));
}
}
return memberInfos;
}
    /**
     * Applies a member-list update from the master. Skips the update when the incoming
     * list equals the current one (same addresses and uuids); otherwise rebuilds the
     * member array (reusing existing MemberImpl instances where possible), resets join
     * state, runs a heartbeat pass and marks the node joined.
     */
    void updateMembers(Collection<MemberInfo> members) {
        lock.lock();
        try {
            Map<Address, MemberImpl> oldMemberMap = membersMapRef.get();
            if (oldMemberMap.size() == members.size()) {
                boolean same = true;
                for (MemberInfo memberInfo : members) {
                    MemberImpl member = oldMemberMap.get(memberInfo.getAddress());
                    if (member == null || !member.getUuid().equals(memberInfo.uuid)) {
                        same = false;
                        break;
                    }
                }
                if (same) {
                    logger.finest("No need to process member update...");
                    return;
                }
            }
            MemberImpl[] newMembers = new MemberImpl[members.size()];
            int k = 0;
            for (MemberInfo memberInfo : members) {
                MemberImpl member = oldMemberMap.get(memberInfo.address);
                if (member == null) {
                    member = createMember(memberInfo.address, memberInfo.uuid, thisAddress.getScopeId(), memberInfo.attributes);
                }
                newMembers[k++] = member;
                member.didRead(); // start with a fresh heartbeat timer
            }
            setMembers(newMembers);
            if (!getMemberList().contains(thisMember)) {
                throw new HazelcastException("Member list doesn't contain local member!");
            }
            joinReset();
            heartBeater();
            node.setJoined();
            logger.info(membersString());
        } finally {
            lock.unlock();
        }
    }
    /**
     * Applies a remote member-attribute change (identified by the member uuid) and
     * publishes the corresponding event. The local member is skipped because its
     * attribute was already mutated by the originating call.
     */
    public void updateMemberAttribute(String uuid, MemberAttributeOperationType operationType, String key, Object value) {
        lock.lock();
        try {
            Map<Address, MemberImpl> memberMap = membersMapRef.get();
            for (MemberImpl member : memberMap.values()) {
                if (member.getUuid().equals(uuid)) {
                    if (!member.equals(getLocalMember())) {
                        member.updateAttribute(operationType, key, value);
                    }
                    sendMemberAttributeEvent(member, operationType, key, value);
                    break;
                }
            }
        } finally {
            lock.unlock();
        }
    }
public boolean sendJoinRequest(Address toAddress, boolean withCredentials) {
if (toAddress == null) {
toAddress = node.getMasterAddress();
}
JoinRequestOperation joinRequest = new JoinRequestOperation(node.createJoinRequest(withCredentials));
nodeEngine.getOperationService().send(joinRequest, toAddress);
return true;
}
    /** Connection established: refresh the member's read timestamp so it is not flagged dead. */
    @Override
    public void connectionAdded(final Connection connection) {
        MemberImpl member = getMember(connection.getEndPoint());
        if (member != null) {
            member.didRead();
        }
    }
    /**
     * Connection lost: if this node has not joined yet and the lost peer was the
     * presumed master, forget that master so discovery can start over.
     */
    @Override
    public void connectionRemoved(Connection connection) {
        if (logger.isFinestEnabled()) {
            logger.finest("Connection is removed " + connection.getEndPoint());
        }
        if (!node.joined()) {
            final Address masterAddress = node.getMasterAddress();
            if (masterAddress != null && masterAddress.equals(connection.getEndPoint())) {
                node.setMasterAddress(null);
            }
        }
    }
    /** Invokes a cluster operation on {@code target} with an aggressive retry count. */
    private Future invokeClusterOperation(Operation op, Address target) {
        return nodeEngine.getOperationService().createInvocationBuilder(SERVICE_NAME, op, target)
                .setTryCount(50).invoke();
    }
    public NodeEngineImpl getNodeEngine() {
        return nodeEngine;
    }
    /**
     * Replaces the member list with the given (ordered) members. Newly seen members get
     * a fresh master-confirmation timestamp, are announced to the partition service
     * (synchronously) and produce MEMBER_ADDED events (asynchronously), one event per
     * new member with a membership snapshot as of that addition.
     */
    private void setMembers(MemberImpl... members) {
        if (members == null || members.length == 0) return;
        if (logger.isFinestEnabled()) {
            logger.finest("Updating members -> " + Arrays.toString(members));
        }
        lock.lock();
        try {
            Map<Address, MemberImpl> oldMemberMap = membersMapRef.get();
            final Map<Address, MemberImpl> memberMap = new LinkedHashMap<Address, MemberImpl>(); // ! ORDERED !
            final Collection<MemberImpl> newMembers = new LinkedList<MemberImpl>();
            for (MemberImpl member : members) {
                MemberImpl currentMember = oldMemberMap.get(member.getAddress());
                if (currentMember == null) {
                    newMembers.add(member);
                    masterConfirmationTimes.put(member, Clock.currentTimeMillis());
                }
                memberMap.put(member.getAddress(), member);
            }
            setMembersRef(memberMap);
            if (!newMembers.isEmpty()) {
                Set<Member> eventMembers = new LinkedHashSet<Member>(oldMemberMap.values());
                if (newMembers.size() == 1) {
                    MemberImpl newMember = newMembers.iterator().next();
                    node.getPartitionService().memberAdded(newMember); // sync call
                    eventMembers.add(newMember);
                    sendMembershipEventNotifications(newMember, unmodifiableSet(eventMembers), true); // async events
                } else {
                    // Multiple additions: each event carries its own snapshot copy so the
                    // growing eventMembers set is not shared across events.
                    for (MemberImpl newMember : newMembers) {
                        node.getPartitionService().memberAdded(newMember); // sync call
                        eventMembers.add(newMember);
                        sendMembershipEventNotifications(newMember, unmodifiableSet(new LinkedHashSet<Member>(eventMembers)), true); // async events
                    }
                }
            }
        } finally {
            lock.unlock();
        }
    }
    /**
     * Removes a dead member from the member list: notifies the partition service and
     * node engine synchronously, publishes MEMBER_REMOVED asynchronously and, if this
     * node is the master, tells all remaining members to drop the dead one too.
     */
    private void removeMember(MemberImpl deadMember) {
        logger.info("Removing " + deadMember);
        lock.lock();
        try {
            final Map<Address, MemberImpl> members = membersMapRef.get();
            if (members.containsKey(deadMember.getAddress())) {
                Map<Address, MemberImpl> newMembers = new LinkedHashMap<Address, MemberImpl>(members); // ! ORDERED !
                newMembers.remove(deadMember.getAddress());
                masterConfirmationTimes.remove(deadMember);
                setMembersRef(newMembers);
                node.getPartitionService().memberRemoved(deadMember); // sync call
                nodeEngine.onMemberLeft(deadMember); // sync call
                sendMembershipEventNotifications(deadMember, unmodifiableSet(new LinkedHashSet<Member>(newMembers.values())), false); // async events
                if (node.isMaster()) {
                    if (logger.isFinestEnabled()) {
                        logger.finest(deadMember + " is dead. Sending remove to all other members.");
                    }
                    invokeMemberRemoveOperation(deadMember.getAddress());
                }
            }
        } finally {
            lock.unlock();
        }
    }
    /** Broadcasts a MemberRemoveOperation for {@code deadAddress} to all other live members. */
    private void invokeMemberRemoveOperation(final Address deadAddress) {
        for (MemberImpl member : getMemberList()) {
            Address address = member.getAddress();
            if (!thisAddress.equals(address) && !address.equals(deadAddress)) {
                nodeEngine.getOperationService().send(new MemberRemoveOperation(deadAddress), address);
            }
        }
    }
    /** Graceful shutdown: asks the other members to remove this node. */
    public void sendShutdownMessage() {
        invokeMemberRemoveOperation(thisAddress);
    }
    /**
     * Publishes a MEMBER_ADDED/MEMBER_REMOVED event: internal MembershipAwareServices
     * are notified asynchronously (so they cannot block each other) and registered
     * listeners are notified via the event service, ordered per registration id.
     */
    private void sendMembershipEventNotifications(final MemberImpl member, Set<Member> members, final boolean added) {
        final int eventType = added ? MembershipEvent.MEMBER_ADDED : MembershipEvent.MEMBER_REMOVED;
        final MembershipEvent membershipEvent = new MembershipEvent(getClusterProxy(), member, eventType, members);
        final Collection<MembershipAwareService> membershipAwareServices = nodeEngine.getServices(MembershipAwareService.class);
        if (membershipAwareServices != null && !membershipAwareServices.isEmpty()) {
            final MembershipServiceEvent event = new MembershipServiceEvent(membershipEvent);
            for (final MembershipAwareService service : membershipAwareServices) {
                // service events should not block each other
                nodeEngine.getExecutionService().execute(ExecutionService.SYSTEM_EXECUTOR, new Runnable() {
                    public void run() {
                        if (added) {
                            service.memberAdded(event);
                        } else {
                            service.memberRemoved(event);
                        }
                    }
                });
            }
        }
        final EventService eventService = nodeEngine.getEventService();
        Collection<EventRegistration> registrations = eventService.getRegistrations(SERVICE_NAME, SERVICE_NAME);
        for (EventRegistration reg : registrations) {
            // Partition events by registration id so each listener sees them in order.
            eventService.publishEvent(SERVICE_NAME, reg, membershipEvent, reg.getId().hashCode());
        }
    }
    /**
     * Publishes a member-attribute-changed event, mirroring the structure of
     * {@link #sendMembershipEventNotifications}: async service callbacks plus
     * event-service delivery to registered listeners.
     */
    private void sendMemberAttributeEvent(MemberImpl member, MemberAttributeOperationType operationType, String key, Object value) {
        final MemberAttributeEvent memberAttributeEvent = new MemberAttributeEvent(getClusterProxy(), member, operationType, key, value);
        final Collection<MembershipAwareService> membershipAwareServices = nodeEngine.getServices(MembershipAwareService.class);
        final MemberAttributeServiceEvent event = new MemberAttributeServiceEvent(getClusterProxy(), member, operationType, key, value);
        if (membershipAwareServices != null && !membershipAwareServices.isEmpty()) {
            for (final MembershipAwareService service : membershipAwareServices) {
                // service events should not block each other
                nodeEngine.getExecutionService().execute(ExecutionService.SYSTEM_EXECUTOR, new Runnable() {
                    public void run() {
                        service.memberAttributeChanged(event);
                    }
                });
            }
        }
        final EventService eventService = nodeEngine.getEventService();
        Collection<EventRegistration> registrations = eventService.getRegistrations(SERVICE_NAME, SERVICE_NAME);
        for (EventRegistration reg : registrations) {
            eventService.publishEvent(SERVICE_NAME, reg, memberAttributeEvent, reg.getId().hashCode());
        }
    }
    /** Creates a MemberImpl for {@code address}, propagating the local node's IPv6 scope id. */
    protected MemberImpl createMember(Address address, String nodeUuid, String ipV6ScopeId, Map<String, Object> attributes) {
        address.setScopeId(ipV6ScopeId);
        return new MemberImpl(address, thisAddress.equals(address), nodeUuid,
                (HazelcastInstanceImpl) nodeEngine.getHazelcastInstance(), attributes);
    }
    /** Looks a member up by address; null-safe. */
    @Override
    public MemberImpl getMember(Address address) {
        if (address == null) {
            return null;
        }
        Map<Address, MemberImpl> memberMap = membersMapRef.get();
        return memberMap.get(address);
    }
    /** Looks a member up by uuid (linear scan); null-safe. */
    @Override
    public MemberImpl getMember(String uuid) {
        if (uuid == null) {
            return null;
        }
        Map<Address, MemberImpl> memberMap = membersMapRef.get();
        for (MemberImpl member : memberMap.values()) {
            if (uuid.equals(member.getUuid())) {
                return member;
            }
        }
        return null;
    }
    /**
     * Publishes a new immutable member-map snapshot (and the derived member set).
     * Must be called while holding 'lock'.
     */
    private void setMembersRef(Map<Address, MemberImpl> memberMap) {
        memberMap = unmodifiableMap(memberMap);
        // make values(), keySet() and entrySet() to be cached
        memberMap.values();
        memberMap.keySet();
        memberMap.entrySet();
        membersMapRef.set(memberMap);
        membersRef.set(unmodifiableSet(new LinkedHashSet<MemberImpl>(memberMap.values())));
    }
    /** Returns the current immutable member snapshot (insertion ordered). */
    @Override
    public Collection<MemberImpl> getMemberList() {
        return membersRef.get();
    }
    @Override
    public Set<Member> getMembers() {
        // Unchecked cast: Set<MemberImpl> viewed as Set<Member>; safe because the set is unmodifiable.
        return (Set) membersRef.get();
    }
    @Override
    public void shutdown(boolean terminate) {
        reset();
    }
    @Override
    public Address getMasterAddress() {
        return node.getMasterAddress();
    }
    @Override
    public boolean isMaster() {
        return node.isMaster();
    }
    @Override
    public Address getThisAddress() {
        return thisAddress;
    }
    public Member getLocalMember() {
        return node.getLocalMember();
    }
    @Override
    public int getSize() {
        final Collection<MemberImpl> members = getMemberList();
        return members != null ? members.size() : 0;
    }
    /** Local time adjusted by the measured master-clock offset (no offset until measured). */
    @Override
    public long getClusterTime() {
        return Clock.currentTimeMillis() + ((clusterTimeDiff == Long.MAX_VALUE) ? 0 : clusterTimeDiff);
    }
    /** Records the master/local clock offset, keeping the smallest absolute diff seen. */
    public void setMasterTime(long masterTime) {
        long diff = masterTime - Clock.currentTimeMillis();
        if (Math.abs(diff) < Math.abs(clusterTimeDiff)) {
            this.clusterTimeDiff = diff;
        }
    }
    //todo: remove since unused?
    public long getClusterTimeFor(long localTime) {
        return localTime + ((clusterTimeDiff == Long.MAX_VALUE) ? 0 : clusterTimeDiff);
    }
public String addMembershipListener(MembershipListener listener) {
if (listener instanceof InitialMembershipListener) {
lock.lock();
try {
((InitialMembershipListener) listener).init(new InitialMembershipEvent(getClusterProxy(), getMembers()));
final EventRegistration registration = nodeEngine.getEventService().registerLocalListener(SERVICE_NAME, SERVICE_NAME, listener);
return registration.getId();
} finally {
lock.unlock();
}
} else {
final EventRegistration registration = nodeEngine.getEventService().registerLocalListener(SERVICE_NAME, SERVICE_NAME, listener);
return registration.getId();
}
}
public boolean removeMembershipListener(final String registrationId) {
return nodeEngine.getEventService().deregisterListener(SERVICE_NAME, SERVICE_NAME, registrationId);
}
@edu.umd.cs.findbugs.annotations.SuppressWarnings("BC_UNCONFIRMED_CAST")
@Override
public void dispatchEvent(MembershipEvent event, MembershipListener listener) {
switch (event.getEventType()) {
case MembershipEvent.MEMBER_ADDED:
listener.memberAdded(event);
break;
case MembershipEvent.MEMBER_REMOVED:
listener.memberRemoved(event);
break;
case MembershipEvent.MEMBER_ATTRIBUTE_CHANGED:
MemberAttributeEvent memberAttributeEvent = (MemberAttributeEvent) event;
listener.memberAttributeChanged(memberAttributeEvent);
break;
default:
throw new IllegalArgumentException("Unhandled event:" + event);
}
}
public Cluster getClusterProxy() {
return new ClusterProxy(this);
}
public String membersString() {
StringBuilder sb = new StringBuilder("\n\nMembers [");
final Collection<MemberImpl> members = getMemberList();
sb.append(members != null ? members.size() : 0);
sb.append("] {");
if (members != null) {
for (Member member : members) {
sb.append("\n\t").append(member);
}
}
sb.append("\n}\n");
return sb.toString();
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("ClusterService");
sb.append("{address=").append(thisAddress);
sb.append('}');
return sb.toString();
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_cluster_ClusterServiceImpl.java |
981 | public class SignalBackupOperation extends BaseSignalOperation implements BackupOperation {
public SignalBackupOperation() {
}
public SignalBackupOperation(ObjectNamespace namespace, Data key, long threadId, String conditionId, boolean all) {
super(namespace, key, threadId, conditionId, all);
}
@Override
public int getId() {
return LockDataSerializerHook.SIGNAL_BACKUP;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_lock_operations_SignalBackupOperation.java |
1,123 | @SuppressWarnings("deprecation")
public class LegacyOrderTest extends LegacyOrderBaseTest {
private Long orderId = null;
private int numOrderItems = 0;
private Long fulfillmentGroupId;
private Long bundleOrderItemId;
@Resource(name = "blOrderItemService")
private OrderItemService orderItemService;
@Resource
private SkuDao skuDao;
@Resource
private ShippingRateService shippingRateService;
@Test(groups = { "createCartForCustomerLegacy" }, dependsOnGroups = { "readCustomer", "createPhone" })
@Transactional
@Rollback(false)
public void createCartForCustomer() {
String userName = "customer1";
Customer customer = customerService.readCustomerByUsername(userName);
Order order = cartService.createNewCartForCustomer(customer);
assert order != null;
assert order.getId() != null;
this.orderId = order.getId();
}
@Test(groups = { "findCurrentCartForCustomerLegacy" }, dependsOnGroups = { "readCustomer", "createPhone", "createCartForCustomerLegacy" })
@Transactional
@Rollback(false)
public void findCurrentCartForCustomer() {
String userName = "customer1";
Customer customer = customerService.readCustomerByUsername(userName);
Order order = cartService.findCartForCustomer(customer);
assert order != null;
assert order.getId() != null;
this.orderId = order.getId();
}
@Test(groups = { "addItemToOrderLegacy" }, dependsOnGroups = { "findCurrentCartForCustomerLegacy", "createSku", "testCatalog" })
@Rollback(false)
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void addItemToOrder() throws PricingException {
numOrderItems++;
Sku sku = skuDao.readFirstSku();
Order order = cartService.findOrderById(orderId);
assert order != null;
assert sku.getId() != null;
DiscreteOrderItemRequest itemRequest = new DiscreteOrderItemRequest();
itemRequest.setQuantity(1);
itemRequest.setSku(sku);
DiscreteOrderItem item = (DiscreteOrderItem) cartService.addDiscreteItemToOrder(order, itemRequest);
assert item != null;
assert item.getQuantity() == numOrderItems;
assert item.getSku() != null;
assert item.getSku().equals(sku);
}
@Test(groups = { "addAnotherItemToOrderLegacy" }, dependsOnGroups = { "addItemToOrderLegacy" })
@Rollback(false)
@Transactional
public void addAnotherItemToOrder() throws PricingException {
numOrderItems++;
Sku sku = skuDao.readFirstSku();
Order order = cartService.findOrderById(orderId);
assert order != null;
assert sku.getId() != null;
cartService.setAutomaticallyMergeLikeItems(true);
DiscreteOrderItemRequest itemRequest = new DiscreteOrderItemRequest();
itemRequest.setQuantity(1);
itemRequest.setSku(sku);
DiscreteOrderItem item = (DiscreteOrderItem) cartService.addDiscreteItemToOrder(order, itemRequest, true);
assert item.getSku() != null;
assert item.getSku().equals(sku);
assert item.getQuantity() == 2; // item-was merged with prior item.
order = cartService.findOrderById(orderId);
assert(order.getOrderItems().size()==1);
assert(order.getOrderItems().get(0).getQuantity()==2);
/*
This test is currently not supported, as the order service only supports like item merging
// re-price the order without automatically merging.
cartService.setAutomaticallyMergeLikeItems(false);
DiscreteOrderItemRequest itemRequest2 = new DiscreteOrderItemRequest();
itemRequest2.setQuantity(1);
itemRequest2.setSku(sku);
DiscreteOrderItem item2 = (DiscreteOrderItem) cartService.addDiscreteItemToOrder(order, itemRequest2, true);
assert item2.getSku() != null;
assert item2.getSku().equals(sku);
assert item2.getQuantity() == 1; // item-was not auto-merged with prior items.
order = cartService.findOrderById(orderId);
assert(order.getOrderItems().size()==2);
assert(order.getOrderItems().get(0).getQuantity()==2);
assert(order.getOrderItems().get(1).getQuantity()==1);
*/
}
@Test(groups = { "addBundleToOrderLegacy" }, dependsOnGroups = { "addAnotherItemToOrderLegacy" })
@Rollback(false)
@Transactional
public void addBundleToOrder() throws PricingException {
//numOrderItems++;
Sku sku = skuDao.readFirstSku();
Order order = cartService.findOrderById(orderId);
assert order != null;
assert sku.getId() != null;
BundleOrderItemRequest bundleRequest = new BundleOrderItemRequest();
bundleRequest.setQuantity(1);
bundleRequest.setName("myBundle");
DiscreteOrderItemRequest itemRequest = new DiscreteOrderItemRequest();
itemRequest.setQuantity(1);
itemRequest.setSku(sku);
bundleRequest.getDiscreteOrderItems().add(itemRequest);
BundleOrderItem item = (BundleOrderItem) cartService.addBundleItemToOrder(order, bundleRequest);
bundleOrderItemId = item.getId();
assert item != null;
assert item.getQuantity() == 1;
}
@Test(groups = { "removeBundleFromOrderLegacy" }, dependsOnGroups = { "addBundleToOrderLegacy" })
@Rollback(false)
@Transactional
public void removeBundleFromOrder() throws PricingException {
Order order = cartService.findOrderById(orderId);
List<OrderItem> orderItems = order.getOrderItems();
assert orderItems.size() == numOrderItems;
int startingSize = orderItems.size();
BundleOrderItem bundleOrderItem = (BundleOrderItem) orderItemService.readOrderItemById(bundleOrderItemId);
assert bundleOrderItem != null;
assert bundleOrderItem.getDiscreteOrderItems() != null;
assert bundleOrderItem.getDiscreteOrderItems().size() == 1;
cartService.removeItemFromOrder(order, bundleOrderItem);
order = cartService.findOrderById(orderId);
List<OrderItem> items = order.getOrderItems();
assert items != null;
assert items.size() == startingSize - 1;
}
@Test(groups = { "getItemsForOrderLegacy" }, dependsOnGroups = { "removeBundleFromOrderLegacy" })
@Transactional
public void getItemsForOrder() {
Order order = cartService.findOrderById(orderId);
List<OrderItem> orderItems = order.getOrderItems();
assert orderItems != null;
assert orderItems.size() == numOrderItems - 1;
}
@Test(groups = { "updateItemsInOrderLegacy" }, dependsOnGroups = { "getItemsForOrderLegacy" })
@Transactional
public void updateItemsInOrder() throws ItemNotFoundException, PricingException {
Order order = cartService.findOrderById(orderId);
List<OrderItem> orderItems = order.getOrderItems();
assert orderItems.size() > 0;
OrderItem item = orderItems.get(0);
//item.setSalePrice(new Money(BigDecimal.valueOf(10000)));
((DiscreteOrderItem) item).getSku().setSalePrice(new Money(BigDecimal.valueOf(10000)));
((DiscreteOrderItem) item).getSku().setRetailPrice(new Money(BigDecimal.valueOf(10000)));
item.setQuantity(10);
cartService.updateItemQuantity(order, item);
OrderItem updatedItem = orderItemService.readOrderItemById(item.getId());
assert updatedItem != null;
assert updatedItem.getPrice().equals(new Money(BigDecimal.valueOf(10000)));
assert updatedItem.getQuantity() == 10;
List<OrderItem> updateItems = new ArrayList<OrderItem> (order.getOrderItems());
updateItems.get(0).setQuantity(15);
cartService.updateItemQuantity(order, updatedItem);
order = cartService.findOrderById(orderId);
assert order.getOrderItems().get(0).getQuantity() == 15;
}
@Test(groups = { "removeItemFromOrderLegacy" }, dependsOnGroups = { "getItemsForOrderLegacy" })
@Transactional
public void removeItemFromOrder() throws PricingException {
Order order = cartService.findOrderById(orderId);
List<OrderItem> orderItems = order.getOrderItems();
assert orderItems.size() > 0;
int startingSize = orderItems.size();
OrderItem item = orderItems.get(0);
assert item != null;
cartService.removeItemFromOrder(order, item);
order = cartService.findOrderById(orderId);
List<OrderItem> items = order.getOrderItems();
assert items != null;
assert items.size() == startingSize - 1;
}
@Test(groups = { "checkOrderItemsLegacy" }, dependsOnGroups = { "removeItemFromOrderLegacy" })
@Transactional
public void checkOrderItems() throws PricingException {
Order order = cartService.findOrderById(orderId);
//the removal from the previous test was rolled back
assert order.getOrderItems().size() == 1;
BundleOrderItem bundleOrderItem = (BundleOrderItem) orderItemService.readOrderItemById(bundleOrderItemId);
assert bundleOrderItem == null;
}
@Test(groups = { "addPaymentToOrderLegacy" }, dataProvider = "basicPaymentInfo", dataProviderClass = PaymentInfoDataProvider.class, dependsOnGroups = { "checkOrderItemsLegacy" })
@Rollback(false)
@Transactional
public void addPaymentToOrder(PaymentInfo paymentInfo) {
Order order = cartService.findOrderById(orderId);
cartService.addPaymentToOrder(order, paymentInfo, null);
order = cartService.findOrderById(orderId);
PaymentInfo payment = order.getPaymentInfos().get(order.getPaymentInfos().indexOf(paymentInfo));
assert payment != null;
//assert payment.getId() != null;
assert payment.getOrder() != null;
assert payment.getOrder().equals(order);
}
@Test(groups = "addFulfillmentGroupToOrderFirstLegacy", dataProvider = "basicFulfillmentGroupLegacy", dataProviderClass = FulfillmentGroupDataProvider.class, dependsOnGroups = { "addPaymentToOrderLegacy" })
@Rollback(false)
@Transactional
public void addFulfillmentGroupToOrderFirst(FulfillmentGroup fulfillmentGroup) throws PricingException {
String userName = "customer1";
Customer customer = customerService.readCustomerByUsername(userName);
Address address = customerAddressService.readActiveCustomerAddressesByCustomerId(customer.getId()).get(0).getAddress();
Order order = cartService.findOrderById(orderId);
fulfillmentGroup.setOrder(order);
fulfillmentGroup.setAddress(address);
FulfillmentGroup fg = cartService.addFulfillmentGroupToOrder(order, fulfillmentGroup);
assert fg != null;
assert fg.getId() != null;
assert fg.getAddress().equals(fulfillmentGroup.getAddress());
assert fg.getOrder().equals(order);
assert fg.getMethod().equals(fulfillmentGroup.getMethod());
assert fg.getReferenceNumber().equals(fulfillmentGroup.getReferenceNumber());
this.fulfillmentGroupId = fg.getId();
}
@Test(groups = { "removeFulfillmentGroupFromOrderLegacy" }, dependsOnGroups = { "addFulfillmentGroupToOrderFirstLegacy" })
@Transactional
public void removeFulfillmentGroupFromOrder() throws PricingException {
Order order = cartService.findOrderById(orderId);
List<FulfillmentGroup> fgItems = order.getFulfillmentGroups();
assert fgItems.size() > 0;
int startingSize = fgItems.size();
FulfillmentGroup item = fgItems.get(0);
assert item != null;
cartService.removeFulfillmentGroupFromOrder(order, item);
order = cartService.findOrderById(orderId);
List<FulfillmentGroup> items = order.getFulfillmentGroups();
assert items != null;
assert items.size() == startingSize - 1;
}
@Test(groups = { "findFulFillmentGroupForOrderFirstLegacy" }, dependsOnGroups = { "addFulfillmentGroupToOrderFirstLegacy" })
@Transactional
public void findFillmentGroupForOrderFirst() {
Order order = cartService.findOrderById(orderId);
FulfillmentGroup fg = order.getFulfillmentGroups().get(0);
assert fg != null;
assert fg.getId() != null;
FulfillmentGroup fulfillmentGroup = em.find(FulfillmentGroupImpl.class, fulfillmentGroupId);
assert fg.getAddress().getId().equals(fulfillmentGroup.getAddress().getId());
assert fg.getOrder().equals(order);
assert fg.getMethod().equals(fulfillmentGroup.getMethod());
assert fg.getReferenceNumber().equals(fulfillmentGroup.getReferenceNumber());
}
@Test(groups= {"addItemToFulfillmentGroupSecondLegacy"}, dependsOnGroups = { "addFulfillmentGroupToOrderFirstLegacy" })
@Transactional
public void addItemToFulfillmentgroupSecond() {
String userName = "customer1";
Customer customer = customerService.readCustomerByUsername(userName);
Address address = customerAddressService.readActiveCustomerAddressesByCustomerId(customer.getId()).get(0).getAddress();
Order order = cartService.findOrderById(orderId);
List<OrderItem> orderItems = order.getOrderItems();
assert(orderItems.size() > 0);
FulfillmentGroup newFg = new FulfillmentGroupImpl();
newFg.setAddress(address);
newFg.setMethod("standard");
newFg.setService(ShippingServiceType.BANDED_SHIPPING.getType());
try {
newFg = cartService.addItemToFulfillmentGroup(orderItems.get(0), newFg, 1);
} catch (PricingException e) {
throw new RuntimeException(e);
}
order = cartService.findOrderById(orderId);
FulfillmentGroup newNewFg = order.getFulfillmentGroups().get(order.getFulfillmentGroups().indexOf(newFg));
assert(newNewFg.getFulfillmentGroupItems().size() == 1);
assert(newNewFg.getFulfillmentGroupItems().get(0).getOrderItem().equals(orderItems.get(0)));
}
/*
* @Test(groups = { "removeFulFillmentGroupForOrderFirst" }, dependsOnGroups
* = { "findCurrentCartForCustomer",
* "addFulfillmentGroupToOrderFirst" }) public void
* removeFulFillmentGroupForOrderFirst() { int beforeRemove =
* cartService.findFulfillmentGroupsForOrder(order).size();
* FulfillmentGroup fulfillmentGroup = entityManager.find(FulfillmentGroupImpl.class,
* fulfillmentGroupId); cartService.removeFulfillmentGroupFromOrder(order,
* fulfillmentGroup); int afterRemove =
* cartService.findFulfillmentGroupsForOrder(order).size(); assert
* (beforeRemove - afterRemove) == 1; }
*/
@Test(groups = { "findDefaultFulFillmentGroupForOrderLegacy" }, dependsOnGroups = { "findCurrentCartForCustomerLegacy", "addFulfillmentGroupToOrderFirstLegacy" })
@Transactional
public void findDefaultFillmentGroupForOrder() {
Order order = cartService.findOrderById(orderId);
FulfillmentGroup fg = cartService.findDefaultFulfillmentGroupForOrder(order);
assert fg != null;
assert fg.getId() != null;
FulfillmentGroup fulfillmentGroup = em.find(FulfillmentGroupImpl.class, fulfillmentGroupId);
assert fg.getAddress().getId().equals(fulfillmentGroup.getAddress().getId());
assert fg.getOrder().equals(order);
assert fg.getMethod().equals(fulfillmentGroup.getMethod());
assert fg.getReferenceNumber().equals(fulfillmentGroup.getReferenceNumber());
}
/*
* @Test(groups = { "removeDefaultFulFillmentGroupForOrder" },
* dependsOnGroups = { "findCurrentCartForCustomer",
* "addFulfillmentGroupToOrderFirst" }) public void
* removeDefaultFulFillmentGroupForOrder() { int beforeRemove =
* cartService.findFulfillmentGroupsForOrder(order).size();
* cartService.removeFulfillmentGroupFromOrder(order, fulfillmentGroup);
* int afterRemove =
* cartService.findFulfillmentGroupsForOrder(order).size(); assert
* (beforeRemove - afterRemove) == 1; }
*/
@Test(groups = { "removeItemFromOrderAfterDefaultFulfillmentGroupLegacy" }, dependsOnGroups = { "addFulfillmentGroupToOrderFirstLegacy" })
@Transactional
public void removeItemFromOrderAfterFulfillmentGroups() {
Order order = cartService.findOrderById(orderId);
List<OrderItem> orderItems = order.getOrderItems();
assert orderItems.size() > 0;
OrderItem item = orderItems.get(0);
assert item != null;
try {
cartService.removeItemFromOrder(order, item);
} catch (PricingException e) {
throw new RuntimeException(e);
}
FulfillmentGroup fg = cartService.findDefaultFulfillmentGroupForOrder(order);
for (FulfillmentGroupItem fulfillmentGroupItem : fg.getFulfillmentGroupItems()) {
assert !fulfillmentGroupItem.getOrderItem().equals(item);
}
}
@Test(groups = { "getOrdersForCustomerLegacy" }, dependsOnGroups = { "readCustomer", "findCurrentCartForCustomerLegacy" })
@Transactional
public void getOrdersForCustomer() {
String username = "customer1";
Customer customer = customerService.readCustomerByUsername(username);
List<Order> orders = cartService.findOrdersForCustomer(customer);
assert orders != null;
assert orders.size() > 0;
}
@Test(groups = { "findCartForAnonymousCustomerLegacy" }, dependsOnGroups = { "getOrdersForCustomerLegacy" })
public void findCartForAnonymousCustomer() {
Customer customer = customerService.createCustomerFromId(null);
Order order = cartService.findCartForCustomer(customer);
assert order == null;
order = cartService.createNewCartForCustomer(customer);
Long orderId = order.getId();
Order newOrder = cartService.findOrderById(orderId);
assert newOrder != null;
assert newOrder.getCustomer() != null;
}
@Test(groups = { "findOrderByOrderNumberLegacy" }, dependsOnGroups = { "findCartForAnonymousCustomerLegacy" })
@Transactional
public void findOrderByOrderNumber() throws PricingException {
Customer customer = customerService.createCustomerFromId(null);
Order order = cartService.createNewCartForCustomer(customer);
order.setOrderNumber("3456");
order = cartService.save(order, false);
Long orderId = order.getId();
Order newOrder = cartService.findOrderByOrderNumber("3456");
assert newOrder.getId().equals(orderId);
Order nullOrder = cartService.findOrderByOrderNumber(null);
assert nullOrder == null;
nullOrder = cartService.findOrderByOrderNumber("");
assert nullOrder == null;
}
@Test(groups = { "findNamedOrderForCustomerLegacy" }, dependsOnGroups = { "findOrderByOrderNumberLegacy" })
@Transactional
public void findNamedOrderForCustomer() throws PricingException {
Customer customer = customerService.createCustomerFromId(null);
Order order = cartService.createNewCartForCustomer(customer);
order.setStatus(OrderStatus.NAMED);
order.setName("COOL ORDER");
order = cartService.save(order, false);
Long orderId = order.getId();
Order newOrder = cartService.findNamedOrderForCustomer("COOL ORDER", customer);
assert newOrder.getId().equals(orderId);
}
@Test(groups = { "testReadOrdersForCustomerLegacy" }, dependsOnGroups = { "findNamedOrderForCustomerLegacy" })
@Transactional
public void testReadOrdersForCustomer() throws PricingException {
Customer customer = customerService.createCustomerFromId(null);
Order order = cartService.createNewCartForCustomer(customer);
order.setStatus(OrderStatus.IN_PROCESS);
order = cartService.save(order, false);
List<Order> newOrders = cartService.findOrdersForCustomer(customer, OrderStatus.IN_PROCESS);
boolean containsOrder = false;
if (newOrders.contains(order))
{
containsOrder = true;
}
assert containsOrder == true;
containsOrder = false;
newOrders = cartService.findOrdersForCustomer(customer, null);
if (newOrders.contains(order))
{
containsOrder = true;
}
assert containsOrder == true;
}
@Test(groups = { "testOrderPropertiesLegacy" }, dependsOnGroups = { "testReadOrdersForCustomerLegacy" })
public void testOrderProperties() throws PricingException {
Customer customer = customerService.createCustomerFromId(null);
Order order = cartService.createNewCartForCustomer(customer);
assert order.getSubTotal() == null;
assert order.getTotal() == null;
assert order.getRemainingTotal() == null;
Calendar testCalendar = Calendar.getInstance();
order.setSubmitDate(testCalendar.getTime());
assert order.getSubmitDate().equals(testCalendar.getTime());
}
@Test(groups = { "testNamedOrderForCustomerLegacy" }, dependsOnGroups = { "testOrderPropertiesLegacy" })
public void testNamedOrderForCustomer() throws PricingException {
Customer customer = customerService.createCustomerFromId(null);
customer = customerService.saveCustomer(customer);
Order order = cartService.createNamedOrderForCustomer("Birthday Order", customer);
Long orderId = order.getId();
assert order != null;
assert order.getName().equals("Birthday Order");
assert order.getCustomer().equals(customer);
cartService.removeNamedOrderForCustomer("Birthday Order", customer);
assert cartService.findOrderById(orderId) == null;
}
@Test(groups = { "testAddSkuToOrderLegacy" })
@Transactional
public void testAddSkuToOrder() throws PricingException {
Customer customer = customerService.saveCustomer(customerService.createCustomerFromId(null));
Category category = new CategoryImpl();
category.setName("Pants");
category = catalogService.saveCategory(category);
Calendar activeStartCal = Calendar.getInstance();
activeStartCal.add(Calendar.DAY_OF_YEAR, -2);
Sku newDefaultSku = new SkuImpl();
newDefaultSku.setName("Leather Pants");
newDefaultSku.setRetailPrice(new Money(44.99));
newDefaultSku.setActiveStartDate(activeStartCal.getTime());
newDefaultSku.setDiscountable(true);
newDefaultSku = catalogService.saveSku(newDefaultSku);
Product newProduct = new ProductImpl();
newProduct.setDefaultCategory(category);
newProduct.setDefaultSku(newDefaultSku);
newProduct = catalogService.saveProduct(newProduct);
Order order = cartService.createNamedOrderForCustomer("Pants Order", customer);
OrderItem orderItem = cartService.addSkuToOrder(order.getId(), newDefaultSku.getId(),
newProduct.getId(), category.getId(), 2);
OrderItem quantityNullOrderItem = cartService.addSkuToOrder(order.getId(), newDefaultSku.getId(),
newProduct.getId(), category.getId(), null);
OrderItem skuNullOrderItem = cartService.addSkuToOrder(order.getId(), null,
null, category.getId(), 2);
OrderItem orderNullOrderItem = cartService.addSkuToOrder(null, newDefaultSku.getId(),
newProduct.getId(), category.getId(), 2);
OrderItem productNullOrderItem = cartService.addSkuToOrder(order.getId(), newDefaultSku.getId(),
null, category.getId(), 2);
OrderItem categoryNullOrderItem = cartService.addSkuToOrder(order.getId(), newDefaultSku.getId(),
newProduct.getId(), null, 2);
assert orderItem != null;
assert skuNullOrderItem == null;
assert quantityNullOrderItem == null;
assert orderNullOrderItem == null;
assert productNullOrderItem != null;
assert categoryNullOrderItem != null;
}
@Test(groups = { "testOrderPaymentInfosLegacy" }, dataProvider = "basicPaymentInfo", dataProviderClass = PaymentInfoDataProvider.class)
@Transactional
public void testOrderPaymentInfos(PaymentInfo info) throws PricingException {
Customer customer = customerService.saveCustomer(createNamedCustomer());
Order order = cartService.createNewCartForCustomer(customer);
cartService.addPaymentToOrder(order, info);
boolean foundInfo = false;
assert order.getPaymentInfos() != null;
for (PaymentInfo testInfo : order.getPaymentInfos())
{
if (testInfo.equals(info))
{
foundInfo = true;
}
}
assert foundInfo == true;
assert cartService.readPaymentInfosForOrder(order) != null;
//cartService.removeAllPaymentsFromOrder(order);
//assert order.getPaymentInfos().size() == 0;
}
@Test(groups = { "testSubmitOrderLegacy" }, dependsOnGroups = { "findNamedOrderForCustomerLegacy" })
public void testSubmitOrder() throws PricingException {
Customer customer = customerService.createCustomerFromId(null);
Order order = cartService.createNewCartForCustomer(customer);
order.setStatus(OrderStatus.IN_PROCESS);
order = cartService.save(order, false);
Long orderId = order.getId();
Order confirmedOrder = cartService.confirmOrder(order);
confirmedOrder = cartService.findOrderById(confirmedOrder.getId());
Long confirmedOrderId = confirmedOrder.getId();
assert orderId.equals(confirmedOrderId);
assert confirmedOrder.getStatus().equals(OrderStatus.SUBMITTED);
}
@Test
public void findCartForNullCustomerId() {
assert cartService.findCartForCustomer(new CustomerImpl()) == null;
}
@Test(groups = { "testCartAndNamedOrderLegacy" })
@Transactional
public void testCreateNamedOrder() throws PricingException {
Customer customer = customerService.saveCustomer(customerService.createCustomerFromId(null));
Calendar activeStartCal = Calendar.getInstance();
activeStartCal.add(Calendar.DAY_OF_YEAR, -2);
Category category = new CategoryImpl();
category.setName("Pants");
category.setActiveStartDate(activeStartCal.getTime());
category = catalogService.saveCategory(category);
Sku newDefaultSku = new SkuImpl();
newDefaultSku.setName("Leather Pants");
newDefaultSku.setRetailPrice(new Money(44.99));
newDefaultSku.setActiveStartDate(activeStartCal.getTime());
newDefaultSku.setDiscountable(true);
newDefaultSku = catalogService.saveSku(newDefaultSku);
Product newProduct = new ProductImpl();
newProduct.setDefaultCategory(category);
newProduct.setDefaultSku(newDefaultSku);
newProduct = catalogService.saveProduct(newProduct);
Order order = cartService.createNamedOrderForCustomer("Pants Order", customer);
OrderItem orderItem = cartService.addSkuToOrder(order.getId(), newDefaultSku.getId(),
newProduct.getId(), category.getId(), 2);
OrderItem quantityNullOrderItem = cartService.addSkuToOrder(order.getId(), newDefaultSku.getId(),
newProduct.getId(), category.getId(), null);
OrderItem skuNullOrderItem = cartService.addSkuToOrder(order.getId(), null,
null, category.getId(), 2);
OrderItem orderNullOrderItem = cartService.addSkuToOrder(null, newDefaultSku.getId(),
newProduct.getId(), category.getId(), 2);
OrderItem productNullOrderItem = cartService.addSkuToOrder(order.getId(), newDefaultSku.getId(),
null, category.getId(), 2);
OrderItem categoryNullOrderItem = cartService.addSkuToOrder(order.getId(), newDefaultSku.getId(),
newProduct.getId(), null, 2);
assert orderItem != null;
assert skuNullOrderItem == null;
assert quantityNullOrderItem == null;
assert orderNullOrderItem == null;
assert productNullOrderItem != null;
assert categoryNullOrderItem != null;
}
@Test(groups = { "testOrderFulfillmentGroupsLegacy" }, dataProvider = "basicShippingRates", dataProviderClass = ShippingRateDataProvider.class)
@Transactional
public void testAddFulfillmentGroupToOrder(ShippingRate shippingRate, ShippingRate sr2) throws PricingException, ItemNotFoundException{
shippingRate = shippingRateService.save(shippingRate);
sr2 = shippingRateService.save(sr2);
Customer customer = createCustomerWithAddresses();
Order order = initializeExistingCart(customer);
CustomerAddress customerAddress = customerAddressService.readActiveCustomerAddressesByCustomerId(customer.getId()).get(0);
FulfillmentGroupRequest fgRequest = new FulfillmentGroupRequest();
List<FulfillmentGroupItemRequest> fgiRequests = new ArrayList<FulfillmentGroupItemRequest>();
for (OrderItem orderItem : order.getOrderItems()) {
FulfillmentGroupItemRequest fgiRequest = new FulfillmentGroupItemRequest();
fgiRequest.setOrderItem(orderItem);
fgiRequest.setQuantity(1);
fgiRequests.add(fgiRequest);
}
fgRequest.setAddress(customerAddress.getAddress());
fgRequest.setFulfillmentGroupItemRequests(fgiRequests);
fgRequest.setOrder(cartService.findCartForCustomer(customer));
fgRequest.setMethod("standard");
fgRequest.setService(ShippingServiceType.BANDED_SHIPPING.getType());
cartService.addFulfillmentGroupToOrder(fgRequest);
Order resultOrder = cartService.findOrderById(order.getId());
assert resultOrder.getFulfillmentGroups().size() == 1;
assert resultOrder.getFulfillmentGroups().get(0).getFulfillmentGroupItems().size() == 2;
cartService.removeAllFulfillmentGroupsFromOrder(order, false);
resultOrder = cartService.findOrderById(order.getId());
assert resultOrder.getFulfillmentGroups().size() == 0;
FulfillmentGroup defaultFg = cartService.createDefaultFulfillmentGroup(order, customerAddress.getAddress());
defaultFg.setMethod("standard");
defaultFg.setService(ShippingServiceType.BANDED_SHIPPING.getType());
assert defaultFg.isPrimary();
cartService.addFulfillmentGroupToOrder(order, defaultFg);
resultOrder = cartService.findOrderById(order.getId());
assert resultOrder.getFulfillmentGroups().size() == 1;
}
} | 0true
| integration_src_test_java_org_broadleafcommerce_core_order_service_legacy_LegacyOrderTest.java |
904 | @SuppressWarnings({ "unchecked" })
public class ODocument extends ORecordSchemaAwareAbstract<Object> implements Iterable<Entry<String, Object>>, ODetachable,
Externalizable {
private static final long serialVersionUID = 1L;
public static final byte RECORD_TYPE = 'd';
protected Map<String, Object> _fieldValues;
protected Map<String, Object> _fieldOriginalValues;
protected Map<String, OType> _fieldTypes;
protected Map<String, OSimpleMultiValueChangeListener<String, Object>> _fieldChangeListeners;
protected Map<String, OMultiValueChangeTimeLine<String, Object>> _fieldCollectionChangeTimeLines;
protected boolean _trackingChanges = true;
protected boolean _ordered = true;
protected boolean _lazyLoad = true;
protected boolean _allowChainedAccess = true;
protected transient List<WeakReference<ORecordElement>> _owners = null;
protected static final String[] EMPTY_STRINGS = new String[] {};
/**
* Internal constructor used on unmarshalling.
*/
public ODocument() {
setup();
}
/**
* Creates a new instance by the raw stream usually read from the database. New instances are not persistent until {@link #save()}
* is called.
*
* @param iSource
* Raw stream
*/
public ODocument(final byte[] iSource) {
_source = iSource;
setup();
}
/**
* Creates a new instance by the raw stream usually read from the database. New instances are not persistent until {@link #save()}
* is called.
*
* @param iSource
* Raw stream as InputStream
*/
public ODocument(final InputStream iSource) throws IOException {
final ByteArrayOutputStream out = new ByteArrayOutputStream();
OIOUtils.copyStream(iSource, out, -1);
_source = out.toByteArray();
setup();
}
/**
* Creates a new instance in memory linked by the Record Id to the persistent one. New instances are not persistent until
* {@link #save()} is called.
*
* @param iRID
* Record Id
*/
public ODocument(final ORID iRID) {
setup();
_recordId = (ORecordId) iRID;
_status = STATUS.NOT_LOADED;
_dirty = false;
}
  /**
   * Creates a new instance in memory of the specified class, linked by the Record Id to the persistent one. New instances are not
   * persistent until {@link #save()} is called.
   *
   * @param iClassName
   *          Class name
   * @param iRID
   *          Record Id
   */
  public ODocument(final String iClassName, final ORID iRID) {
    this(iClassName);
    _recordId = (ORecordId) iRID;
    // Same lazy-load state as ODocument(ORID): clean and not yet loaded.
    _dirty = false;
    _status = STATUS.NOT_LOADED;
  }
  /**
   * Creates a new instance in memory of the specified class. New instances are not persistent until {@link #save()} is called.
   *
   * @param iClassName
   *          Class name, resolved against the current database schema
   */
  public ODocument(final String iClassName) {
    setClassName(iClassName);
    setup();
  }
  /**
   * Creates a new instance in memory of the specified schema class. New instances are not persistent until {@link #save()} is
   * called. The database reference is taken from the thread local.
   *
   * @param iClass
   *          OClass instance
   */
  public ODocument(final OClass iClass) {
    setup();
    _clazz = iClass;
  }
/**
* Fills a document passing the field array in form of pairs of field name and value.
*
* @param iFields
* Array of field pairs
*/
public ODocument(final Object[] iFields) {
setup();
if (iFields != null && iFields.length > 0)
for (int i = 0; i < iFields.length; i += 2) {
field(iFields[i].toString(), iFields[i + 1]);
}
}
/**
* Fills a document passing a map of key/values where the key is the field name and the value the field's value.
*
* @param iFieldMap
* Map of Object/Object
*/
public ODocument(final Map<? extends Object, Object> iFieldMap) {
setup();
if (iFieldMap != null && !iFieldMap.isEmpty())
for (Entry<? extends Object, Object> entry : iFieldMap.entrySet()) {
field(entry.getKey().toString(), entry.getValue());
}
}
  /**
   * Fills a document passing the field names/values pair, where the first pair is mandatory. The optional trailing arguments are
   * interpreted as additional name/value pairs.
   */
  public ODocument(final String iFieldName, final Object iFieldValue, final Object... iFields) {
    this(iFields);
    // Set last so the mandatory pair wins over a duplicate in the varargs.
    field(iFieldName, iFieldValue);
  }
  /**
   * Copies the current instance to a new one. Hasn't been choose the clone() to let ODocument return type. Once copied the new
   * instance has the same identity and values but all the internal structure are totally independent by the source.
   *
   * @return a deep, independent copy of this document
   */
  public ODocument copy() {
    return (ODocument) copyTo(new ODocument());
  }
  /**
   * Copies all the fields into iDestination document. The destination's listener/original-value maps are reset and rebuilt so the
   * copy tracks its own changes independently.
   */
  @Override
  public ORecordAbstract<Object> copyTo(final ORecordAbstract<Object> iDestination) {
    // TODO: REMOVE THIS
    checkForFields();
    ODocument destination = (ODocument) iDestination;
    super.copyTo(iDestination);
    destination._ordered = _ordered;
    destination._clazz = _clazz;
    destination._trackingChanges = _trackingChanges;
    if (_owners != null)
      destination._owners = new ArrayList<WeakReference<ORecordElement>>(_owners);
    if (_fieldValues != null) {
      // Preserve the map flavor (ordered vs. plain) of the source.
      destination._fieldValues = _fieldValues instanceof LinkedHashMap ? new LinkedHashMap<String, Object>()
          : new HashMap<String, Object>();
      for (Entry<String, Object> entry : _fieldValues.entrySet())
        ODocumentHelper.copyFieldValue(destination, entry);
    }
    if (_fieldTypes != null)
      destination._fieldTypes = new HashMap<String, OType>(_fieldTypes);
    // Reset change-tracking state on the copy, then re-attach listeners to its own collections.
    destination._fieldChangeListeners = null;
    destination._fieldCollectionChangeTimeLines = null;
    destination._fieldOriginalValues = null;
    destination.addAllMultiValueChangeListeners();
    destination._dirty = _dirty; // LEAVE IT AS LAST TO AVOID SOMETHING SET THE FLAG TO TRUE
    return destination;
  }
  /**
   * Returns a shallow copy sharing the raw source buffer with this record. Only allowed on a clean record, because a dirty one
   * has unserialized in-memory changes the raw buffer would not reflect.
   */
  @Override
  public ODocument flatCopy() {
    if (isDirty())
      throw new IllegalStateException("Cannot execute a flat copy of a dirty record");
    final ODocument cloned = new ODocument();
    cloned.setOrdered(_ordered);
    // Reuse the same source bytes and identity; fields will be lazily unmarshalled by the clone.
    cloned.fill(_recordId, _recordVersion, _source, false);
    return cloned;
  }
  /**
   * Returns an empty record as place-holder of the current. Used when a record is requested, but only the identity is needed.
   *
   * @return a clean, content-less document carrying a copy of this record's identity
   */
  public ORecord<?> placeholder() {
    final ODocument cloned = new ODocument();
    cloned._source = null;
    cloned._recordId = _recordId.copy();
    cloned._status = STATUS.NOT_LOADED;
    cloned._dirty = false;
    return cloned;
  }
  /**
   * Detaches all the connected records. If new records are linked to the document the detaching cannot be completed and false will
   * be returned. Record fields are replaced by their identity (ORID); detachable values delegate to their own detach().
   *
   * @return true if the record has been detached, otherwise false
   */
  public boolean detach() {
    boolean fullyDetached = true;
    if (_fieldValues != null) {
      Object fieldValue;
      for (Map.Entry<String, Object> entry : _fieldValues.entrySet()) {
        fieldValue = entry.getValue();
        if (fieldValue instanceof ORecord<?>)
          if (((ORecord<?>) fieldValue).getIdentity().isNew())
            // A new (unsaved) record has no persistent identity to substitute.
            fullyDetached = false;
          else
            _fieldValues.put(entry.getKey(), ((ORecord<?>) fieldValue).getIdentity());
        if (fieldValue instanceof ODetachable) {
          if (!((ODetachable) fieldValue).detach())
            fullyDetached = false;
        }
      }
    }
    return fullyDetached;
  }
  /**
   * Loads the record using a fetch plan. Example:
   * <p>
   * <code>doc.load( "*:3" ); // LOAD THE DOCUMENT BY EARLY FETCHING UP TO 3rd LEVEL OF CONNECTIONS</code>
   * </p>
   *
   * @param iFetchPlan
   *          Fetch plan to use
   */
  public ODocument load(final String iFetchPlan) {
    // Delegates with cache enabled.
    return load(iFetchPlan, false);
  }
/**
* Loads the record using a fetch plan. Example:
* <p>
* <code>doc.load( "*:3", true ); // LOAD THE DOCUMENT BY EARLY FETCHING UP TO 3rd LEVEL OF CONNECTIONS IGNORING THE CACHE</code>
* </p>
*
* @param iIgnoreCache
* Ignore the cache or use it
*/
public ODocument load(final String iFetchPlan, boolean iIgnoreCache) {
Object result = null;
try {
result = getDatabase().load(this, iFetchPlan, iIgnoreCache);
} catch (Exception e) {
throw new ORecordNotFoundException("The record with id '" + getIdentity() + "' was not found", e);
}
if (result == null)
throw new ORecordNotFoundException("The record with id '" + getIdentity() + "' was not found");
return (ODocument) result;
}
public ODocument load(final String iFetchPlan, boolean iIgnoreCache, boolean loadTombstone) {
Object result = null;
try {
result = getDatabase().load(this, iFetchPlan, iIgnoreCache, loadTombstone);
} catch (Exception e) {
throw new ORecordNotFoundException("The record with id '" + getIdentity() + "' was not found", e);
}
if (result == null)
throw new ORecordNotFoundException("The record with id '" + getIdentity() + "' was not found");
return (ODocument) result;
}
  /**
   * Reloads the record content, eagerly unmarshalling the fields when lazy loading is disabled.
   */
  @Override
  public ODocument reload(String iFetchPlan, boolean iIgnoreCache) {
    super.reload(iFetchPlan, iIgnoreCache);
    if (!_lazyLoad) {
      // Eager mode: force unmarshalling right away instead of on first field access.
      checkForFields();
      checkForLoading();
    }
    return this;
  }
  /**
   * Compares this document's content with another one, ignoring identity and version. Delegates the deep comparison to
   * {@link ODocumentHelper#hasSameContentOf}.
   */
  public boolean hasSameContentOf(final ODocument iOther) {
    final ODatabaseRecord currentDb = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
    return ODocumentHelper.hasSameContentOf(this, currentDb, iOther, currentDb, null);
  }
  /**
   * Serializes the document to its raw byte representation, lazily initializing the record format if needed.
   */
  @Override
  public byte[] toStream() {
    if (_recordFormat == null)
      setup();
    return super.toStream();
  }
  /**
   * Dumps the instance as string in the form <code>ClassName#rid{field:value,...} vN</code>. Rendering may force lazy field
   * unmarshalling, so the dirty flag is saved and restored to keep toString() side-effect free.
   */
  @Override
  public String toString() {
    final boolean saveDirtyStatus = _dirty;
    final StringBuilder buffer = new StringBuilder();
    try {
      checkForFields();
      if (_clazz != null)
        buffer.append(_clazz.getStreamableName());
      if (_recordId != null) {
        if (_recordId.isValid())
          buffer.append(_recordId);
      }
      boolean first = true;
      ORecord<?> record;
      for (Entry<String, Object> f : _fieldValues.entrySet()) {
        buffer.append(first ? '{' : ',');
        buffer.append(f.getKey());
        buffer.append(':');
        if (f.getValue() instanceof Collection<?>) {
          // Collections are summarized by size only, to keep the dump short.
          buffer.append('[');
          buffer.append(((Collection<?>) f.getValue()).size());
          buffer.append(']');
        } else if (f.getValue() instanceof ORecord<?>) {
          record = (ORecord<?>) f.getValue();
          if (record.getIdentity().isValid())
            record.getIdentity().toString(buffer);
          else
            buffer.append(record.toString());
        } else
          buffer.append(f.getValue());
        if (first)
          first = false;
      }
      // No opening brace was printed for an empty document, so skip the closing one too.
      if (!first)
        buffer.append('}');
      if (_recordId != null && _recordId.isValid()) {
        buffer.append(" v");
        buffer.append(_recordVersion);
      }
    } finally {
      _dirty = saveDirtyStatus;
    }
    return buffer.toString();
  }
  /**
   * Fills the ODocument directly with the string representation of the document itself. Use it for faster insertion but pay
   * attention to respect the OrientDB record format. All previously unmarshalled fields and tracking state are discarded.
   * <p>
   * <code>
   * record.reset();<br/>
   * record.setClassName("Account");<br/>
   * record.fromString(new String("Account@id:" + data.getCyclesDone() + ",name:'Luca',surname:'Garulli',birthDate:" + date.getTime()<br/>
   * + ",salary:" + 3000f + i));<br/>
   * record.save();<br/>
   * </code>
   * </p>
   *
   * @param iValue
   *          document content in OrientDB record-string format
   */
  public void fromString(final String iValue) {
    _dirty = true;
    _source = OBinaryProtocol.string2bytes(iValue);
    // Drop all in-memory state: fields will be re-parsed lazily from the new source.
    removeAllCollectionChangeListeners();
    _fieldCollectionChangeTimeLines = null;
    _fieldOriginalValues = null;
    _fieldTypes = null;
    _fieldValues = null;
  }
/**
* Returns the set of field names.
*/
public String[] fieldNames() {
checkForLoading();
checkForFields();
if (_fieldValues == null || _fieldValues.size() == 0)
return EMPTY_STRINGS;
return _fieldValues.keySet().toArray(new String[_fieldValues.size()]);
}
/**
* Returns the array of field values.
*/
public Object[] fieldValues() {
checkForLoading();
checkForFields();
return _fieldValues.values().toArray(new Object[_fieldValues.size()]);
}
  /**
   * Returns the raw value of a field without any lazy-load record resolution or type conversion. Supports the chained/indexed
   * syntax (dots and brackets) via {@link ODocumentHelper#getFieldValue}.
   *
   * @param iFieldName
   *          field name, attribute (starting with '@') or chained expression
   * @return the raw stored value, or null when the field is missing
   */
  public <RET> RET rawField(final String iFieldName) {
    if (iFieldName == null || iFieldName.length() == 0)
      return null;
    checkForLoading();
    if (!checkForFields(iFieldName))
      // NO FIELDS
      return null;
    // OPTIMIZATION: plain field names hit the map directly, skipping the expression parser.
    if (iFieldName.charAt(0) != '@' && OStringSerializerHelper.indexOf(iFieldName, 0, '.', '[') == -1)
      return (RET) _fieldValues.get(iFieldName);
    // NOT FOUND, PARSE THE FIELD NAME
    return (RET) ODocumentHelper.getFieldValue(this, iFieldName);
  }
  /**
   * Reads the field value. Lazily resolves ORID links to records (when lazy loading is enabled) and converts the stored value to
   * match a forced field type, caching both results back into the field map.
   *
   * @param iFieldName
   *          field name
   * @return field value if defined, otherwise null
   */
  public <RET> RET field(final String iFieldName) {
    RET value = this.<RET> rawField(iFieldName);
    final OType t = fieldType(iFieldName);
    if (_lazyLoad && value instanceof ORID && t != OType.LINK && ODatabaseRecordThreadLocal.INSTANCE.isDefined()) {
      // CREATE THE DOCUMENT OBJECT IN LAZY WAY
      value = (RET) getDatabase().load((ORID) value);
      // Cache the resolved record only for plain (non-chained) field names.
      if (!iFieldName.contains(".")) {
        removeCollectionChangeListener(iFieldName);
        removeCollectionTimeLine(iFieldName);
        _fieldValues.put(iFieldName, value);
        addCollectionChangeListener(iFieldName, value);
      }
    }
    // CHECK FOR CONVERSION
    if (t != null) {
      Object newValue = null;
      if (t == OType.BINARY && value instanceof String)
        newValue = OStringSerializerHelper.getBinaryContent(value);
      else if (t == OType.DATE && value instanceof Long)
        newValue = (RET) new Date(((Long) value).longValue());
      else if ((t == OType.EMBEDDEDSET || t == OType.LINKSET) && value instanceof List)
        // CONVERT LIST TO SET
        newValue = (RET) ODocumentHelper.convertField(this, iFieldName, Set.class, value);
      else if ((t == OType.EMBEDDEDLIST || t == OType.LINKLIST) && value instanceof Set)
        // CONVERT SET TO LIST
        newValue = (RET) ODocumentHelper.convertField(this, iFieldName, List.class, value);
      if (newValue != null) {
        // VALUE CHANGED: SET THE NEW ONE, RE-ATTACHING THE COLLECTION LISTENER TO IT
        removeCollectionChangeListener(iFieldName);
        removeCollectionTimeLine(iFieldName);
        _fieldValues.put(iFieldName, newValue);
        addCollectionChangeListener(iFieldName, newValue);
        value = (RET) newValue;
      }
    }
    return value;
  }
/**
* Reads the field value forcing the return type. Use this method to force return of ORID instead of the entire document by
* passing ORID.class as iFieldType.
*
* @param iFieldName
* field name
* @param iFieldType
* Forced type.
* @return field value if defined, otherwise null
*/
public <RET> RET field(final String iFieldName, final Class<?> iFieldType) {
RET value = this.<RET> rawField(iFieldName);
if (value != null)
value = (RET) ODocumentHelper.convertField(this, iFieldName, iFieldType, value);
return value;
}
  /**
   * Reads the field value forcing the return type. Use this method to force return of binary data.
   *
   * @param iFieldName
   *          field name
   * @param iFieldType
   *          Forced type; registered as the field's forced type before reading so the getter applies the conversion
   * @return field value if defined, otherwise null
   */
  public <RET> RET field(final String iFieldName, final OType iFieldType) {
    setFieldType(iFieldName, iFieldType);
    return (RET) field(iFieldName);
  }
  /**
   * Writes the field value. This method sets the current document as dirty.
   *
   * @param iFieldName
   *          field name. If contains dots (.) the change is applied to the nested documents in chain. To disable this feature call
   *          {@link #setAllowChainedAccess(boolean)} to false.
   * @param iPropertyValue
   *          field value
   * @return The Record instance itself giving a "fluent interface". Useful to call multiple methods in chain.
   */
  public ODocument field(final String iFieldName, Object iPropertyValue) {
    // null = no forced type; the schema type (if any) is looked up by the 3-arg overload.
    return field(iFieldName, iPropertyValue, null);
  }
/**
* Fills a document passing the field names/values.
*/
public ODocument fields(final String iFieldName, final Object iFieldValue, final Object... iFields) {
if (iFields != null && iFields.length % 2 != 0)
throw new IllegalArgumentException("Fields must be passed in pairs as name and value");
field(iFieldName, iFieldValue);
if (iFields != null && iFields.length > 0)
for (int i = 0; i < iFields.length; i += 2) {
field(iFields[i].toString(), iFields[i + 1]);
}
return this;
}
/**
* Fills a document passing the field names/values as a Map<String,Object> where the keys are the field names and the values are
* the field values.
*/
public ODocument fields(final Map<String, Object> iMap) {
if (iMap != null) {
for (Entry<String, Object> entry : iMap.entrySet())
field(entry.getKey(), entry.getValue());
}
return this;
}
  /**
   * Writes the field value forcing the type. This method sets the current document as dirty.
   *
   * @param iFieldName
   *          field name. If contains dots (.) the change is applied to the nested documents in chain. To disable this feature call
   *          {@link #setAllowChainedAccess(boolean)} to false.
   * @param iPropertyValue
   *          field value
   * @param iFieldType
   *          Forced type (not auto-determined)
   * @return The Record instance itself giving a "fluent interface". Useful to call multiple methods in chain. If the updated
   *         document is another document (using the dot (.) notation) then the document returned is the changed one or NULL if no
   *         document has been found in chain
   */
  public ODocument field(String iFieldName, Object iPropertyValue, OType iFieldType) {
    // Special attribute names: '@class' and '@rid' update metadata instead of a field.
    if ("@class".equals(iFieldName)) {
      setClassName(iPropertyValue.toString());
      return this;
    } else if ("@rid".equals(iFieldName)) {
      _recordId.fromString(iPropertyValue.toString());
      return this;
    }
    final int lastSep = _allowChainedAccess ? iFieldName.lastIndexOf('.') : -1;
    if (lastSep > -1) {
      // SUB PROPERTY GET 1 LEVEL BEFORE LAST
      final Object subObject = field(iFieldName.substring(0, lastSep));
      if (subObject != null) {
        final String subFieldName = iFieldName.substring(lastSep + 1);
        if (subObject instanceof ODocument) {
          // SUB-DOCUMENT
          ((ODocument) subObject).field(subFieldName, iPropertyValue);
          return (ODocument) (((ODocument) subObject).isEmbedded() ? this : subObject);
        } else if (subObject instanceof Map<?, ?>)
          // KEY/VALUE
          ((Map<String, Object>) subObject).put(subFieldName, iPropertyValue);
        else if (OMultiValue.isMultiValue(subObject)) {
          // APPLY CHANGE TO ALL THE ITEM IN SUB-COLLECTION
          for (Object subObjectItem : OMultiValue.getMultiValueIterable(subObject)) {
            if (subObjectItem instanceof ODocument) {
              // SUB-DOCUMENT, CHECK IF IT'S NOT LINKED
              if (!((ODocument) subObjectItem).isEmbedded())
                throw new IllegalArgumentException("Property '" + iFieldName
                    + "' points to linked collection of items. You can only change embedded documents in this way");
              ((ODocument) subObjectItem).field(subFieldName, iPropertyValue);
            } else if (subObjectItem instanceof Map<?, ?>) {
              // KEY/VALUE
              ((Map<String, Object>) subObjectItem).put(subFieldName, iPropertyValue);
            }
          }
          return this;
        }
      }
      // Chain target not found (or not a settable container): signal by returning null.
      return null;
    }
    iFieldName = checkFieldName(iFieldName);
    checkForLoading();
    checkForFields();
    final boolean knownProperty = _fieldValues.containsKey(iFieldName);
    final Object oldValue = _fieldValues.get(iFieldName);
    if (knownProperty)
      // CHECK IF IS REALLY CHANGED
      if (iPropertyValue == null) {
        if (oldValue == null)
          // BOTH NULL: UNCHANGED
          return this;
      } else {
        try {
          if (iPropertyValue.equals(oldValue)) {
            if (!(iPropertyValue instanceof ORecordElement))
              // SAME BUT NOT TRACKABLE: SET THE RECORD AS DIRTY TO BE SURE IT'S SAVED
              setDirty();
            // SAVE VALUE: UNCHANGED
            return this;
          }
          if (OType.isSimpleType(iPropertyValue) && iPropertyValue.equals(oldValue))
            // SAVE VALUE: UNCHANGED
            return this;
        } catch (Exception e) {
          // equals() on user types can throw; log and fall through to the plain overwrite.
          OLogManager.instance().warn(this, "Error on checking the value of property %s against the record %s", e, iFieldName,
              getIdentity());
        }
      }
    setFieldType(iFieldName, iFieldType);
    if (iFieldType == null && _clazz != null) {
      // SCHEMAFULL?
      final OProperty prop = _clazz.getProperty(iFieldName);
      if (prop != null)
        iFieldType = prop.getType();
    }
    if (iPropertyValue != null)
      // CHECK FOR CONVERSION
      if (iFieldType != null)
        iPropertyValue = ODocumentHelper.convertField(this, iFieldName, iFieldType.getDefaultJavaType(), iPropertyValue);
      else if (iPropertyValue instanceof Enum)
        iPropertyValue = iPropertyValue.toString();
    // Swap listeners so change tracking follows the new value, not the replaced one.
    removeCollectionChangeListener(iFieldName);
    removeCollectionTimeLine(iFieldName);
    _fieldValues.put(iFieldName, iPropertyValue);
    addCollectionChangeListener(iFieldName, iPropertyValue);
    if (_status != STATUS.UNMARSHALLING) {
      setDirty();
      if (_trackingChanges && _recordId.isValid()) {
        // SAVE THE OLD VALUE IN A SEPARATE MAP ONLY IF TRACKING IS ACTIVE AND THE RECORD IS NOT NEW
        if (_fieldOriginalValues == null)
          _fieldOriginalValues = new HashMap<String, Object>();
        // INSERT IT ONLY IF NOT EXISTS TO AVOID LOOSE OF THE ORIGINAL VALUE (FUNDAMENTAL FOR INDEX HOOK)
        if (!_fieldOriginalValues.containsKey(iFieldName))
          _fieldOriginalValues.put(iFieldName, oldValue);
      }
    }
    return this;
  }
  /**
   * Removes a field, recording its previous value for change tracking and detaching any collection listeners.
   *
   * @return the removed value, or null if the field did not exist
   */
  public Object removeField(final String iFieldName) {
    checkForLoading();
    checkForFields();
    final boolean knownProperty = _fieldValues.containsKey(iFieldName);
    final Object oldValue = _fieldValues.get(iFieldName);
    if (knownProperty && _trackingChanges) {
      // SAVE THE OLD VALUE IN A SEPARATE MAP
      if (_fieldOriginalValues == null)
        _fieldOriginalValues = new HashMap<String, Object>();
      // INSERT IT ONLY IF NOT EXISTS TO AVOID LOOSE OF THE ORIGINAL VALUE (FUNDAMENTAL FOR INDEX HOOK)
      if (!_fieldOriginalValues.containsKey(iFieldName)) {
        _fieldOriginalValues.put(iFieldName, oldValue);
      }
    }
    removeCollectionTimeLine(iFieldName);
    removeCollectionChangeListener(iFieldName);
    _fieldValues.remove(iFieldName);
    // The raw source no longer matches the in-memory content: invalidate it.
    _source = null;
    setDirty();
    return oldValue;
  }
  /**
   * Merge current document with the document passed as parameter. If the field already exists then the conflicts are managed based
   * on the value of the parameter 'iConflictsOtherWins'.
   *
   * @param iOther
   *          Other ODocument instance to merge
   * @param iUpdateOnlyMode
   *          if true, the other document properties will always be added or overwritten. If false, the missed properties in the
   *          "other" document will be removed by original document
   * @param iMergeSingleItemsOfMultiValueFields
   *          if true, collection/map fields are merged item by item instead of replaced wholesale
   *
   * @return this document, merged
   */
  public ODocument merge(final ODocument iOther, boolean iUpdateOnlyMode, boolean iMergeSingleItemsOfMultiValueFields) {
    iOther.checkForLoading();
    iOther.checkForFields();
    // Inherit the other document's class when this one is schemaless.
    if (_clazz == null && iOther.getSchemaClass() != null)
      _clazz = iOther.getSchemaClass();
    return merge(iOther._fieldValues, iUpdateOnlyMode, iMergeSingleItemsOfMultiValueFields);
  }
  /**
   * Merge current document with the document passed as parameter. If the field already exists then the conflicts are managed based
   * on the value of the parameter 'iConflictsOtherWins'.
   *
   * @param iOther
   *          Map of field name to value to merge in
   * @param iUpdateOnlyMode
   *          if true, the other document properties will always be added or overwritten. If false, the missed properties in the
   *          "other" document will be removed by original document
   * @param iMergeSingleItemsOfMultiValueFields
   *          if true, map entries and collection items are merged individually instead of replacing the whole multi-value
   *
   * @return this document, merged
   */
  public ODocument merge(final Map<String, Object> iOther, final boolean iUpdateOnlyMode,
      boolean iMergeSingleItemsOfMultiValueFields) {
    checkForLoading();
    checkForFields();
    _source = null;
    for (String f : iOther.keySet()) {
      if (containsField(f) && iMergeSingleItemsOfMultiValueFields) {
        Object field = field(f);
        if (field instanceof Map<?, ?>) {
          // Merge map entries in place; other's entries win on key conflicts.
          final Map<String, Object> map = (Map<String, Object>) field;
          final Map<String, Object> otherMap = (Map<String, Object>) iOther.get(f);
          for (Entry<String, Object> entry : otherMap.entrySet()) {
            map.put(entry.getKey(), entry.getValue());
          }
          continue;
        } else if (field instanceof Collection<?>) {
          final Collection<Object> coll = (Collection<Object>) field;
          final Collection<Object> otherColl = (Collection<Object>) iOther.get(f);
          for (Object item : otherColl) {
            if (coll.contains(item))
              // REMOVE PREVIOUS ITEM BECAUSE THIS COULD BE UPDATED INSIDE OF IT
              coll.remove(item);
            coll.add(item);
          }
          // JUMP RAW REPLACE
          continue;
        }
      }
      // RESET THE FIELD TYPE
      setFieldType(f, null);
      // RAW SET/REPLACE
      field(f, iOther.get(f));
    }
    if (!iUpdateOnlyMode) {
      // REMOVE PROPERTIES NOT FOUND IN OTHER DOC
      for (String f : fieldNames())
        if (!iOther.containsKey(f))
          removeField(f);
    }
    return this;
  }
/**
* Returns list of changed fields. There are two types of changes:
* <ol>
* <li>Value of field itself was changed by calling of {@link #field(String, Object)} method for example.</li>
* <li>Internal state of field was changed but was not saved. This case currently is applicable for for collections only.</li>
* </ol>
*
* @return List of fields, values of which were changed.
*/
public String[] getDirtyFields() {
if ((_fieldOriginalValues == null || _fieldOriginalValues.isEmpty())
&& (_fieldCollectionChangeTimeLines == null || _fieldCollectionChangeTimeLines.isEmpty()))
return EMPTY_STRINGS;
final Set<String> dirtyFields = new HashSet<String>();
if (_fieldOriginalValues != null)
dirtyFields.addAll(_fieldOriginalValues.keySet());
if (_fieldCollectionChangeTimeLines != null)
dirtyFields.addAll(_fieldCollectionChangeTimeLines.keySet());
return dirtyFields.toArray(new String[dirtyFields.size()]);
}
/**
* Returns the original value of a field before it has been changed.
*
* @param iFieldName
* Property name to retrieve the original value
*/
public Object getOriginalValue(final String iFieldName) {
return _fieldOriginalValues != null ? _fieldOriginalValues.get(iFieldName) : null;
}
public OMultiValueChangeTimeLine<String, Object> getCollectionTimeLine(final String iFieldName) {
return _fieldCollectionChangeTimeLines != null ? _fieldCollectionChangeTimeLines.get(iFieldName) : null;
}
  /**
   * Returns an iterator over the document's field entries. The iterator's remove() keeps change tracking consistent by recording
   * the removed value and detaching its collection listeners.
   */
  public Iterator<Entry<String, Object>> iterator() {
    checkForLoading();
    checkForFields();
    if (_fieldValues == null)
      return OEmptyIterator.INSTANCE;
    final Iterator<Entry<String, Object>> iterator = _fieldValues.entrySet().iterator();
    return new Iterator<Entry<String, Object>>() {
      // Last entry returned by next(); needed by remove() for change tracking.
      private Entry<String, Object> current;
      public boolean hasNext() {
        return iterator.hasNext();
      }
      public Entry<String, Object> next() {
        current = iterator.next();
        return current;
      }
      public void remove() {
        iterator.remove();
        if (_trackingChanges) {
          // SAVE THE OLD VALUE IN A SEPARATE MAP
          if (_fieldOriginalValues == null)
            _fieldOriginalValues = new HashMap<String, Object>();
          // INSERT IT ONLY IF NOT EXISTS TO AVOID LOOSE OF THE ORIGINAL VALUE (FUNDAMENTAL FOR INDEX HOOK)
          if (!_fieldOriginalValues.containsKey(current.getKey())) {
            _fieldOriginalValues.put(current.getKey(), current.getValue());
          }
        }
        removeCollectionChangeListener(current.getKey());
        removeCollectionTimeLine(current.getKey());
      }
    };
  }
/**
* Checks if a field exists.
*
* @return True if exists, otherwise false.
*/
public boolean containsField(final String iFieldName) {
if (iFieldName == null)
return false;
checkForLoading();
checkForFields(iFieldName);
return _fieldValues.containsKey(iFieldName);
}
  /**
   * Internal. Returns the record type discriminator ('d' for documents).
   */
  public byte getRecordType() {
    return RECORD_TYPE;
  }
  /**
   * Returns true if the record has some owner, i.e. it is embedded in at least one other record.
   */
  public boolean hasOwners() {
    return _owners != null && !_owners.isEmpty();
  }
/**
* Internal.
*
* @return
*/
public ODocument addOwner(final ORecordElement iOwner) {
if (_owners == null)
_owners = new ArrayList<WeakReference<ORecordElement>>();
this._owners.add(new WeakReference<ORecordElement>(iOwner));
return this;
}
public Iterable<ORecordElement> getOwners() {
if (_owners == null)
return Collections.emptyList();
final List<ORecordElement> result = new ArrayList<ORecordElement>();
for (WeakReference<ORecordElement> o : _owners)
result.add(o.get());
return result;
}
  /**
   * Internal. Unregisters the given record from the owner list; only the first matching entry is removed.
   *
   * @return this document, for chaining
   */
  public ODocument removeOwner(final ORecordElement iRecordElement) {
    if (_owners != null) {
      // PROPAGATES TO THE OWNER
      ORecordElement e;
      for (int i = 0; i < _owners.size(); ++i) {
        e = _owners.get(i).get();
        if (e == iRecordElement) {
          _owners.remove(i);
          break;
        }
      }
    }
    return this;
  }
  /**
   * Propagates the dirty status to the owner, if any. This happens when the object is embedded in another one.
   */
  @Override
  public ORecordAbstract<Object> setDirty() {
    if (_owners != null) {
      // PROPAGATES TO THE OWNER
      ORecordElement e;
      for (WeakReference<ORecordElement> o : _owners) {
        e = o.get();
        // Owners may have been garbage collected; skip cleared references.
        if (e != null)
          e.setDirty();
      }
    }
    // THIS IS IMPORTANT TO BE SURE THAT FIELDS ARE LOADED BEFORE IT'S TOO LATE AND THE RECORD _SOURCE IS NULL
    checkForFields();
    return super.setDirty();
  }
  /**
   * Notifies all live owners before this record's identity changes. Iterates over a snapshot of the owner list so callbacks that
   * mutate it do not cause a ConcurrentModificationException.
   */
  @Override
  public void onBeforeIdentityChanged(final ORID iRID) {
    if (_owners != null) {
      final List<WeakReference<ORecordElement>> temp = new ArrayList<WeakReference<ORecordElement>>(_owners);
      ORecordElement e;
      for (WeakReference<ORecordElement> o : temp) {
        e = o.get();
        if (e != null)
          e.onBeforeIdentityChanged(iRID);
      }
    }
  }
  /**
   * Notifies all live owners after this record's identity has changed. Iterates over a snapshot of the owner list so callbacks
   * that mutate it do not cause a ConcurrentModificationException.
   */
  @Override
  public void onAfterIdentityChanged(final ORecord<?> iRecord) {
    super.onAfterIdentityChanged(iRecord);
    if (_owners != null) {
      final List<WeakReference<ORecordElement>> temp = new ArrayList<WeakReference<ORecordElement>>(_owners);
      ORecordElement e;
      for (WeakReference<ORecordElement> o : temp) {
        e = o.get();
        if (e != null)
          e.onAfterIdentityChanged(iRecord);
      }
    }
  }
  /**
   * Replaces the document's content with the given raw buffer, discarding all previously unmarshalled fields and change-tracking
   * state. Fields are re-parsed lazily unless lazy loading is disabled.
   */
  @Override
  public ODocument fromStream(final byte[] iRecordBuffer) {
    removeAllCollectionChangeListeners();
    _fieldValues = null;
    _fieldTypes = null;
    _fieldOriginalValues = null;
    _fieldChangeListeners = null;
    _fieldCollectionChangeTimeLines = null;
    super.fromStream(iRecordBuffer);
    if (!_lazyLoad) {
      // Eager mode: force unmarshalling immediately.
      checkForFields();
      checkForLoading();
    }
    return (ODocument) this;
  }
/**
* Returns the forced field type if any.
*
* @param iFieldName
*/
public OType fieldType(final String iFieldName) {
return _fieldTypes != null ? _fieldTypes.get(iFieldName) : null;
}
  /**
   * Unloads the record, additionally clearing the in-memory field maps and detaching collection listeners.
   */
  @Override
  public ODocument unload() {
    super.unload();
    internalReset();
    return this;
  }
  /**
   * Clears all the field values and types, and drops the owner list.
   */
  @Override
  public ODocument clear() {
    super.clear();
    internalReset();
    _owners = null;
    return this;
  }
  /**
   * Resets the record values and class type to being reused. This can be used only if no transactions are begun.
   *
   * @throws IllegalStateException
   *           when called inside an active transaction
   */
  @Override
  public ODocument reset() {
    ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
    if (db != null && db.getTransaction().isActive())
      throw new IllegalStateException("Cannot reset documents during a transaction. Create a new one each time");
    super.reset();
    internalReset();
    if (_fieldOriginalValues != null)
      _fieldOriginalValues.clear();
    _owners = null;
    return this;
  }
  /**
   * Clears field values, collection time lines and listeners; shared by {@link #unload()}, {@link #clear()} and {@link #reset()}.
   */
  protected void internalReset() {
    removeAllCollectionChangeListeners();
    if (_fieldCollectionChangeTimeLines != null)
      _fieldCollectionChangeTimeLines.clear();
    if (_fieldValues != null)
      _fieldValues.clear();
  }
/**
* Rollbacks changes to the loaded version without reloading the document. Works only if tracking changes is enabled @see
* {@link #isTrackingChanges()} and {@link #setTrackingChanges(boolean)} methods.
*/
public ODocument undo() {
if (!_trackingChanges)
throw new OConfigurationException("Cannot undo the document because tracking of changes is disabled");
for (Entry<String, Object> entry : _fieldOriginalValues.entrySet()) {
final Object value = entry.getValue();
if (value == null)
_fieldValues.remove(entry.getKey());
else
_fieldValues.put(entry.getKey(), entry.getValue());
}
return this;
}
  /** Tells if linked records are resolved lazily on field access. */
  public boolean isLazyLoad() {
    return _lazyLoad;
  }
  /**
   * Enables or disables lazy loading of linked records, propagating the setting to already-loaded lazy multi-value fields.
   */
  public void setLazyLoad(final boolean iLazyLoad) {
    this._lazyLoad = iLazyLoad;
    if (_fieldValues != null) {
      // PROPAGATE LAZINESS TO THE FIELDS
      for (Entry<String, Object> field : _fieldValues.entrySet()) {
        if (field.getValue() instanceof ORecordLazyMultiValue)
          ((ORecordLazyMultiValue) field.getValue()).setAutoConvertToRecord(false);
      }
    }
  }
  /** Tells if field changes are being tracked (see {@link #setTrackingChanges(boolean)}). */
  public boolean isTrackingChanges() {
    return _trackingChanges;
  }
  /**
   * Enabled or disabled the tracking of changes in the document. This is needed by some triggers like
   * {@link com.orientechnologies.orient.core.index.OClassIndexManager} to determine what fields are changed to update indexes.
   *
   * @param iTrackingChanges
   *          True to enable it, otherwise false
   * @return this document, for chaining
   */
  public ODocument setTrackingChanges(final boolean iTrackingChanges) {
    this._trackingChanges = iTrackingChanges;
    if (!iTrackingChanges) {
      // FREE RESOURCES
      this._fieldOriginalValues = null;
      removeAllCollectionChangeListeners();
      _fieldChangeListeners = null;
      _fieldCollectionChangeTimeLines = null;
    } else {
      // Re-attach listeners to existing multi-value fields so future changes are tracked.
      addAllMultiValueChangeListeners();
    }
    return this;
  }
  /** Tells if the document preserves the insertion order of its fields. */
  public boolean isOrdered() {
    return _ordered;
  }
  /**
   * Sets whether field insertion order is preserved. Affects the map implementation chosen on the next field initialization.
   */
  public ODocument setOrdered(final boolean iOrdered) {
    this._ordered = iOrdered;
    return this;
  }
  /**
   * Equality check: two documents are equal when the superclass considers them equal and either they are the same instance or
   * this record has a valid identity.
   */
  // NOTE(review): equals is overridden without a matching hashCode here — presumably hashCode lives in a superclass; verify
  // the equals/hashCode contract still holds.
  @Override
  public boolean equals(Object obj) {
    if (!super.equals(obj))
      return false;
    return this == obj || _recordId.isValid();
  }
/**
* Returns the number of fields in memory.
*/
public int fields() {
return _fieldValues == null ? 0 : _fieldValues.size();
}
  /** Tells if the document has no fields in memory. */
  public boolean isEmpty() {
    return _fieldValues == null || _fieldValues.isEmpty();
  }
public boolean isEmbedded() {
return _owners != null && !_owners.isEmpty();
}
  /**
   * Ensures the field map exists and, when a raw source buffer is still pending, unmarshals the requested fields.
   *
   * @return true when the requested fields are available, otherwise the result of the partial deserialization
   */
  @Override
  protected boolean checkForFields(final String... iFields) {
    if (_fieldValues == null)
      // LinkedHashMap keeps field insertion order when the document is ordered.
      _fieldValues = _ordered ? new LinkedHashMap<String, Object>() : new HashMap<String, Object>();
    if (_status == ORecordElement.STATUS.LOADED && _source != null)
      // POPULATE FIELDS LAZY
      return deserializeFields(iFields);
    return true;
  }
  /**
   * Internal. Initializes the record and binds the schema-aware CSV serializer as record format.
   */
  @Override
  protected void setup() {
    super.setup();
    _recordFormat = ORecordSerializerFactory.instance().getFormat(ORecordSerializerSchemaAware2CSV.NAME);
  }
/**
* Sets the field type. This overrides the schema property settings if any.
*
* @param iFieldName
* Field name
* @param iFieldType
* Type to set between OType enumaration values
*/
public ODocument setFieldType(final String iFieldName, final OType iFieldType) {
if (iFieldType != null) {
// SET THE FORCED TYPE
if (_fieldTypes == null)
_fieldTypes = new HashMap<String, OType>();
_fieldTypes.put(iFieldName, iFieldType);
} else if (_fieldTypes != null) {
// REMOVE THE FIELD TYPE
_fieldTypes.remove(iFieldName);
if (_fieldTypes.size() == 0)
// EMPTY: OPTIMIZE IT BY REMOVING THE ENTIRE MAP
_fieldTypes = null;
}
return this;
}
  /** Saves the document to the database without forcing a create. */
  @Override
  public ODocument save() {
    return save(false);
  }
  /** Saves the document into the given cluster without forcing a create. */
  @Override
  public ODocument save(final String iClusterName) {
    return save(iClusterName, false);
  }
  /**
   * Saves the document, converting multi-values to tracked versions and validating against the schema first. When a schema class
   * is set, delegates to the class' default cluster.
   */
  @Override
  public ODocument save(boolean forceCreate) {
    if (_clazz != null)
      // Class-targeted save: the cluster-name overload performs conversion and validation itself.
      return save(getDatabase().getClusterNameById(_clazz.getDefaultClusterId()), forceCreate);
    convertAllMultiValuesToTrackedVersions();
    validate();
    return (ODocument) super.save(forceCreate);
  }
  /**
   * Saves the document into the given cluster, converting multi-values to tracked versions and validating first.
   */
  @Override
  public ODocument save(final String iClusterName, boolean forceCreate) {
    convertAllMultiValuesToTrackedVersions();
    validate();
    return (ODocument) super.save(iClusterName, forceCreate);
  }
  /**
   * Initializes the object if has been unserialized. Supports partial unmarshalling: when field names are passed, only those are
   * deserialized from the raw source; a full unmarshalling discards the source buffer.
   *
   * @param iFields
   *          optional field names (possibly chained/indexed expressions) to deserialize; empty/null means all fields
   * @return true when the requested fields (or all fields) are available after the call
   */
  @Override
  public boolean deserializeFields(final String... iFields) {
    if (_source == null)
      // ALREADY UNMARSHALLED OR JUST EMPTY
      return true;
    if (iFields != null && iFields.length > 0) {
      // EXTRACT REAL FIELD NAMES: strip any '[' index or '.' chain suffix to get the root field.
      for (int i = 0; i < iFields.length; ++i) {
        final String f = iFields[i];
        if (!f.startsWith("@")) {
          int pos1 = f.indexOf('[');
          int pos2 = f.indexOf('.');
          if (pos1 > -1 || pos2 > -1) {
            int pos = pos1 > -1 ? pos1 : pos2;
            if (pos2 > -1 && pos2 < pos)
              pos = pos2;
            // REPLACE THE FIELD NAME
            iFields[i] = f.substring(0, pos);
          }
        }
      }
      // CHECK IF HAS BEEN ALREADY UNMARSHALLED
      if (_fieldValues != null && !_fieldValues.isEmpty()) {
        boolean allFound = true;
        for (String f : iFields)
          if (!f.startsWith("@") && !_fieldValues.containsKey(f)) {
            allFound = false;
            break;
          }
        if (allFound)
          // ALL THE REQUESTED FIELDS HAVE BEEN LOADED BEFORE AND AVAILABLES, AVOID UNMARSHALLIGN
          return true;
      }
    }
    if (_recordFormat == null)
      setup();
    super.deserializeFields(iFields);
    if (iFields != null && iFields.length > 0) {
      if (iFields[0].startsWith("@"))
        // ATTRIBUTE
        return true;
      // PARTIAL UNMARSHALLING: report success if at least one requested field materialized.
      if (_fieldValues != null && !_fieldValues.isEmpty())
        for (String f : iFields)
          if (_fieldValues.containsKey(f))
            return true;
      // NO FIELDS FOUND
      return false;
    } else if (_source != null)
      // FULL UNMARSHALLING: the raw buffer is no longer needed.
      _source = null;
    return true;
  }
/**
 * Validates that the given field name contains no characters forbidden by the schema rules.
 *
 * @param iFieldName candidate field name
 * @return the same field name, when valid
 * @throws IllegalArgumentException when the name contains an invalid character
 */
protected String checkFieldName(final String iFieldName) {
  final Character invalid = OSchemaShared.checkNameIfValid(iFieldName);
  if (invalid == null)
    return iFieldName;
  throw new IllegalArgumentException("Invalid field name '" + iFieldName + "'. Character '" + invalid + "' is invalid");
}
/** Attaches a change listener to the field's current value; see the two-argument overload. */
private void addCollectionChangeListener(final String fieldName) {
  addCollectionChangeListener(fieldName, _fieldValues.get(fieldName));
}
/**
 * Attaches a multi-value change listener to the given field so that fine-grained collection
 * changes can be recorded. Only embedded/link collection and map field types whose value is an
 * {@code OTrackedMultiValue} are eligible; at most one listener is registered per field.
 */
private void addCollectionChangeListener(final String fieldName, final Object fieldValue) {
  OType fieldType = fieldType(fieldName);
  if (fieldType == null && _clazz != null) {
    // fall back to the schema-declared property type when no per-document type is set
    final OProperty prop = _clazz.getProperty(fieldName);
    fieldType = prop != null ? prop.getType() : null;
  }
  if (fieldType == null
      || !(OType.EMBEDDEDLIST.equals(fieldType) || OType.EMBEDDEDMAP.equals(fieldType) || OType.EMBEDDEDSET.equals(fieldType)
          || OType.LINKSET.equals(fieldType) || OType.LINKLIST.equals(fieldType) || OType.LINKMAP.equals(fieldType)))
    return;
  if (!(fieldValue instanceof OTrackedMultiValue))
    return;
  final OTrackedMultiValue<String, Object> multiValue = (OTrackedMultiValue<String, Object>) fieldValue;
  if (_fieldChangeListeners == null)
    _fieldChangeListeners = new HashMap<String, OSimpleMultiValueChangeListener<String, Object>>();
  if (!_fieldChangeListeners.containsKey(fieldName)) {
    // register at most one listener per field
    final OSimpleMultiValueChangeListener<String, Object> listener = new OSimpleMultiValueChangeListener<String, Object>(
        fieldName);
    multiValue.addChangeListener(listener);
    _fieldChangeListeners.put(fieldName, listener);
  }
}
/** Detaches the change listener from every field and drops the whole listener registry. */
private void removeAllCollectionChangeListeners() {
  if (_fieldValues == null)
    return;
  for (final String name : _fieldValues.keySet())
    removeCollectionChangeListener(name);
  _fieldChangeListeners = null;
}
/** (Re-)attaches a change listener to every eligible multi-value field currently present. */
private void addAllMultiValueChangeListeners() {
  if (_fieldValues == null)
    return;
  for (final String name : _fieldValues.keySet())
    addCollectionChangeListener(name);
}
/**
 * Detaches and forgets the change listener registered for the given field, if any. Note the
 * listener entry is removed from {@code _fieldChangeListeners} even when the current field value
 * is not a tracked multi-value (the listener is then simply dropped without detaching).
 */
private void removeCollectionChangeListener(final String fieldName) {
  if (_fieldChangeListeners == null)
    return;
  final OMultiValueChangeListener<String, Object> changeListener = _fieldChangeListeners.remove(fieldName);
  final Object fieldValue;
  if (_fieldValues == null)
    return;
  fieldValue = _fieldValues.get(fieldName);
  if (!(fieldValue instanceof OTrackedMultiValue))
    return;
  if (changeListener != null) {
    // physically detach the listener from the tracked collection
    final OTrackedMultiValue<String, Object> multiValue = (OTrackedMultiValue<String, Object>) fieldValue;
    multiValue.removeRecordChangeListener(changeListener);
  }
}
/** Forgets the recorded change time line of the given field, when any exists. */
private void removeCollectionTimeLine(final String fieldName) {
  if (_fieldCollectionChangeTimeLines != null)
    _fieldCollectionChangeTimeLines.remove(fieldName);
}
/**
 * Converts all non-tracked collections implementations contained in document fields to tracked ones.
 *
 * @see OTrackedMultiValue
 */
public void convertAllMultiValuesToTrackedVersions() {
  if (_fieldValues == null)
    return;
  // collect replacements first to avoid mutating _fieldValues while iterating over it
  final Map<String, Object> fieldsToUpdate = new HashMap<String, Object>();
  for (Map.Entry<String, Object> fieldEntry : _fieldValues.entrySet()) {
    final Object fieldValue = fieldEntry.getValue();
    OType fieldType = fieldType(fieldEntry.getKey());
    if (fieldType == null && _clazz != null) {
      // fall back to the schema-declared property type
      final OProperty prop = _clazz.getProperty(fieldEntry.getKey());
      fieldType = prop != null ? prop.getType() : null;
    }
    if (fieldType == null
        || !(OType.EMBEDDEDLIST.equals(fieldType) || OType.EMBEDDEDMAP.equals(fieldType) || OType.EMBEDDEDSET.equals(fieldType)
            || OType.LINKSET.equals(fieldType) || OType.LINKLIST.equals(fieldType) || OType.LINKMAP.equals(fieldType)))
      continue;
    // wrap plain JDK collections in the tracked counterpart matching the declared type
    if (fieldValue instanceof List && fieldType.equals(OType.EMBEDDEDLIST) && !(fieldValue instanceof OTrackedMultiValue))
      fieldsToUpdate.put(fieldEntry.getKey(), new OTrackedList<Object>(this, (List<?>) fieldValue, null));
    else if (fieldValue instanceof Set && fieldType.equals(OType.EMBEDDEDSET) && !(fieldValue instanceof OTrackedMultiValue))
      fieldsToUpdate.put(fieldEntry.getKey(), new OTrackedSet<Object>(this, (Set<OIdentifiable>) fieldValue, null));
    else if (fieldValue instanceof Map && fieldType.equals(OType.EMBEDDEDMAP) && !(fieldValue instanceof OTrackedMultiValue))
      fieldsToUpdate
          .put(fieldEntry.getKey(), new OTrackedMap<OIdentifiable>(this, (Map<Object, OIdentifiable>) fieldValue, null));
    else if (fieldValue instanceof Set && fieldType.equals(OType.LINKSET) && !(fieldValue instanceof OTrackedMultiValue))
      fieldsToUpdate.put(fieldEntry.getKey(), new ORecordLazySet(this, (Collection<OIdentifiable>) fieldValue));
    else if (fieldValue instanceof List && fieldType.equals(OType.LINKLIST) && !(fieldValue instanceof OTrackedMultiValue))
      fieldsToUpdate.put(fieldEntry.getKey(), new ORecordLazyList(this, (List<OIdentifiable>) fieldValue));
    else if (fieldValue instanceof Map && fieldType.equals(OType.LINKMAP) && !(fieldValue instanceof OTrackedMultiValue))
      fieldsToUpdate.put(fieldEntry.getKey(), new ORecordLazyMap(this, (Map<Object, OIdentifiable>) fieldValue));
  }
  _fieldValues.putAll(fieldsToUpdate);
  addAllMultiValueChangeListeners();
}

/**
 * Perform gathering of all operations performed on tracked collection and create mapping between list of collection operations
 * and field name that contains collection that was changed.
 *
 * @param <K>
 *          Value that uniquely identifies position of item in collection
 * @param <V>
 *          Item value.
 */
private final class OSimpleMultiValueChangeListener<K, V> implements OMultiValueChangeListener<K, V> {
  // name of the document field whose collection this listener observes
  private final String fieldName;

  private OSimpleMultiValueChangeListener(final String fieldName) {
    this.fieldName = fieldName;
  }

  public void onAfterRecordChanged(final OMultiValueChangeEvent<K, V> event) {
    if (_status != STATUS.UNMARSHALLING)
      // any collection mutation makes the owning document dirty
      setDirty();
    if (!(_trackingChanges && _recordId.isValid()) || _status == STATUS.UNMARSHALLING)
      return;
    if (_fieldOriginalValues != null && _fieldOriginalValues.containsKey(fieldName))
      // the whole field was already replaced: per-item events are redundant
      return;
    if (_fieldCollectionChangeTimeLines == null)
      _fieldCollectionChangeTimeLines = new HashMap<String, OMultiValueChangeTimeLine<String, Object>>();
    OMultiValueChangeTimeLine<String, Object> timeLine = _fieldCollectionChangeTimeLines.get(fieldName);
    if (timeLine == null) {
      timeLine = new OMultiValueChangeTimeLine<String, Object>();
      _fieldCollectionChangeTimeLines.put(fieldName, timeLine);
    }
    timeLine.addCollectionChangeEvent((OMultiValueChangeEvent<String, Object>) event);
  }
}
/**
 * Serializes identity, version, raw content and dirty flag to the stream.
 * NOTE: the write order (rid length, rid bytes, version, content length, content bytes,
 * dirty flag) must stay in sync with {@link #readExternal(ObjectInput)}.
 */
@Override
public void writeExternal(ObjectOutput stream) throws IOException {
  final byte[] idBuffer = _recordId.toStream();
  stream.writeInt(idBuffer.length);
  stream.write(idBuffer);
  _recordVersion.getSerializer().writeTo(stream, _recordVersion);
  final byte[] content = toStream();
  stream.writeInt(content.length);
  stream.write(content);
  stream.writeBoolean(_dirty);
}

/**
 * Restores identity, version, raw content and dirty flag from the stream, mirroring the exact
 * field order written by {@link #writeExternal(ObjectOutput)}.
 */
@Override
public void readExternal(ObjectInput stream) throws IOException, ClassNotFoundException {
  final byte[] idBuffer = new byte[stream.readInt()];
  stream.readFully(idBuffer);
  _recordId.fromStream(idBuffer);
  _recordVersion.getSerializer().readFrom(stream, _recordVersion);
  final int len = stream.readInt();
  final byte[] content = new byte[len];
  stream.readFully(content);
  fromStream(content);
  _dirty = stream.readBoolean();
}
/**
 * Returns the behavior of field() methods allowing access to the sub documents with dot notation ('.'). Default is true. Set it
 * to false if you allow to store properties with the dot.
 *
 * @return true when dot-notation traversal is enabled
 */
public boolean isAllowChainedAccess() {
  return _allowChainedAccess;
}

/**
 * Change the behavior of field() methods allowing access to the sub documents with dot notation ('.'). Default is true. Set it to
 * false if you allow to store properties with the dot.
 *
 * @param _allowChainedAccess new value of the flag
 * @return this document, for fluent method chaining
 */
public ODocument setAllowChainedAccess(final boolean _allowChainedAccess) {
  this._allowChainedAccess = _allowChainedAccess;
  return this;
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_record_impl_ODocument.java |
2,908 | public static class Employee implements Serializable {
// Fixture fields are package-private — presumably so tests can set them directly; confirm.
long id;
String name;
String city;
int age;
boolean active;
double salary;
Timestamp date; // same instant as createDate
Date createDate; // instant the fixture was built
java.sql.Date sqlDate; // same instant as createDate
State state;
BigDecimal bigDecimal = new BigDecimal("1.23E3");

/** Full constructor including the {@link State}. */
public Employee(long id, String name, int age, boolean live, double salary, State state) {
  this(id, name, age, live, salary);
  this.state = state;
}

public Employee(long id, String name, int age, boolean live, double salary) {
  this(id, name, null, age, live, salary);
}

/** Convenience constructor with an unassigned id (-1). */
public Employee(String name, int age, boolean live, double salary) {
  this(-1, name, age, live, salary);
}

/** Convenience constructor with an unassigned id (-1) and a city. */
public Employee(String name, String city, int age, boolean live, double salary) {
  this(-1, name, city, age, live, salary);
}

/** Designated constructor: all others delegate here; stamps the three date fields with "now". */
public Employee(long id, String name, String city, int age, boolean live, double salary) {
  this.id = id;
  this.name = name;
  this.city = city;
  this.age = age;
  this.active = live;
  this.salary = salary;
  this.createDate = new Date();
  this.date = new Timestamp(createDate.getTime());
  this.sqlDate = new java.sql.Date(createDate.getTime());
}

/** No-arg constructor, required by serialization frameworks. */
public Employee() {
}
// --- plain accessors -------------------------------------------------------
public long getId() {
  return id;
}

public void setId(long id) {
  this.id = id;
}

public Date getCreateDate() {
  return createDate;
}

public void setCreateDate(Date createDate) {
  this.createDate = createDate;
}

public java.sql.Date getSqlDate() {
  return sqlDate;
}

public void setSqlDate(java.sql.Date sqlDate) {
  this.sqlDate = sqlDate;
}

public void setName(String name) {
  this.name = name;
}

public void setCity(String city) {
  this.city = city;
}

public void setAge(int age) {
  this.age = age;
}

public void setActive(boolean active) {
  this.active = active;
}

public void setSalary(double salary) {
  this.salary = salary;
}

public void setDate(Timestamp date) {
  this.date = date;
}

public void setBigDecimal(BigDecimal bigDecimal) {
  this.bigDecimal = bigDecimal;
}

public BigDecimal getBigDecimal() {
  return bigDecimal;
}

public Timestamp getDate() {
  return date;
}

public String getName() {
  return name;
}

public String getCity() {
  return city;
}

public int getAge() {
  return age;
}

public double getSalary() {
  return salary;
}

public boolean isActive() {
  return active;
}

public State getState() {
  return state;
}

public void setState(State state) {
  this.state = state;
}

/**
 * Equality is based on active, age, salary and name only; id, city, state and the date
 * fields are excluded — NOTE(review): appears intentional for this fixture, confirm.
 */
@Override
public boolean equals(Object o) {
  if (this == o) return true;
  if (o == null || getClass() != o.getClass()) return false;
  Employee employee = (Employee) o;
  if (active != employee.active) return false;
  if (age != employee.age) return false;
  if (Double.compare(employee.salary, salary) != 0) return false;
  if (name != null ? !name.equals(employee.name) : employee.name != null) return false;
  return true;
}

/** Consistent with {@link #equals(Object)}: hashes the same four fields. */
@Override
public int hashCode() {
  int result;
  long temp;
  result = name != null ? name.hashCode() : 0;
  result = 31 * result + age;
  result = 31 * result + (active ? 1 : 0);
  // fold the double's bit pattern into the hash, treating +0.0 as 0L
  temp = salary != +0.0d ? Double.doubleToLongBits(salary) : 0L;
  result = 31 * result + (int) (temp ^ (temp >>> 32));
  return result;
}
/**
 * Renders the fixture exactly as before, e.g.
 * {@code Employee{name='x', city=null, age=1, active=true, salary=0.0}}.
 */
@Override
public String toString() {
  return "Employee" + "{name='" + name + '\'' + ", city=" + city + ", age=" + age
      + ", active=" + active + ", salary=" + salary + '}';
}
} | 1no label
| hazelcast_src_test_java_com_hazelcast_query_SampleObjects.java |
387 | public class OMultiValueChangeTimeLine<K, V> {
// Ordered journal of every change event recorded since the collection was loaded.
private final List<OMultiValueChangeEvent<K, V>> multiValueChangeEvents = new ArrayList<OMultiValueChangeEvent<K, V>>();

/**
 * @return unmodifiable <code>List</code> of all operations that were performed on the collection
 *         starting from the time when it was loaded from the DB.
 */
public List<OMultiValueChangeEvent<K, V>> getMultiValueChangeEvents() {
  return Collections.unmodifiableList(multiValueChangeEvents);
}

/**
 * Adds a new operation that was performed on the collection to the collection history.
 *
 * @param changeEvent Description of the operation that was performed on the collection.
 */
public void addCollectionChangeEvent(OMultiValueChangeEvent<K, V> changeEvent) {
  multiValueChangeEvents.add(changeEvent);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_record_OMultiValueChangeTimeLine.java |
148 | public class OBooleanSerializer implements OBinarySerializer<Boolean> {
/**
 * Size of a serialized boolean value in bytes.
 */
public static final int BOOLEAN_SIZE = 1;
// NOTE(review): mutable public singleton field — could be declared final; confirm nothing reassigns it.
public static OBooleanSerializer INSTANCE = new OBooleanSerializer();
// Serializer id registered for the boolean type.
public static final byte ID = 1;
// A boolean is persisted as exactly one byte: 1 for true, 0 for false.

public int getObjectSize(Boolean object, Object... hints) {
  return BOOLEAN_SIZE;
}

public void serialize(Boolean object, byte[] stream, int startPosition, Object... hints) {
  stream[startPosition] = object ? (byte) 1 : (byte) 0;
}

public Boolean deserialize(byte[] stream, int startPosition) {
  return stream[startPosition] == 1;
}

public int getObjectSize(byte[] stream, int startPosition) {
  return BOOLEAN_SIZE;
}

public byte getId() {
  return ID;
}

public int getObjectSizeNative(byte[] stream, int startPosition) {
  return BOOLEAN_SIZE;
}

public void serializeNative(Boolean object, byte[] stream, int startPosition, Object... hints) {
  // a single byte has no endianness, so the "native" form is the plain one
  serialize(object, stream, startPosition);
}

public Boolean deserializeNative(byte[] stream, int startPosition) {
  return deserialize(stream, startPosition);
}

@Override
public void serializeInDirectMemory(Boolean object, ODirectMemoryPointer pointer, long offset, Object... hints) {
  pointer.setByte(offset, object ? (byte) 1 : (byte) 0);
}

@Override
public Boolean deserializeFromDirectMemory(ODirectMemoryPointer pointer, long offset) {
  // any positive byte reads as true here; serialize only ever writes 0 or 1
  return pointer.getByte(offset) > 0;
}

@Override
public int getObjectSizeInDirectMemory(ODirectMemoryPointer pointer, long offset) {
  return BOOLEAN_SIZE;
}

public boolean isFixedLength() {
  return true;
}

public int getFixedLength() {
  return BOOLEAN_SIZE;
}

@Override
public Boolean preprocess(Boolean value, Object... hints) {
  return value;
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_serialization_types_OBooleanSerializer.java |
264 | public class EmbeddedStoreTest extends AbstractCassandraStoreTest {
/** Spins up a clean embedded Cassandra instance once for the whole test class. */
@BeforeClass
public static void startCassandra() {
  CassandraStorageSetup.startCleanEmbedded();
}

/** Base store configuration keyed by this test class' simple name. */
@Override
public ModifiableConfiguration getBaseStorageConfiguration() {
  return CassandraStorageSetup.getEmbeddedConfiguration(getClass().getSimpleName());
}

@Override
public AbstractCassandraStoreManager openStorageManager(Configuration c) throws BackendException {
  return new CassandraEmbeddedStoreManager(c);
}

/** The embedded store must report ordered keys and a local key partition. */
@Test
@Category({ OrderedKeyStoreTests.class })
public void testConfiguration() {
  StoreFeatures features = manager.getFeatures();
  assertTrue(features.isKeyOrdered());
  assertTrue(features.hasLocalKeyPartition());
}
} | 0true
| titan-cassandra_src_test_java_com_thinkaurelius_titan_diskstorage_cassandra_embedded_EmbeddedStoreTest.java |
311 | new Thread() {
public void run() {
  // signal the test that we are about to lock, then block on the lock
  // (presumably already held by the test thread — see caller)
  beforeLock.countDown();
  map.lock(key);
  afterLock.countDown(); // reached only once the lock is acquired
}
}.start(); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java |
2,893 | public class NorwegianAnalyzerProvider extends AbstractIndexAnalyzerProvider<NorwegianAnalyzer> {
// Single analyzer instance, built once in the constructor and returned by every get() call.
private final NorwegianAnalyzer analyzer;

/**
 * Builds a {@link NorwegianAnalyzer} from index settings: stop words default to Lucene's
 * Norwegian stop set and stem exclusions default to the empty set.
 */
@Inject
public NorwegianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
  super(index, indexSettings, name, settings);
  analyzer = new NorwegianAnalyzer(version,
      Analysis.parseStopWords(env, settings, NorwegianAnalyzer.getDefaultStopSet(), version),
      Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET, version));
}

@Override
public NorwegianAnalyzer get() {
  return this.analyzer;
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_NorwegianAnalyzerProvider.java |
3,147 | public class TxnPrepareBackupOperation extends QueueOperation implements BackupOperation {
// id of the reserved queue item this backup prepare refers to
private long itemId;
// true = reserve a poll (take); false = reserve an offer (put)
private boolean pollOperation;
// id of the owning transaction
private String transactionId;

public TxnPrepareBackupOperation() {
  // required for deserialization
}

public TxnPrepareBackupOperation(String name, long itemId, boolean pollOperation, String transactionId) {
  super(name);
  this.itemId = itemId;
  this.pollOperation = pollOperation;
  this.transactionId = transactionId;
}

/**
 * Mirrors the primary prepare on the backup replica: reserves either a poll or an offer on the
 * backup queue container for the given transaction.
 */
@Override
public void run() throws Exception {
  QueueContainer container = getOrCreateContainer();
  if (pollOperation) {
    response = container.txnPollBackupReserve(itemId, transactionId);
  } else {
    container.txnOfferBackupReserve(itemId, transactionId);
    response = true;
  }
}

// NOTE: read/write order below must match exactly: itemId, pollOperation, transactionId.
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
  super.writeInternal(out);
  out.writeLong(itemId);
  out.writeBoolean(pollOperation);
  out.writeUTF(transactionId);
}

@Override
protected void readInternal(ObjectDataInput in) throws IOException {
  super.readInternal(in);
  itemId = in.readLong();
  pollOperation = in.readBoolean();
  transactionId = in.readUTF();
}

@Override
public int getId() {
  return QueueDataSerializerHook.TXN_PREPARE_BACKUP;
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_queue_tx_TxnPrepareBackupOperation.java |
2,701 | cluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
  // wipe the local gateway data of the first node only, while it is down
  if (node_1.equals(nodeName)) {
    logger.info("--> deleting the data for the first node");
    gateway1.reset();
  }
  return null; // no settings override for the restarted node
}
}); | 0true
| src_test_java_org_elasticsearch_gateway_local_LocalGatewayIndexStateTests.java |
1,897 | nodeEngine.getExecutionService().execute("hz:near-cache", new Runnable() {
public void run() {
  try {
    // remember when this sweep started; used to pace future cleanups
    lastCleanup = Clock.currentTimeMillis();
    for (Map.Entry<Data, CacheRecord> entry : cache.entrySet()) {
      if (entry.getValue().expired()) {
        final Data key = entry.getKey();
        final CacheRecord record = cache.remove(key);
        // if a mapping still exists (it may have been removed concurrently)
        if (record != null) {
          // give the evicted record's cost back to the size estimator
          updateSizeEstimator(-calculateCost(record));
        }
      }
    }
  } finally {
    // always re-arm the cleanup flag, even when the sweep fails
    canCleanUp.set(true);
  }
}
}); | 0true
| hazelcast_src_main_java_com_hazelcast_map_NearCache.java |
/**
 * Signals that requested transaction log data could not be read because it is missing from the
 * underlying log. Plain {@link IOException} subclass carrying no extra state; the four
 * constructors mirror the standard exception constructor quartet.
 */
public class MissingLogDataException extends IOException
{
    public MissingLogDataException()
    {
        super();
    }

    public MissingLogDataException( String message, Throwable cause )
    {
        super( message, cause );
    }

    public MissingLogDataException( String message )
    {
        super( message );
    }

    public MissingLogDataException( Throwable cause )
    {
        super( cause );
    }
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_MissingLogDataException.java |
2,803 | public static class CharFiltersBindings {
// registered char filter factory classes, keyed by filter name
private final Map<String, Class<? extends CharFilterFactory>> charFilters = Maps.newHashMap();

public CharFiltersBindings() {
}

/** Registers a char filter factory class under the given name (last registration wins). */
public void processCharFilter(String name, Class<? extends CharFilterFactory> charFilterFactory) {
  charFilters.put(name, charFilterFactory);
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_AnalysisModule.java |
1,416 | @XmlRootElement(name = "searchFacetValue")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class SearchFacetValueWrapper extends BaseWrapper implements APIWrapper<SearchFacetResultDTO> {

  /*
   * Indicates if this facet value was active in a current search.
   */
  @XmlElement
  protected Boolean active = Boolean.FALSE;

  /*
   * A value. If the min and max values are populated, this should be null.
   */
  @XmlElement
  protected String value;

  /*
   * The value that should be passed in when using a search facet to filter a search.
   * For example, a value key may be something like: "range[0.00000:5.00000]". This would
   * be the value passed in as a query parameter (e.g. price=range[0.00000:5.00000]). Or this could
   * be a simple value if no min and max values are used.
   */
  @XmlElement
  protected String valueKey;

  /*
   * Indicates how many results are associated with this facet value.
   */
  @XmlElement
  protected Integer quantity;

  /*
   * Min value of a range. Should be null if value is not null.
   */
  @XmlElement
  protected BigDecimal minValue;

  /*
   * Max value of a range. Should be null if value is not null.
   */
  @XmlElement
  protected BigDecimal maxValue;

  /** Copies every facet attribute from the DTO onto this wrapper. */
  @Override
  public void wrapDetails(SearchFacetResultDTO model, HttpServletRequest request) {
    this.active = model.isActive();
    this.valueKey = model.getValueKey();
    this.quantity = model.getQuantity();
    this.value = model.getValue();
    this.minValue = model.getMinValue();
    this.maxValue = model.getMaxValue();
  }

  /** Summary and detail forms are identical for facet values. */
  @Override
  public void wrapSummary(SearchFacetResultDTO model, HttpServletRequest request) {
    wrapDetails(model, request);
  }
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_SearchFacetValueWrapper.java |
347 | Thread t = new Thread(new Runnable() {
@Override
public void run() {
  // optional grace period before starting the shutdown sequence
  try {
    Thread.sleep(request.delay.millis());
  } catch (InterruptedException e) {
    // ignore
  }
  // first, stop the cluster service
  logger.trace("[cluster_shutdown]: stopping the cluster service so no re-routing will occur");
  clusterService.stop();
  // fan out a shutdown request to every node except the master, then wait for all replies
  final CountDownLatch latch = new CountDownLatch(nodes.size());
  for (ObjectCursor<DiscoveryNode> cursor : nodes) {
    final DiscoveryNode node = cursor.value;
    if (node.id().equals(state.nodes().masterNodeId())) {
      // don't shutdown the master yet...
      latch.countDown();
    } else {
      logger.trace("[cluster_shutdown]: sending shutdown request to [{}]", node);
      transportService.sendRequest(node, NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
        @Override
        public void handleResponse(TransportResponse.Empty response) {
          logger.trace("[cluster_shutdown]: received shutdown response from [{}]", node);
          latch.countDown();
        }

        @Override
        public void handleException(TransportException exp) {
          // a failed reply still counts, so the latch cannot hang forever
          logger.warn("[cluster_shutdown]: received failed shutdown response from [{}]", exp, node);
          latch.countDown();
        }
      });
    }
  }
  try {
    latch.await();
  } catch (InterruptedException e) {
    // ignore
  }
  logger.info("[cluster_shutdown]: done shutting down all nodes except master, proceeding to master");
  // now, kill the master
  logger.trace("[cluster_shutdown]: shutting down the master [{}]", state.nodes().masterNode());
  transportService.sendRequest(state.nodes().masterNode(), NodeShutdownRequestHandler.ACTION, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
    @Override
    public void handleResponse(TransportResponse.Empty response) {
      logger.trace("[cluster_shutdown]: received shutdown response from master");
    }

    @Override
    public void handleException(TransportException exp) {
      logger.warn("[cluster_shutdown]: received failed shutdown response master", exp);
    }
  });
}
}); | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_node_shutdown_TransportNodesShutdownAction.java |
1,070 | public class MapStoreConfigReadOnly extends MapStoreConfig {
public MapStoreConfigReadOnly(MapStoreConfig config) {
  super(config);
}

/** Every mutator fails the same way: this view never permits modification. */
private UnsupportedOperationException rejectMutation() {
  return new UnsupportedOperationException("This config is read-only");
}

public MapStoreConfig setClassName(String className) {
  throw rejectMutation();
}

public MapStoreConfig setFactoryClassName(String factoryClassName) {
  throw rejectMutation();
}

public MapStoreConfig setWriteDelaySeconds(int writeDelaySeconds) {
  throw rejectMutation();
}

public MapStoreConfig setEnabled(boolean enabled) {
  throw rejectMutation();
}

public MapStoreConfig setImplementation(Object implementation) {
  throw rejectMutation();
}

public MapStoreConfig setFactoryImplementation(Object factoryImplementation) {
  throw rejectMutation();
}

public MapStoreConfig setProperty(String name, String value) {
  throw rejectMutation();
}

public MapStoreConfig setProperties(Properties properties) {
  throw rejectMutation();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_config_MapStoreConfigReadOnly.java |
2,768 | public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpServerTransport> implements HttpServerTransport {
static {
  NettyStaticSetup.setup();
}

private final NetworkService networkService;

// HTTP protocol limits
final ByteSizeValue maxContentLength;
final ByteSizeValue maxInitialLineLength;
final ByteSizeValue maxHeaderSize;
final ByteSizeValue maxChunkSize;

private final int workerCount;
private final boolean blockingServer;
final boolean compression;
private final int compressionLevel;
final boolean resetCookies;

// bind / publish configuration
private final String port;
private final String bindHost;
private final String publishHost;

// TCP socket tuning (null means "leave the OS/Netty default")
private final Boolean tcpNoDelay;
private final Boolean tcpKeepAlive;
private final Boolean reuseAddress;
private final ByteSizeValue tcpSendBufferSize;
private final ByteSizeValue tcpReceiveBufferSize;
private final ReceiveBufferSizePredictorFactory receiveBufferSizePredictorFactory;
final ByteSizeValue maxCumulationBufferCapacity;
final int maxCompositeBufferComponents;

private volatile ServerBootstrap serverBootstrap;
private volatile BoundTransportAddress boundAddress;
private volatile Channel serverChannel;
OpenChannelsHandler serverOpenChannels;
private volatile HttpServerAdapter httpServerAdapter;

/**
 * Reads every component / http.* setting once and freezes the values into final fields.
 * Component settings take precedence over the global http.* fallbacks.
 */
@Inject
public NettyHttpServerTransport(Settings settings, NetworkService networkService) {
  super(settings);
  this.networkService = networkService;
  if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
    System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
  }
  ByteSizeValue maxContentLength = componentSettings.getAsBytesSize("max_content_length", settings.getAsBytesSize("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB)));
  this.maxChunkSize = componentSettings.getAsBytesSize("max_chunk_size", settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
  this.maxHeaderSize = componentSettings.getAsBytesSize("max_header_size", settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
  this.maxInitialLineLength = componentSettings.getAsBytesSize("max_initial_line_length", settings.getAsBytesSize("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB)));
  // don't reset cookies by default, since I don't think we really need to
  // note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies
  this.resetCookies = componentSettings.getAsBoolean("reset_cookies", settings.getAsBoolean("http.reset_cookies", false));
  this.maxCumulationBufferCapacity = componentSettings.getAsBytesSize("max_cumulation_buffer_capacity", null);
  this.maxCompositeBufferComponents = componentSettings.getAsInt("max_composite_buffer_components", -1);
  this.workerCount = componentSettings.getAsInt("worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
  this.blockingServer = settings.getAsBoolean("http.blocking_server", settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));
  this.port = componentSettings.get("port", settings.get("http.port", "9200-9300"));
  this.bindHost = componentSettings.get("bind_host", settings.get("http.bind_host", settings.get("http.host")));
  this.publishHost = componentSettings.get("publish_host", settings.get("http.publish_host", settings.get("http.host")));
  this.tcpNoDelay = componentSettings.getAsBoolean("tcp_no_delay", settings.getAsBoolean(TCP_NO_DELAY, true));
  this.tcpKeepAlive = componentSettings.getAsBoolean("tcp_keep_alive", settings.getAsBoolean(TCP_KEEP_ALIVE, true));
  this.reuseAddress = componentSettings.getAsBoolean("reuse_address", settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
  this.tcpSendBufferSize = componentSettings.getAsBytesSize("tcp_send_buffer_size", settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
  this.tcpReceiveBufferSize = componentSettings.getAsBytesSize("tcp_receive_buffer_size", settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
  long defaultReceiverPredictor = 512 * 1024;
  if (JvmInfo.jvmInfo().mem().directMemoryMax().bytes() > 0) {
    // we can guess a better default...
    long l = (long) ((0.3 * JvmInfo.jvmInfo().mem().directMemoryMax().bytes()) / workerCount);
    defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
  }
  // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
  ByteSizeValue receivePredictorMin = componentSettings.getAsBytesSize("receive_predictor_min", componentSettings.getAsBytesSize("receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
  ByteSizeValue receivePredictorMax = componentSettings.getAsBytesSize("receive_predictor_max", componentSettings.getAsBytesSize("receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
  if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
    receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
  } else {
    receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
  }
  this.compression = settings.getAsBoolean("http.compression", false);
  this.compressionLevel = settings.getAsInt("http.compression_level", 6);
  // validate max content length
  if (maxContentLength.bytes() > Integer.MAX_VALUE) {
    logger.warn("maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]");
    maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
  }
  this.maxContentLength = maxContentLength;
  logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], receive_predictor[{}->{}]",
      maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictorMin, receivePredictorMax);
}
/** @return the node settings this transport was created with */
public Settings settings() {
  return this.settings;
}

/** Wires in the adapter that dispatches incoming HTTP requests. */
public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {
  this.httpServerAdapter = httpServerAdapter;
}
/**
 * Builds the Netty server bootstrap, applies configured socket options and binds the first
 * available port of the configured range, then records the bound/publish addresses.
 *
 * @throws ElasticsearchException wrapped as BindHttpException/BindTransportException on failure
 */
@Override
protected void doStart() throws ElasticsearchException {
  this.serverOpenChannels = new OpenChannelsHandler(logger);
  // choose blocking (OIO) vs non-blocking (NIO) socket factories per configuration
  if (blockingServer) {
    serverBootstrap = new ServerBootstrap(new OioServerSocketChannelFactory(
        Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_boss")),
        Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_worker"))
    ));
  } else {
    serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
        Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_boss")),
        Executors.newCachedThreadPool(daemonThreadFactory(settings, "http_server_worker")),
        workerCount));
  }
  serverBootstrap.setPipelineFactory(new MyChannelPipelineFactory(this));
  // apply socket options only when explicitly configured
  if (tcpNoDelay != null) {
    serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay);
  }
  if (tcpKeepAlive != null) {
    serverBootstrap.setOption("child.keepAlive", tcpKeepAlive);
  }
  if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
    serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes());
  }
  if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
    serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes());
  }
  serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
  serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
  if (reuseAddress != null) {
    serverBootstrap.setOption("reuseAddress", reuseAddress);
    serverBootstrap.setOption("child.reuseAddress", reuseAddress);
  }
  // Bind and start to accept incoming connections.
  InetAddress hostAddressX;
  try {
    hostAddressX = networkService.resolveBindHostAddress(bindHost);
  } catch (IOException e) {
    throw new BindHttpException("Failed to resolve host [" + bindHost + "]", e);
  }
  final InetAddress hostAddress = hostAddressX;
  // try each port in the configured range until one binds successfully
  PortsRange portsRange = new PortsRange(port);
  final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
  boolean success = portsRange.iterate(new PortsRange.PortCallback() {
    @Override
    public boolean onPortNumber(int portNumber) {
      try {
        serverChannel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
      } catch (Exception e) {
        lastException.set(e);
        return false;
      }
      return true;
    }
  });
  if (!success) {
    throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
  }
  InetSocketAddress boundAddress = (InetSocketAddress) serverChannel.getLocalAddress();
  InetSocketAddress publishAddress;
  try {
    publishAddress = new InetSocketAddress(networkService.resolvePublishHostAddress(publishHost), boundAddress.getPort());
  } catch (Exception e) {
    throw new BindTransportException("Failed to resolve publish address", e);
  }
  this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress));
}
@Override
protected void doStop() throws ElasticsearchException {
    // Shutdown order matters: first stop accepting new connections (server
    // channel), then close the already-open client channels, and only then
    // release Netty's external resources (thread pools).
    if (serverChannel != null) {
        serverChannel.close().awaitUninterruptibly();
        serverChannel = null;
    }
    if (serverOpenChannels != null) {
        serverOpenChannels.close();
        serverOpenChannels = null;
    }
    if (serverBootstrap != null) {
        serverBootstrap.releaseExternalResources();
        serverBootstrap = null;
    }
}
@Override
protected void doClose() throws ElasticsearchException {
    // Nothing to release here: every resource is already torn down in doStop().
}
// Returns the bound/publish address pair computed at the end of doStart();
// null until the transport has been started.
public BoundTransportAddress boundAddress() {
    return this.boundAddress;
}
@Override
public HttpInfo info() {
    // Report the addresses this transport listens on plus the request size limit.
    final BoundTransportAddress address = boundAddress();
    return new HttpInfo(address, maxContentLength.bytes());
}
@Override
public HttpStats stats() {
    // Snapshot the handler reference once: doStop() may null the field concurrently.
    final OpenChannelsHandler channels = serverOpenChannels;
    if (channels == null) {
        return new HttpStats(0, 0);
    }
    return new HttpStats(channels.numberOfOpenChannels(), channels.totalChannels());
}
// Hands a fully decoded HTTP request over to the REST layer via the adapter.
void dispatchRequest(HttpRequest request, HttpChannel channel) {
    httpServerAdapter.dispatchRequest(request, channel);
}
/**
 * Handles exceptions raised on a client channel. Read timeouts are traced and
 * the channel closed; once the transport is no longer started all other
 * exceptions are ignored; otherwise the channel is closed, logged at debug
 * level for ordinary connection-close exceptions and warn level for the rest.
 */
void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
    final Throwable cause = e.getCause();
    if (cause instanceof ReadTimeoutException) {
        if (logger.isTraceEnabled()) {
            logger.trace("Connection timeout [{}]", ctx.getChannel().getRemoteAddress());
        }
        ctx.getChannel().close();
        return;
    }
    if (!lifecycle.started()) {
        // The transport is shutting down; exceptions are expected — ignore.
        return;
    }
    if (NetworkExceptionHelper.isCloseConnectionException(cause)) {
        logger.debug("Caught exception while handling client http traffic, closing connection {}", cause, ctx.getChannel());
    } else {
        logger.warn("Caught exception while handling client http traffic, closing connection {}", cause, ctx.getChannel());
    }
    ctx.getChannel().close();
}
/**
 * Builds the Netty channel pipeline for every accepted connection.
 * Handler order is significant: open-channel tracking, request decoding,
 * (optional) decompression, chunk aggregation, response encoding,
 * (optional) compression, and finally the request handler.
 */
static class MyChannelPipelineFactory implements ChannelPipelineFactory {
    private final NettyHttpServerTransport transport;
    // One shared handler instance serves all channels; it must therefore be
    // safe to invoke from multiple channels.
    private final HttpRequestHandler requestHandler;
    MyChannelPipelineFactory(NettyHttpServerTransport transport) {
        this.transport = transport;
        this.requestHandler = new HttpRequestHandler(transport);
    }
    @Override
    public ChannelPipeline getPipeline() throws Exception {
        ChannelPipeline pipeline = Channels.pipeline();
        pipeline.addLast("openChannels", transport.serverOpenChannels);
        // The decoder enforces the configured limits on request line, headers and chunks.
        HttpRequestDecoder requestDecoder = new HttpRequestDecoder(
                (int) transport.maxInitialLineLength.bytes(),
                (int) transport.maxHeaderSize.bytes(),
                (int) transport.maxChunkSize.bytes()
        );
        if (transport.maxCumulationBufferCapacity != null) {
            // Clamp to Integer.MAX_VALUE: Netty's setter takes an int.
            if (transport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
                requestDecoder.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
            } else {
                requestDecoder.setMaxCumulationBufferCapacity((int) transport.maxCumulationBufferCapacity.bytes());
            }
        }
        if (transport.maxCompositeBufferComponents != -1) {
            requestDecoder.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
        }
        pipeline.addLast("decoder", requestDecoder);
        if (transport.compression) {
            pipeline.addLast("decoder_compress", new HttpContentDecompressor());
        }
        // Aggregate chunked requests into a single message, bounded by maxContentLength.
        HttpChunkAggregator httpChunkAggregator = new HttpChunkAggregator((int) transport.maxContentLength.bytes());
        if (transport.maxCompositeBufferComponents != -1) {
            httpChunkAggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
        }
        pipeline.addLast("aggregator", httpChunkAggregator);
        pipeline.addLast("encoder", new HttpResponseEncoder());
        if (transport.compression) {
            pipeline.addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel));
        }
        pipeline.addLast("handler", requestHandler);
        return pipeline;
    }
}
} | 1no label
| src_main_java_org_elasticsearch_http_netty_NettyHttpServerTransport.java |
2,007 | @Service("blCustomerAddressService")
public class CustomerAddressServiceImpl implements CustomerAddressService {
@Resource(name="blCustomerAddressDao")
protected CustomerAddressDao customerAddressDao;
/**
 * Saves a customer address, maintaining the "single default address" invariant:
 * the customer's first active address is forced to be the default, and when a
 * new default is saved every other active address loses its default flag.
 *
 * @param customerAddress the address to persist
 * @return the persisted address
 */
public CustomerAddress saveCustomerAddress(CustomerAddress customerAddress) {
    // if parameter address is set as default, unset all other default addresses
    List<CustomerAddress> activeCustomerAddresses = readActiveCustomerAddressesByCustomerId(customerAddress.getCustomer().getId());
    if (activeCustomerAddresses != null && activeCustomerAddresses.isEmpty()) {
        // First address for this customer: it must be the default.
        customerAddress.getAddress().setDefault(true);
    } else {
        if (customerAddress.getAddress().isDefault()) {
            for (CustomerAddress activeCustomerAddress : activeCustomerAddresses) {
                // BUGFIX: ids are boxed Longs, so "!=" compared object identity and
                // only behaved correctly for ids inside the Long cache (-128..127).
                // Use equals() for value equality; a null id on the incoming (not yet
                // persisted) address correctly compares unequal to every stored id.
                if (!activeCustomerAddress.getId().equals(customerAddress.getId())
                        && activeCustomerAddress.getAddress().isDefault()) {
                    activeCustomerAddress.getAddress().setDefault(false);
                    customerAddressDao.save(activeCustomerAddress);
                }
            }
        }
    }
    return customerAddressDao.save(customerAddress);
}
// Returns all non-archived addresses for the given customer (DAO delegate).
public List<CustomerAddress> readActiveCustomerAddressesByCustomerId(Long customerId) {
    return customerAddressDao.readActiveCustomerAddressesByCustomerId(customerId);
}
// Looks up a single address by its primary key (DAO delegate).
public CustomerAddress readCustomerAddressById(Long customerAddressId) {
    return customerAddressDao.readCustomerAddressById(customerAddressId);
}
// Marks the given address as the customer's default (DAO delegate).
public void makeCustomerAddressDefault(Long customerAddressId, Long customerId) {
    customerAddressDao.makeCustomerAddressDefault(customerAddressId, customerId);
}
// Removes the address with the given id (DAO delegate).
public void deleteCustomerAddressById(Long customerAddressId){
    customerAddressDao.deleteCustomerAddressById(customerAddressId);
}
// Returns the customer's current default address, if any (DAO delegate).
public CustomerAddress findDefaultCustomerAddress(Long customerId) {
    return customerAddressDao.findDefaultCustomerAddress(customerId);
}
// Instantiates a new, unsaved CustomerAddress via the DAO/entity factory.
public CustomerAddress create() {
    return customerAddressDao.create();
}
} | 1no label
| core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_service_CustomerAddressServiceImpl.java |
730 | ItemListener listener = new ItemListener() {
@Override
public void itemAdded(ItemEvent item) {
    // Forward add events to the registered client endpoint.
    send(item);
}
@Override
public void itemRemoved(ItemEvent item) {
    // Forward remove events to the registered client endpoint.
    send(item);
}
// Serializes the event and pushes it to the client endpoint; events for
// endpoints that are no longer live are silently dropped.
private void send(ItemEvent event) {
    if (endpoint.live()) {
        Data item = clientEngine.toData(event.getItem());
        final ItemEventType eventType = event.getEventType();
        final String uuid = event.getMember().getUuid();
        PortableItemEvent portableItemEvent = new PortableItemEvent(item, eventType, uuid);
        endpoint.sendEvent(portableItemEvent, getCallId());
    }
}
}; | 1no label
| hazelcast_src_main_java_com_hazelcast_collection_client_CollectionAddListenerRequest.java |
2,336 | public class JsonSettingsLoader extends XContentSettingsLoader {
@Override
public XContentType contentType() {
    // This settings loader parses JSON documents.
    return XContentType.JSON;
}
} | 0true
| src_main_java_org_elasticsearch_common_settings_loader_JsonSettingsLoader.java |
1,882 | boolean b = h1.executeTransaction(options, new TransactionalTask<Boolean>() {
// Transactional task (test code): verifies that a predicate query on a
// transactional map reflects an uncommitted put made inside the transaction.
public Boolean execute(TransactionalTaskContext context) throws TransactionException {
    final TransactionalMap<Object, Object> txMap = context.getMap(mapName);
    assertEquals(1, txMap.values(new SqlPredicate("age > 21")).size());
    txMap.put(1, employeeAtAge23);
    // The new entry matches the predicate but replaces key 1, so the count stays 1.
    Collection coll = txMap.values(new SqlPredicate("age > 21"));
    assertEquals(1, coll.size());
    return true;
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_MapTransactionTest.java |
709 | static class UpdateResult {
final UpdateHelper.Result result;
final ActionRequest actionRequest;
final boolean retry;
final Throwable error;
final WriteResult writeResult;
final UpdateResponse noopResult;
// General constructor: result of a translated update that produced a write
// (or failed); noopResult is never set on this path.
UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, boolean retry, Throwable error, WriteResult writeResult) {
    this.result = result;
    this.actionRequest = actionRequest;
    this.retry = retry;
    this.error = error;
    this.writeResult = writeResult;
    this.noopResult = null;
}
// Convenience constructor for a successful write: no retry, no error, no noop.
// Delegates to the general constructor instead of duplicating the field
// assignments, keeping the two in sync by construction.
UpdateResult(UpdateHelper.Result result, ActionRequest actionRequest, WriteResult writeResult) {
    this(result, actionRequest, false, null, writeResult);
}
// Constructor for a "noop" update: the update translated to no write and the
// precomputed UpdateResponse is returned as-is.
public UpdateResult(UpdateHelper.Result result, UpdateResponse updateResponse) {
    this.result = result;
    this.noopResult = updateResponse;
    this.actionRequest = null;
    this.writeResult = null;
    this.retry = false;
    this.error = null;
}
// True when the update translation/write produced an error.
boolean failure() {
    return error != null;
}
// True when either a write result or a noop response is present.
boolean success() {
    return noopResult != null || writeResult != null;
}
// Returns the translated action request, cast to the caller's expected type.
// The unchecked cast is safe by convention: callers know which request type
// the update produced.
@SuppressWarnings("unchecked")
<T extends ActionRequest> T request() {
    return (T) actionRequest;
}
} | 0true
| src_main_java_org_elasticsearch_action_bulk_TransportShardBulkAction.java |
1,263 | public class ODataLocalHole extends OSingleFileSegment {
private static final int DEF_START_SIZE = 262144;
private static final int RECORD_SIZE = 12;
private int maxHoleSize = -1;
private final List<Integer> freeHoles = new ArrayList<Integer>();
private final ODataHoleInfo cursor = new ODataHoleInfo();
private final List<ODataHoleInfo> availableHolesList = new ArrayList<ODataHoleInfo>();
private final OMVRBTreeMemory<ODataHoleInfo, ODataHoleInfo> availableHolesBySize;
private final OMVRBTreeMemory<ODataHoleInfo, ODataHoleInfo> availableHolesByPosition;
private final String PROFILER_DATA_RECYCLED_COMPLETE;
private final String PROFILER_DATA_RECYCLED_PARTIAL;
private final String PROFILER_DATA_RECYCLED_NOTFOUND;
private final String PROFILER_DATA_HOLE_CREATE;
private final String PROFILER_DATA_HOLE_UPDATE;
/**
 * Creates the hole segment for the given storage, initializing the profiler
 * metric names and the two in-memory indexes (ordered by hole size and by
 * file position).
 */
public ODataLocalHole(final OStorageLocal iStorage, final OStorageFileConfiguration iConfig) throws IOException {
    super(iStorage, iConfig);
    PROFILER_DATA_RECYCLED_COMPLETE = "db." + storage.getName() + ".data.recycled.complete";
    PROFILER_DATA_RECYCLED_PARTIAL = "db." + storage.getName() + ".data.recycled.partial";
    PROFILER_DATA_RECYCLED_NOTFOUND = "db." + storage.getName() + ".data.recycled.notFound";
    PROFILER_DATA_HOLE_CREATE = "db." + storage.getName() + ".data.createHole";
    PROFILER_DATA_HOLE_UPDATE = "db." + storage.getName() + ".data.updateHole";
    availableHolesBySize = new OMVRBTreeMemory<ODataHoleInfo, ODataHoleInfo>(512, 0.7f);
    // Secondary index ordered by the hole's offset in the data file.
    availableHolesByPosition = new OMVRBTreeMemory<ODataHoleInfo, ODataHoleInfo>(new Comparator<ODataHoleInfo>() {
        public int compare(final ODataHoleInfo o1, final ODataHoleInfo o2) {
            if (o1.dataOffset == o2.dataOffset)
                return 0;
            if (o1.dataOffset > o2.dataOffset)
                return 1;
            return -1;
        }
    });
}
@Override
public synchronized boolean open() throws IOException {
    // After opening the underlying file, rebuild the in-memory hole indexes.
    final boolean status = super.open();
    loadHolesInMemory();
    return status;
}
@Override
public synchronized void create(final int iStartSize) throws IOException {
    // Fall back to the default initial size when the caller passes -1.
    super.create(iStartSize > -1 ? iStartSize : DEF_START_SIZE);
}
/**
 * Registers a new hole (free region) in the data segment, both in memory and
 * on file. A previously freed slot is recycled when available; otherwise a new
 * RECORD_SIZE entry is appended to the hole file.
 *
 * @param iRecordOffset offset of the freed region in the data file
 * @param iRecordSize   size in bytes of the freed region
 * @throws IOException on file write failure
 */
public synchronized void createHole(final long iRecordOffset, final int iRecordSize) throws IOException {
    final long timer = Orient.instance().getProfiler().startChrono();
    // IN MEMORY
    final int recycledPosition;
    final ODataHoleInfo hole;
    if (!freeHoles.isEmpty()) {
        // RECYCLE THE FIRST FREE HOLE
        recycledPosition = freeHoles.remove(0);
        hole = availableHolesList.get(recycledPosition);
        hole.dataOffset = iRecordOffset;
        hole.size = iRecordSize;
    } else {
        // APPEND A NEW ONE
        recycledPosition = getHoles();
        hole = new ODataHoleInfo(iRecordSize, iRecordOffset, recycledPosition);
        availableHolesList.add(hole);
        file.allocateSpace(RECORD_SIZE);
    }
    availableHolesBySize.put(hole, hole);
    availableHolesByPosition.put(hole, hole);
    // Track the largest known hole for the fast negative check in popFirstAvailableHole().
    if (maxHoleSize < iRecordSize)
        maxHoleSize = iRecordSize;
    // TO FILE
    final long p = recycledPosition * RECORD_SIZE;
    file.writeLong(p, iRecordOffset);
    file.writeInt(p + OBinaryProtocol.SIZE_LONG, iRecordSize);
    Orient.instance().getProfiler()
        .stopChrono(PROFILER_DATA_HOLE_CREATE, "Time to create a hole in data segment", timer, "db.*.data.createHole");
}
/**
 * Returns the hole closest to the region [iHolePosition, iHolePosition + iHoleSize),
 * considering the nearest holes below and above the region and discarding
 * candidates outside the [iLowerRange, iHigherRange] window. Returns null
 * when neither candidate qualifies.
 */
public synchronized ODataHoleInfo getCloserHole(final long iHolePosition, final int iHoleSize, final long iLowerRange,
        final long iHigherRange) {
    cursor.dataOffset = iHolePosition;
    ODataHoleInfo lowerHole = availableHolesByPosition.lowerKey(cursor);
    cursor.dataOffset = iHolePosition + iHoleSize;
    ODataHoleInfo higherHole = availableHolesByPosition.higherKey(cursor);
    if (lowerHole != null && higherHole != null && lowerHole != higherHole && lowerHole.dataOffset >= higherHole.dataOffset)
        // CHECK ERROR
        throw new OStorageException("Found bad order in hole list: " + lowerHole + " is higher than " + higherHole);
    final ODataHoleInfo closestHole;
    if (lowerHole != null && (lowerHole.dataOffset + lowerHole.size < iLowerRange))
        // OUT OF RANGE: INVALID IT
        lowerHole = null;
    if (higherHole != null && (higherHole.dataOffset > iHigherRange))
        // OUT OF RANGE: INVALID IT
        higherHole = null;
    // Pick whichever candidate survived; when both did, take the nearer one
    // (distance measured from the end of the lower hole / start of the higher).
    if (lowerHole == higherHole)
        closestHole = higherHole;
    else if (lowerHole == null && higherHole != null)
        closestHole = higherHole;
    else if (lowerHole != null && higherHole == null)
        closestHole = lowerHole;
    else if (iHolePosition - (lowerHole.dataOffset + lowerHole.size) > higherHole.dataOffset - iHolePosition)
        closestHole = higherHole;
    else
        closestHole = lowerHole;
    return closestHole;
}
/**
 * Finds and consumes a hole able to host a record of the requested size.
 * An exact-size hole is removed entirely; otherwise the biggest hole is
 * shrunk by the requested size, provided enough room remains for at least
 * another record (RECORD_FIX_SIZE plus 50 bytes of slack).
 *
 * @param iRecordSize size in bytes needed for the new record
 * @return the data-file offset to reuse, or -1 when no suitable hole exists
 * @throws IOException on file update failure
 */
protected synchronized long popFirstAvailableHole(final int iRecordSize) throws IOException {
    // Fast path: no hole can possibly fit, skip the index lookup entirely.
    if (maxHoleSize > -1 && iRecordSize + ODataLocal.RECORD_FIX_SIZE + 50 > maxHoleSize)
        // DON'T BROWSE: NO ONE HOLE WITH THIS SIZE IS AVAILABLE
        return -1;
    final long timer = Orient.instance().getProfiler().startChrono();
    if (!availableHolesBySize.isEmpty()) {
        cursor.size = iRecordSize;
        // SEARCH THE HOLE WITH THE SAME SIZE
        ODataHoleInfo hole = availableHolesBySize.get(cursor);
        if (hole != null && hole.size == iRecordSize) {
            // PERFECT MATCH: DELETE THE HOLE
            Orient
                .instance()
                .getProfiler()
                .stopChrono(PROFILER_DATA_RECYCLED_COMPLETE, "Time to recycle the hole space completely in data segment", timer,
                    "db.*.data.recycled.complete");
            final long pos = hole.dataOffset;
            deleteHole(hole.holeOffset);
            return pos;
        }
        // TRY WITH THE BIGGEST HOLE
        hole = availableHolesBySize.lastKey();
        if (hole.size > iRecordSize + ODataLocal.RECORD_FIX_SIZE + 50) {
            // GOOD MATCH SINCE THE HOLE IS BIG ENOUGH ALSO FOR ANOTHER RECORD: UPDATE THE HOLE WITH THE DIFFERENCE
            final long pos = hole.dataOffset;
            Orient
                .instance()
                .getProfiler()
                .stopChrono(PROFILER_DATA_RECYCLED_PARTIAL, "Time to recycle the hole space partially in data segment", timer,
                    "db.*.data.recycled.partial");
            updateHole(hole, hole.dataOffset + iRecordSize, hole.size - iRecordSize);
            return pos;
        }
    }
    Orient
        .instance()
        .getProfiler()
        .stopChrono(PROFILER_DATA_RECYCLED_NOTFOUND, "Time to recycle a hole space in data segment, but without luck", timer,
            "db.*.data.notFound");
    return -1;
}
/**
 * Returns the in-memory hole descriptor stored at the given slot position.
 * (Note: the previous javadoc incorrectly described an OPhysicalPosition
 * parameter; this method takes a slot index and returns the descriptor.)
 *
 * @param iPosition slot index inside the hole list
 * @return the hole descriptor, or null when the slot was freed (dataOffset == -1)
 */
public synchronized ODataHoleInfo getHole(final int iPosition) {
    final ODataHoleInfo hole = availableHolesList.get(iPosition);
    if (hole.dataOffset == -1)
        return null;
    return hole;
}
/**
 * Updates a hole's offset and/or size, keeping the two in-memory indexes and
 * the on-file entry in sync. Index entries are removed and re-inserted only
 * for the attribute that actually changed, since each index is keyed on one
 * of them. (The previous javadoc referenced a nonexistent parameter.)
 *
 * @param iHole          hole descriptor to update
 * @param iNewDataOffset new offset in the data file
 * @param iNewRecordSize new size in bytes
 * @throws IOException on file write failure
 */
public synchronized void updateHole(final ODataHoleInfo iHole, final long iNewDataOffset, final int iNewRecordSize)
        throws IOException {
    final long timer = Orient.instance().getProfiler().startChrono();
    final boolean offsetChanged = iNewDataOffset != iHole.dataOffset;
    final boolean sizeChanged = iNewRecordSize != iHole.size;
    if (maxHoleSize < iNewRecordSize)
        maxHoleSize = iNewRecordSize;
    // IN MEMORY: remove from the affected index BEFORE mutating the key fields.
    if (offsetChanged)
        availableHolesByPosition.remove(iHole);
    if (sizeChanged)
        availableHolesBySize.remove(iHole);
    if (offsetChanged)
        iHole.dataOffset = iNewDataOffset;
    if (sizeChanged)
        iHole.size = iNewRecordSize;
    if (offsetChanged)
        availableHolesByPosition.put(iHole, iHole);
    if (sizeChanged)
        availableHolesBySize.put(iHole, iHole);
    // TO FILE
    final long holePosition = iHole.holeOffset * RECORD_SIZE;
    if (offsetChanged)
        file.writeLong(holePosition, iNewDataOffset);
    if (sizeChanged)
        file.writeInt(holePosition + OBinaryProtocol.SIZE_LONG, iNewRecordSize);
    Orient.instance().getProfiler()
        .stopChrono(PROFILER_DATA_HOLE_UPDATE, "Time to update a hole in data segment", timer, "db.*.updateHole");
}
/**
 * Deletes the hole at the given slot: removes it from both in-memory indexes,
 * marks the slot as reusable and invalidates the on-file entry by writing an
 * offset of -1. (The previous javadoc referenced a nonexistent parameter.)
 *
 * @param iHolePosition slot index of the hole to delete
 * @throws IOException on file write failure
 */
public synchronized void deleteHole(int iHolePosition) throws IOException {
    // IN MEMORY
    final ODataHoleInfo hole = availableHolesList.get(iHolePosition);
    availableHolesBySize.remove(hole);
    availableHolesByPosition.remove(hole);
    hole.dataOffset = -1;
    freeHoles.add(iHolePosition);
    // TO FILE: note the parameter is reused as the byte offset from here on.
    iHolePosition = iHolePosition * RECORD_SIZE;
    file.writeLong(iHolePosition, -1);
}
// Number of hole slots persisted in the segment file; each slot occupies
// exactly RECORD_SIZE bytes.
public synchronized int getHoles() {
    final long filledBytes = file.getFilledUpTo();
    return (int) (filledBytes / RECORD_SIZE);
}
@Override
public synchronized void synch() throws IOException {
    // Overridden only to add the "synchronized" modifier around the flush.
    super.synch();
}
@Override
public synchronized void setSoftlyClosed(boolean softlyClosed) throws IOException {
    // Overridden only to add the "synchronized" modifier.
    super.setSoftlyClosed(softlyClosed);
}
// Rebuilds the in-memory hole indexes from the persisted hole file; called
// from open(). Entries with dataOffset == -1 are freed slots kept for reuse.
private void loadHolesInMemory() throws IOException {
    final int holes = getHoles();
    for (int pos = 0; pos < holes; ++pos) {
        final long dataOffset = file.readLong(pos * RECORD_SIZE);
        final int recordSize = file.readInt(pos * RECORD_SIZE + OBinaryProtocol.SIZE_LONG);
        final ODataHoleInfo hole = new ODataHoleInfo(recordSize, dataOffset, pos);
        availableHolesList.add(hole);
        if (dataOffset == -1)
            // Freed slot: available for recycling by createHole().
            freeHoles.add(pos);
        else {
            availableHolesBySize.put(hole, hole);
            availableHolesByPosition.put(hole, hole);
            if (maxHoleSize < recordSize)
                maxHoleSize = recordSize;
        }
    }
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_ODataLocalHole.java |
732 | public class CollectionClearRequest extends CollectionRequest {
// No-arg constructor required for deserialization.
public CollectionClearRequest() {
}
// Creates a clear request targeting the named collection.
public CollectionClearRequest(String name) {
    super(name);
}
@Override
protected Operation prepareOperation() {
    // Translate this client request into the member-side clear operation.
    return new CollectionClearOperation(name);
}
@Override
public int getClassId() {
    // Portable class id used by the client protocol serializer.
    return CollectionPortableHook.COLLECTION_CLEAR;
}
@Override
public String getRequiredAction() {
    // Clearing a collection requires the "remove" permission.
    return ActionConstants.ACTION_REMOVE;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_collection_client_CollectionClearRequest.java |
2,184 | public class LimitFilter extends NoCacheFilter {
private final int limit;
private int counter;
// Creates a filter that stops matching once roughly `limit` documents passed.
public LimitFilter(int limit) {
    this.limit = limit;
}
// The configured maximum number of matching documents.
public int getLimit() {
    return limit;
}
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    // NOTE(review): `counter` is mutable instance state shared across segments
    // and across executions of this filter — it is never reset and is not
    // thread-safe; confirm this filter is only used single-threaded, per query.
    if (counter > limit) {
        // Limit already exhausted by earlier segments: match nothing more.
        return null;
    }
    return new LimitDocIdSet(context.reader().maxDoc(), acceptDocs, limit);
}
/**
 * DocIdSet that accepts documents until the enclosing filter's shared
 * counter exceeds the limit; all later documents are rejected.
 */
public class LimitDocIdSet extends MatchDocIdSet {
    private final int limit;
    public LimitDocIdSet(int maxDoc, @Nullable Bits acceptDocs, int limit) {
        super(maxDoc, acceptDocs);
        this.limit = limit;
    }
    @Override
    protected boolean matchDoc(int doc) {
        // Increments the enclosing filter's counter (shared across segments).
        if (++counter > limit) {
            return false;
        }
        return true;
    }
}
} | 0true
| src_main_java_org_elasticsearch_common_lucene_search_LimitFilter.java |
158 | class RenameAliasProposal implements ICompletionProposal,
ICompletionProposalExtension6 {
private final Tree.Alias alias;
private final Declaration dec;
private final CeylonEditor editor;
// Private: instances are created only through addRenameAliasProposal().
private RenameAliasProposal(Tree.Alias alias,
        Declaration dec, CeylonEditor editor) {
    this.alias = alias;
    this.dec = dec;
    this.editor = editor;
}
@Override
public void apply(IDocument document) {
    // Prefer in-place linked-mode editing when enabled; otherwise fall back
    // to the dialog-based refactoring action.
    if (useLinkedMode()) {
        new EnterAliasLinkedMode(editor).start();
    }
    else {
        new EnterAliasRefactoringAction(editor).run();
    }
}
// Offers the rename-alias quick fix only when the import actually declares
// an alias and resolves to a known declaration.
static void addRenameAliasProposal(Tree.ImportMemberOrType imt,
        Collection<ICompletionProposal> proposals,
        CeylonEditor editor) {
    if (imt!=null) {
        Declaration dec = imt.getDeclarationModel();
        Tree.Alias a = imt.getAlias();
        if (dec!=null && a!=null) {
            proposals.add(new RenameAliasProposal(a, dec, editor));
        }
    }
}
@Override
public StyledString getStyledDisplayString() {
    // Reuse the plain display string with standard proposal highlighting.
    return Highlights.styleProposal(getDisplayString(), false);
}
@Override
public Point getSelection(IDocument document) {
    // No selection change; the linked mode / refactoring drives the cursor.
    return null;
}
@Override
public String getAdditionalProposalInfo() {
    // No extra hover documentation for this proposal.
    return null;
}
@Override
public String getDisplayString() {
    // Label shown in the quick-fix menu, e.g. Rename alias 'foo' of 'Bar'.
    StringBuilder label = new StringBuilder("Rename alias '");
    label.append(alias.getIdentifier().getText());
    label.append("' of '");
    label.append(dec.getName());
    label.append("'");
    return label.toString();
}
@Override
public Image getImage() {
    // Standard "rename" refactoring icon.
    return RENAME;
}
@Override
public IContextInformation getContextInformation() {
    // No context information for this proposal.
    return null;
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_RenameAliasProposal.java |
467 | new ODbRelatedCall<Iterator<Map.Entry<Object, Object>>>() {
// Runs inside the database-bound context: exposes the index entries iterator.
public Iterator<Map.Entry<Object, Object>> call() {
    return indexOne.iterator();
}
}); | 1no label
| core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseCompare.java |
2,866 | public class GreekAnalyzerProvider extends AbstractIndexAnalyzerProvider<GreekAnalyzer> {
private final GreekAnalyzer analyzer;
@Inject
public GreekAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettings, name, settings);
    // Stopwords come from the index settings, defaulting to Lucene's Greek set.
    analyzer = new GreekAnalyzer(version,
        Analysis.parseStopWords(env, settings, GreekAnalyzer.getDefaultStopSet(), version));
}
@Override
public GreekAnalyzer get() {
    // The analyzer is immutable and shared; always return the same instance.
    return this.analyzer;
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_GreekAnalyzerProvider.java |
1,176 | public enum ItemEventType {
ADDED(1),
REMOVED(2);
private int type;
// Binds the wire-protocol type id to the enum member.
private ItemEventType(final int type) {
    this.type = type;
}
// The numeric id used on the wire for this event type.
public int getType() {
    return type;
}
// Resolves a wire-protocol type id back to its enum member; returns null for
// an unknown id (callers must handle that case).
public static ItemEventType getByType(final int eventType) {
    final ItemEventType[] all = values();
    for (int i = 0; i < all.length; i++) {
        if (all[i].type == eventType) {
            return all[i];
        }
    }
    return null;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_core_ItemEventType.java |
1,499 | public class DefaultNodeInitializer implements NodeInitializer {
protected ILogger logger;
protected ILogger systemLogger;
protected Node node;
protected String version;
protected String build;
@Override
public void beforeInitialize(Node node) {
    // Capture the node reference and loggers, then read version/build info.
    this.node = node;
    systemLogger = node.getLogger("com.hazelcast.system");
    logger = node.getLogger("com.hazelcast.initializer");
    parseSystemProps();
}
@Override
public void printNodeInfo(Node node) {
    // Startup banner: edition, version, build and listen address.
    systemLogger.info("Hazelcast Community Edition " + version + " ("
        + build + ") starting at " + node.getThisAddress());
    systemLogger.info("Copyright (C) 2008-2014 Hazelcast.com");
}
@Override
public void afterInitialize(Node node) {
    // No post-initialization work in the community edition.
}
// Reads the version and build identifiers from the node's build info.
protected void parseSystemProps() {
    version = node.getBuildInfo().getVersion();
    build = node.getBuildInfo().getBuild();
}
@Override
public SecurityContext getSecurityContext() {
    // Security is an enterprise-only feature: warn and return no context.
    logger.warning("Security features are only available on Hazelcast Enterprise Edition!");
    return null;
}
@Override
public Storage<DataRef> getOffHeapStorage() {
    // Off-heap storage is enterprise-only: fail fast rather than degrade.
    throw new UnsupportedOperationException("Offheap feature is only available on Hazelcast Enterprise Edition!");
}
// NOTE(review): method name is missing a 't' ("geWan..."), but it overrides
// the NodeInitializer interface, so it cannot be renamed here.
@Override
public WanReplicationService geWanReplicationService() {
    return new WanReplicationServiceImpl(node);
}
@Override
public void destroy() {
    // Nothing to release; just log for traceability.
    logger.info("Destroying node initializer.");
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_instance_DefaultNodeInitializer.java |
357 | public interface HBaseCompat {
/**
 * Configure the compression scheme {@code algo} on a column family
 * descriptor {@code cd}. The {@code algo} parameter is a string value
 * corresponding to one of the values of HBase's Compression enum. The
 * Compression enum has moved between packages as HBase has evolved, which
 * is why this method has a String argument in the signature instead of the
 * enum itself.
 *
 * @param cd
 *            column family to configure
 * @param algo
 *            compression type to use
 */
public void setCompression(HColumnDescriptor cd, String algo);
/**
 * Create and return a HTableDescriptor instance with the given name. The
 * constructors used by this method have remained stable over HBase
 * development so far, but the old HTableDescriptor(String) constructor and
 * its byte[] friends are now marked deprecated and may eventually be removed
 * in favor of the HTableDescriptor(TableName) constructor. That constructor
 * (and the TableName type) only exists in newer HBase versions. Hence this
 * compatibility method.
 *
 * @param tableName
 *            HBase table name
 * @return a new table descriptor instance
 */
public HTableDescriptor newTableDescriptor(String tableName);
} | 0true
| titan-hbase-parent_titan-hbase-core_src_main_java_com_thinkaurelius_titan_diskstorage_hbase_HBaseCompat.java |
391 | new Thread() {
public void run() {
    // Test helper: count down only when the timed tryLock actually succeeds.
    try {
        if (mm.tryLock(key, 4, TimeUnit.SECONDS)) {
            tryLockSuccess.countDown();
        }
    } catch (InterruptedException e) {
        fail(e.getMessage());
    }
}
}.start(); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_multimap_ClientMultiMapLockTest.java |
610 | public interface BroadleafSandBoxResolver {
/**
 * Resolves the sandbox from a raw servlet request.
 *
 * @deprecated use {@link #resolveSandBox(WebRequest, Site)} instead
 */
@Deprecated
public SandBox resolveSandBox(HttpServletRequest request, Site site);
/**
 * Resolve the sandbox for the given site and request.
 *
 * @param request the current web request
 * @param site    the site the request is resolved against
 * @return the sandbox for the current request
 */
public SandBox resolveSandBox(WebRequest request, Site site);
} | 0true
| common_src_main_java_org_broadleafcommerce_common_web_BroadleafSandBoxResolver.java |
303 | nodesHotThreadsRequestBuilder.execute(new ActionListener<NodesHotThreadsResponse>() {
@Override
public void onResponse(NodesHotThreadsResponse nodeHotThreads) {
    // Test callback: validate the response; on any assertion failure the
    // success flag stays false and the error flag is raised, but the latch
    // is always counted down so the test does not hang.
    boolean success = false;
    try {
        assertThat(nodeHotThreads, notNullValue());
        Map<String,NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap();
        assertThat(nodesMap.size(), equalTo(cluster().size()));
        for (NodeHotThreads ht : nodeHotThreads) {
            assertNotNull(ht.getHotThreads());
            //logger.info(ht.getHotThreads());
        }
        success = true;
    } finally {
        if (!success) {
            hasErrors.set(true);
        }
        latch.countDown();
    }
}
@Override
public void onFailure(Throwable e) {
    // Record the failure and release the latch before failing the test.
    logger.error("FAILED", e);
    hasErrors.set(true);
    latch.countDown();
    fail();
}
}); | 0true
| src_test_java_org_elasticsearch_action_admin_HotThreadsTest.java |
1,516 | public class OObjectFetchListener implements OFetchListener {
// Copies a scalar/collection field from the fetched record into the user's
// POJO, wrapping Orient lazy collections in their object-layer counterparts
// so that cascade-delete configuration is honored.
@SuppressWarnings({ "unchecked", "rawtypes" })
public void processStandardField(final ORecordSchemaAware<?> iRecord, final Object iFieldValue, final String iFieldName,
        final OFetchContext iContext, final Object iUserObject, final String iFormat) throws OFetchException {
    if (iFieldValue instanceof ORecordLazyList)
        OObjectSerializerHelper.setFieldValue(iUserObject, iFieldName, new OObjectLazyList(iRecord, (ORecordLazyList) iFieldValue,
            OObjectEntitySerializer.isCascadeDeleteField(iUserObject.getClass(), iFieldName)));
    else if (iFieldValue instanceof ORecordLazySet)
        OObjectSerializerHelper.setFieldValue(iUserObject, iFieldName, new OObjectLazySet(iRecord, (ORecordLazySet) iFieldValue,
            OObjectEntitySerializer.isCascadeDeleteField(iUserObject.getClass(), iFieldName)));
    else if (iFieldValue instanceof ORecordLazyMap)
        OObjectSerializerHelper.setFieldValue(iUserObject, iFieldName, new OObjectLazyMap(iRecord, (ORecordLazyMap) iFieldValue,
            OObjectEntitySerializer.isCascadeDeleteField(iUserObject.getClass(), iFieldName)));
    else
        OObjectSerializerHelper.setFieldValue(iUserObject, iFieldName, iFieldValue);
}
// No-op: standard collection values need no extra handling in the object layer.
public void processStandardCollectionValue(Object iFieldValue, OFetchContext iContext) throws OFetchException {
}
// No-op: linked collection values are materialized by fetchLinkedCollectionValue.
public void parseLinkedCollectionValue(ORecordSchemaAware<?> iRootRecord, OIdentifiable iLinked, Object iUserObject,
        String iFieldName, OFetchContext iContext) throws OFetchException {
}
// Resolves a linked record into the user object's field. Maps are skipped;
// collections/arrays get the linked element appended (eager loading only);
// any other not-yet-materialized linked record is fetched recursively.
@SuppressWarnings("unchecked")
public void parseLinked(final ORecordSchemaAware<?> iRootRecord, final OIdentifiable iLinked, final Object iUserObject,
        final String iFieldName, final OFetchContext iContext) throws OFetchException {
    final Class<?> type = OObjectSerializerHelper.getFieldType(iUserObject, iFieldName);
    if (type == null || Map.class.isAssignableFrom(type)) {
        // Unknown or map-typed field: nothing to do here.
    } else if (Set.class.isAssignableFrom(type) || Collection.class.isAssignableFrom(type) || type.isArray()) {
        if (!((OObjectFetchContext) iContext).isLazyLoading()) {
            Object value = ((OObjectFetchContext) iContext).getObj2RecHandler().getUserObjectByRecord((ODocument) iLinked,
                ((OObjectFetchContext) iContext).getFetchPlan());
            // NOTE(review): this inner isLazyLoading() check is always true given
            // the outer one — it appears redundant; confirm before simplifying.
            if (!((OObjectFetchContext) iContext).isLazyLoading()) {
                Collection<Object> target = (Collection<Object>) OObjectSerializerHelper.getFieldValue(iUserObject, iFieldName);
                target.add(value);
                OObjectSerializerHelper.setFieldValue(iUserObject, iFieldName, target);
            }
        }
        return;
    } else if (iLinked instanceof ORecordSchemaAware
            && !(((OObjectFetchContext) iContext).getObj2RecHandler().existsUserObjectByRID(iLinked.getIdentity()))) {
        fetchLinked(iRootRecord, iUserObject, iFieldName, (ORecordSchemaAware<?>) iLinked, iContext);
    }
}
// Materializes a linked record that appears as a map value, but only when its
// type is registered with the entity manager; returns null otherwise.
// NOTE(review): iRoot, iUserObject, iFieldName and iKey are unused here —
// the type is derived from the linked document itself; confirm intentional.
public Object fetchLinkedMapEntry(final ORecordSchemaAware<?> iRoot, final Object iUserObject, final String iFieldName,
        String iKey, final ORecordSchemaAware<?> iLinked, final OFetchContext iContext) throws OFetchException {
    Object value = null;
    final Class<?> type = OObjectSerializerHelper.getFieldType((ODocument) iLinked,
        ((OObjectFetchContext) iContext).getEntityManager());
    final Class<?> fieldClass = ((OObjectFetchContext) iContext).getEntityManager().getEntityClass(type.getSimpleName());
    if (fieldClass != null) {
        // RECOGNIZED TYPE
        value = ((OObjectFetchContext) iContext).getObj2RecHandler().getUserObjectByRecord((ODocument) iLinked,
            ((OObjectFetchContext) iContext).getFetchPlan());
    }
    return value;
}
// Materializes a linked record that appears as a collection element. When its
// type is registered and loading is eager, the value is also appended to the
// user object's collection field.
@SuppressWarnings("unchecked")
public Object fetchLinkedCollectionValue(final ORecordSchemaAware<?> iRoot, final Object iUserObject, final String iFieldName,
        final ORecordSchemaAware<?> iLinked, final OFetchContext iContext) throws OFetchException {
    Object value = null;
    final Class<?> fieldClass = OObjectSerializerHelper.getFieldType((ODocument) iLinked,
        ((OObjectFetchContext) iContext).getEntityManager());
    if (fieldClass != null) {
        // RECOGNIZED TYPE
        value = ((OObjectFetchContext) iContext).getObj2RecHandler().getUserObjectByRecord((ODocument) iLinked,
            ((OObjectFetchContext) iContext).getFetchPlan());
        if (!((OObjectFetchContext) iContext).isLazyLoading()) {
            Collection<Object> target = (Collection<Object>) OObjectSerializerHelper.getFieldValue(iUserObject, iFieldName);
            target.add(value);
            OObjectSerializerHelper.setFieldValue(iUserObject, iFieldName, target);
        }
    }
    return value;
}
/**
 * Materializes a directly linked record into the user object's field.
 * The target type is taken from the linked document's class when available
 * (handles subtypes), otherwise from reflection on the user object's field.
 * Enums are converted from their stored name; registered entity types are
 * resolved through the object-record handler (eager loading only).
 *
 * @return the materialized field value, or null when nothing was set
 * @throws OSerializationException when the linked type cannot be determined
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public Object fetchLinked(ORecordSchemaAware<?> iRoot, Object iUserObject, String iFieldName, ORecordSchemaAware<?> iLinked,
        OFetchContext iContext) throws OFetchException {
    if (iUserObject == null)
        return null;
    final Class<?> type;
    if (iLinked != null && iLinked instanceof ODocument)
        // GET TYPE BY DOCUMENT'S CLASS. THIS WORKS VERY WELL FOR SUB-TYPES
        type = OObjectSerializerHelper.getFieldType((ODocument) iLinked, ((OObjectFetchContext) iContext).getEntityManager());
    else
        // DETERMINE TYPE BY REFLECTION
        type = OObjectSerializerHelper.getFieldType(iUserObject, iFieldName);
    if (type == null)
        throw new OSerializationException(
            "Linked type of field '"
                + iRoot.getClassName()
                + "."
                + iFieldName
                + "' is unknown. Probably needs to be registered with <db>.getEntityManager().registerEntityClasses(<package>) or <db>.getEntityManager().registerEntityClass(<class>) or the package cannot be loaded correctly due to a classpath problem. In this case register the single classes one by one.");
    Object fieldValue = null;
    Class<?> fieldClass;
    if (type.isEnum()) {
        // Enums are stored by name on the document.
        String enumName = ((ODocument) iLinked).field(iFieldName);
        Class<Enum> enumClass = (Class<Enum>) type;
        fieldValue = Enum.valueOf(enumClass, enumName);
    } else {
        fieldClass = ((OObjectFetchContext) iContext).getEntityManager().getEntityClass(type.getSimpleName());
        if (fieldClass != null && !((OObjectFetchContext) iContext).isLazyLoading()) {
            // RECOGNIZED TYPE
            fieldValue = ((OObjectFetchContext) iContext).getObj2RecHandler().getUserObjectByRecord((ODocument) iLinked,
                ((OObjectFetchContext) iContext).getFetchPlan());
            OObjectSerializerHelper.setFieldValue(iUserObject, iFieldName, OObjectSerializerHelper.unserializeFieldValue(
                OObjectSerializerHelper.getFieldType(iUserObject, iFieldName), fieldValue));
        }
    }
    return fieldValue;
}
} | 0true
| object_src_main_java_com_orientechnologies_orient_object_fetch_OObjectFetchListener.java |
1,857 | public class Injectors {
public static Throwable getFirstErrorFailure(CreationException e) {
if (e.getErrorMessages().isEmpty()) {
return e;
}
// return the first message that has root cause, probably an actual error
for (Message message : e.getErrorMessages()) {
if (message.getCause() != null) {
return message.getCause();
}
}
return e;
}
/**
* Returns an instance of the given type with the {@link org.elasticsearch.common.inject.name.Named}
* annotation value.
* <p/>
* This method allows you to switch this code
* <code>injector.getInstance(Key.get(type, Names.named(name)));</code>
* <p/>
* to the more concise
* <code>Injectors.getInstance(injector, type, name);</code>
*/
public static <T> T getInstance(Injector injector, java.lang.Class<T> type, String name) {
return injector.getInstance(Key.get(type, Names.named(name)));
}
/**
* Returns a collection of all instances of the given base type
*
* @param baseClass the base type of objects required
* @param <T> the base type
* @return a set of objects returned from this injector
*/
public static <T> Set<T> getInstancesOf(Injector injector, Class<T> baseClass) {
Set<T> answer = Sets.newHashSet();
Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
for (Entry<Key<?>, Binding<?>> entry : entries) {
Key<?> key = entry.getKey();
Class<?> keyType = getKeyType(key);
if (keyType != null && baseClass.isAssignableFrom(keyType)) {
Binding<?> binding = entry.getValue();
Object value = binding.getProvider().get();
if (value != null) {
T castValue = baseClass.cast(value);
answer.add(castValue);
}
}
}
return answer;
}
/**
* Returns a collection of all instances matching the given matcher
*
* @param matcher matches the types to return instances
* @return a set of objects returned from this injector
*/
public static <T> Set<T> getInstancesOf(Injector injector, Matcher<Class> matcher) {
Set<T> answer = Sets.newHashSet();
Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
for (Entry<Key<?>, Binding<?>> entry : entries) {
Key<?> key = entry.getKey();
Class<?> keyType = getKeyType(key);
if (keyType != null && matcher.matches(keyType)) {
Binding<?> binding = entry.getValue();
Object value = binding.getProvider().get();
answer.add((T) value);
}
}
return answer;
}
/**
* Returns a collection of all of the providers matching the given matcher
*
* @param matcher matches the types to return instances
* @return a set of objects returned from this injector
*/
public static <T> Set<Provider<T>> getProvidersOf(Injector injector, Matcher<Class> matcher) {
Set<Provider<T>> answer = Sets.newHashSet();
Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
for (Entry<Key<?>, Binding<?>> entry : entries) {
Key<?> key = entry.getKey();
Class<?> keyType = getKeyType(key);
if (keyType != null && matcher.matches(keyType)) {
Binding<?> binding = entry.getValue();
answer.add((Provider<T>) binding.getProvider());
}
}
return answer;
}
/**
* Returns a collection of all providers of the given base type
*
* @param baseClass the base type of objects required
* @param <T> the base type
* @return a set of objects returned from this injector
*/
public static <T> Set<Provider<T>> getProvidersOf(Injector injector, Class<T> baseClass) {
Set<Provider<T>> answer = Sets.newHashSet();
Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
for (Entry<Key<?>, Binding<?>> entry : entries) {
Key<?> key = entry.getKey();
Class<?> keyType = getKeyType(key);
if (keyType != null && baseClass.isAssignableFrom(keyType)) {
Binding<?> binding = entry.getValue();
answer.add((Provider<T>) binding.getProvider());
}
}
return answer;
}
/**
* Returns true if a binding exists for the given matcher
*/
public static boolean hasBinding(Injector injector, Matcher<Class> matcher) {
return !getBindingsOf(injector, matcher).isEmpty();
}
/**
* Returns true if a binding exists for the given base class
*/
public static boolean hasBinding(Injector injector, Class<?> baseClass) {
return !getBindingsOf(injector, baseClass).isEmpty();
}
/**
* Returns true if a binding exists for the given key
*/
public static boolean hasBinding(Injector injector, Key<?> key) {
Binding<?> binding = getBinding(injector, key);
return binding != null;
}
/**
* Returns the binding for the given key or null if there is no such binding
*/
public static Binding<?> getBinding(Injector injector, Key<?> key) {
Map<Key<?>, Binding<?>> bindings = injector.getBindings();
Binding<?> binding = bindings.get(key);
return binding;
}
/**
* Returns a collection of all of the bindings matching the given matcher
*
* @param matcher matches the types to return instances
* @return a set of objects returned from this injector
*/
public static Set<Binding<?>> getBindingsOf(Injector injector, Matcher<Class> matcher) {
Set<Binding<?>> answer = Sets.newHashSet();
Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
for (Entry<Key<?>, Binding<?>> entry : entries) {
Key<?> key = entry.getKey();
Class<?> keyType = getKeyType(key);
if (keyType != null && matcher.matches(keyType)) {
answer.add(entry.getValue());
}
}
return answer;
}
/**
* Returns a collection of all bindings of the given base type
*
* @param baseClass the base type of objects required
* @return a set of objects returned from this injector
*/
public static Set<Binding<?>> getBindingsOf(Injector injector, Class<?> baseClass) {
Set<Binding<?>> answer = Sets.newHashSet();
Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
for (Entry<Key<?>, Binding<?>> entry : entries) {
Key<?> key = entry.getKey();
Class<?> keyType = getKeyType(key);
if (keyType != null && baseClass.isAssignableFrom(keyType)) {
answer.add(entry.getValue());
}
}
return answer;
}
/**
* Returns the key type of the given key
*/
public static <T> Class<?> getKeyType(Key<?> key) {
Class<?> keyType = null;
TypeLiteral<?> typeLiteral = key.getTypeLiteral();
Type type = typeLiteral.getType();
if (type instanceof Class) {
keyType = (Class<?>) type;
}
return keyType;
}
public static void close(Injector injector) {
}
public static void cleanCaches(Injector injector) {
((InjectorImpl) injector).clearCache();
if (injector.getParent() != null) {
cleanCaches(injector.getParent());
}
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_Injectors.java |
// Privileged action that reflectively scans sun.misc.Unsafe's declared
// fields for the singleton Unsafe instance -- presumably because
// Unsafe.getUnsafe() is restricted to bootstrap-classpath callers
// (standard jsr166 idiom; confirm against the enclosing getUnsafe helper).
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
    public sun.misc.Unsafe run() throws Exception {
        Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
        for (java.lang.reflect.Field f : k.getDeclaredFields()) {
            f.setAccessible(true);
            // static field: read with a null receiver
            Object x = f.get(null);
            if (k.isInstance(x))
                return k.cast(x);
        }
        // no field of type Unsafe found on this JVM
        throw new NoSuchFieldError("the Unsafe");
    }});
| src_main_java_jsr166y_ForkJoinTask.java |
1,619 | public class OResynchTask extends OAbstractRemoteTask {
private static final long serialVersionUID = 1L;
public OResynchTask() {
}
@Override
public Object execute(final OServer iServer, ODistributedServerManager iManager, final ODatabaseDocumentTx database)
throws Exception {
return Boolean.TRUE;
}
@Override
public void writeExternal(final ObjectOutput out) throws IOException {
}
@Override
public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
}
public QUORUM_TYPE getQuorumType() {
return QUORUM_TYPE.ALL;
}
@Override
public String getName() {
return "resynch";
}
} | 0true
| server_src_main_java_com_orientechnologies_orient_server_distributed_task_OResynchTask.java |
// Chain a callback onto the future: whichever outcome arrives (normal
// response or failure) is captured in reference1 and the latch is released
// so the waiting test thread can proceed and inspect the result.
completableFuture.andThen(new ExecutionCallback() {
    @Override
    public void onResponse(Object response) {
        reference1.set(response);
        latch2.countDown();
    }

    @Override
    public void onFailure(Throwable t) {
        // store the throwable itself so the test can assert on it
        reference1.set(t);
        latch2.countDown();
    }
});
| hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java |
2 | public class AbbreviationServiceImpl implements AbbreviationService {
private static final Logger logger = LoggerFactory.getLogger(AbbreviationServiceImpl.class);
private static final String ABBREVIATIONS_FILE_PROPERTY = "abbreviations-file";
private AbbreviationsManager manager;
/**
* Activates the service implementation. A map of properties is
* used to configure the service.
*
* @param context the component context for this service
*/
public void activate(ComponentContext context) {
@SuppressWarnings("unchecked")
Dictionary<String,String> properties = context.getProperties();
// This property will always be present, according to OSGi 4.1 Compendium
// Specification section 112.6.
String componentName = properties.get("component.name");
String abbreviationsFilePath = properties.get(ABBREVIATIONS_FILE_PROPERTY);
Properties abbreviationsProperties = null;
if (abbreviationsFilePath == null) {
logger.warn("{}: no configuration value for {} - no abbreviations will be available.", componentName, ABBREVIATIONS_FILE_PROPERTY);
} else {
InputStream in = findFile(abbreviationsFilePath);
if (in == null) {
logger.warn("{}: abbreviations file <{}> not found - no abbreviations will be available.", componentName, abbreviationsFilePath);
} else {
try {
abbreviationsProperties = new Properties();
abbreviationsProperties.load(in);
} catch (IOException ex) {
logger.warn("{}: error loading abbreviations file <{}> - no abbreviations will be available.", componentName, abbreviationsFilePath);
abbreviationsProperties = null;
}
}
}
if (abbreviationsProperties == null) {
abbreviationsProperties = new Properties();
}
manager = new AbbreviationsManager(abbreviationsProperties);
}
/**
* Looks up a file given a path. The file is looked up first relative to the
* current directory. If not found, a matching resource within the bundle is
* tried. If neither method works, null is returned to indicate that the file
* could not be found.
*
* @param path a relative or absolute pathname, or a resource name from within the bundle
* @return an input stream for reading from the file, or null if the file could not be found
*/
InputStream findFile(String path) {
// 1. Try to find using the file path, which may be absolute or
// relative to the current directory.
File f = new File(path);
if (f.isFile() && f.canRead()) {
try {
return new FileInputStream(f);
} catch (Exception ex) {
// ignore, for now
}
}
// 2. Try to find a resource in the bundle. This return value may be null,
// if no resource is found matching the path.
return getClass().getResourceAsStream(path);
}
@Override
public Abbreviations getAbbreviations(String s) {
return manager.getAbbreviations(s);
}
} | 0true
| tableViews_src_main_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationServiceImpl.java |
// Token for an embedded/opaque object value the parser passes through
// untouched; classified as a value token.
VALUE_EMBEDDED_OBJECT {
    @Override
    public boolean isValue() {
        return true;
    }
},
| src_main_java_org_elasticsearch_common_xcontent_XContentParser.java |
/**
 * Action definition for putting (creating/updating) an index template.
 * Registers the action name used for transport routing and wires up
 * construction of the response and request builder. Stateless, so a single
 * shared {@link #INSTANCE} suffices.
 */
public class PutIndexTemplateAction extends IndicesAction<PutIndexTemplateRequest, PutIndexTemplateResponse, PutIndexTemplateRequestBuilder> {

    // Shared singleton instance of this action.
    public static final PutIndexTemplateAction INSTANCE = new PutIndexTemplateAction();
    // Unique action name used to register and dispatch this action.
    public static final String NAME = "indices/template/put";

    private PutIndexTemplateAction() {
        super(NAME);
    }

    @Override
    public PutIndexTemplateResponse newResponse() {
        return new PutIndexTemplateResponse();
    }

    @Override
    public PutIndexTemplateRequestBuilder newRequestBuilder(IndicesAdminClient client) {
        return new PutIndexTemplateRequestBuilder(client);
    }
}
| src_main_java_org_elasticsearch_action_admin_indices_template_put_PutIndexTemplateAction.java |
/**
 * Provider for Lucene's Pulsing(41) postings format, which inlines postings
 * of low-document-frequency terms directly into the terms dictionary.
 * Configured from index settings:
 * "freq_cut_off" (default 1), and "min_block_size"/"max_block_size" for the
 * underlying block-tree terms writer.
 */
public class PulsingPostingsFormatProvider extends AbstractPostingsFormatProvider {

    private final int freqCutOff;
    private final int minBlockSize;
    private final int maxBlockSize;
    private final Pulsing41PostingsFormat postingsFormat;

    @Inject
    public PulsingPostingsFormatProvider(@Assisted String name, @Assisted Settings postingsFormatSettings) {
        super(name);
        // Fall back to Lucene's block-tree defaults when sizes are not configured.
        this.freqCutOff = postingsFormatSettings.getAsInt("freq_cut_off", 1);
        this.minBlockSize = postingsFormatSettings.getAsInt("min_block_size", BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE);
        this.maxBlockSize = postingsFormatSettings.getAsInt("max_block_size", BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
        this.postingsFormat = new Pulsing41PostingsFormat(freqCutOff, minBlockSize, maxBlockSize);
    }

    // Accessors exposing the resolved configuration (used by tests/diagnostics).
    public int freqCutOff() {
        return freqCutOff;
    }

    public int minBlockSize() {
        return minBlockSize;
    }

    public int maxBlockSize() {
        return maxBlockSize;
    }

    @Override
    public PostingsFormat get() {
        return postingsFormat;
    }
}
| src_main_java_org_elasticsearch_index_codec_postingsformat_PulsingPostingsFormatProvider.java |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_CAT_SEARCH_FACET_XREF")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "blStandardElements")
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE)
public class CategorySearchFacetImpl implements CategorySearchFacet,Serializable {
    // JPA cross-reference entity linking a Category to a SearchFacet, with a
    // sequence value controlling the facet's display order within the category.

    /** Serialization version for this entity. */
    private static final long serialVersionUID = 1L;

    // Table-generated primary key.
    @Id
    @GeneratedValue(generator = "CategorySearchFacetId")
    @GenericGenerator(
        name="CategorySearchFacetId",
        strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
        parameters = {
            @Parameter(name="segment_value", value="CategorySearchFacetImpl"),
            @Parameter(name="entity_name", value="org.broadleafcommerce.core.search.domain.CategorySearchFacetImpl")
        }
    )
    @Column(name = "CATEGORY_SEARCH_FACET_ID")
    protected Long id;

    // Owning side of the link to the category; hidden from the admin UI.
    @ManyToOne(targetEntity = CategoryImpl.class)
    @JoinColumn(name = "CATEGORY_ID")
    @AdminPresentation(excluded = true)
    protected Category category;

    // The facet associated with the category.
    @ManyToOne(targetEntity = SearchFacetImpl.class)
    @JoinColumn(name = "SEARCH_FACET_ID")
    protected SearchFacet searchFacet;

    // Relative ordering of this facet within the category.
    @Column(name = "SEQUENCE")
    @AdminPresentation(friendlyName = "CategorySearchFacetImpl_sequence")
    protected Long sequence;

    @Override
    public Long getId() {
        return id;
    }

    @Override
    public void setId(Long id) {
        this.id = id;
    }

    @Override
    public Category getCategory() {
        return category;
    }

    @Override
    public void setCategory(Category category) {
        this.category = category;
    }

    @Override
    public SearchFacet getSearchFacet() {
        return searchFacet;
    }

    @Override
    public void setSearchFacet(SearchFacet searchFacet) {
        this.searchFacet = searchFacet;
    }

    @Override
    public Long getSequence() {
        return sequence;
    }

    @Override
    public void setSequence(Long sequence) {
        this.sequence = sequence;
    }
}
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_domain_CategorySearchFacetImpl.java |
// Register a "replace" operation: substitute the value at a random key of
// "myMap" with a freshly generated Customer. The trailing 4 is presumably a
// relative selection weight -- confirm against addOperation's signature.
addOperation(operations, new Runnable() {
    public void run() {
        IMap map = hazelcast.getMap("myMap");
        map.replace(random.nextInt(SIZE), new Customer(random.nextInt(100), String.valueOf(random.nextInt(10000))));
    }
}, 4);
| hazelcast_src_main_java_com_hazelcast_examples_AllTest.java |
// Simple mutable pair of a string key and an associated count, used by the
// adjust-or-put benchmark.
static class StringEntry {
    String key;      // the map key
    int counter;     // count associated with the key

    StringEntry(String key, int counter) {
        this.key = key;
        this.counter = counter;
    }
}
| src_test_java_org_elasticsearch_benchmark_hppc_StringMapAdjustOrPutBenchmark.java |
// Walk the compilation unit looking for the invocation whose argument list
// begins at the same offset as the current node; when the callee is fully
// resolved, record a ParameterInfo for it (used to show parameter hints).
new Visitor() {
    @Override
    public void visit(Tree.InvocationExpression that) {
        Tree.ArgumentList al = that.getPositionalArgumentList();
        if (al==null) {
            // fall back to named-argument syntax
            al = that.getNamedArgumentList();
        }
        if (al!=null) {
            Integer startIndex = al.getStartIndex();
            Integer startIndex2 = node.getStartIndex();
            if (startIndex!=null && startIndex2!=null &&
                startIndex.intValue()==startIndex2.intValue()) {
                Tree.Primary primary = that.getPrimary();
                if (primary instanceof Tree.MemberOrTypeExpression) {
                    Tree.MemberOrTypeExpression mte =
                            (Tree.MemberOrTypeExpression) primary;
                    // only record invocations whose declaration and target type
                    // were successfully resolved by the typechecker
                    if (mte.getDeclaration()!=null && mte.getTarget()!=null) {
                        result.add(new ParameterInfo(al.getStartIndex(),
                                mte.getDeclaration(), mte.getTarget(),
                                node.getScope(), cpc,
                                al instanceof Tree.NamedArgumentList));
                    }
                }
            }
        }
        // keep traversing to reach nested invocations
        super.visit(that);
    }
}.visit(cpc.getRootNode());
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_InvocationCompletionProposal.java |
/**
 * Serializes and deserializes {@link ORecordVersion} instances across the
 * media used by the storage engine: data inputs/outputs, raw streams, byte
 * arrays and data files. Read methods populate the supplied version object
 * rather than allocating a new one.
 */
public interface ORecordVersionSerializer {
  /** Writes the version to the given data output. */
  void writeTo(DataOutput out, ORecordVersion version) throws IOException;

  /** Reads the version from the given data input into {@code version}. */
  void readFrom(DataInput in, ORecordVersion version) throws IOException;

  /** Reads the version from the given raw stream into {@code version}. */
  void readFrom(InputStream stream, ORecordVersion version) throws IOException;

  /** Writes the version to the given raw stream. */
  void writeTo(OutputStream stream, ORecordVersion version) throws IOException;

  /**
   * Writes version to stream.
   *
   *
   * @param iStream
   *          stream to write data.
   * @param pos
   *          the beginning index, inclusive.
   * @param version
   *          the version to serialize
   * @return size of serialized object
   */
  int writeTo(byte[] iStream, int pos, ORecordVersion version);

  /**
   * Reads version from stream.
   *
   *
   * @param iStream
   *          stream that contains serialized data.
   * @param pos
   *          the beginning index, inclusive.
   * @param version
   *          the version object to populate
   * @return size of deserialized object
   */
  int readFrom(byte[] iStream, int pos, ORecordVersion version);

  /** Writes the version into the file at the given offset; returns bytes written. */
  int writeTo(OFile file, long offset, ORecordVersion version) throws IOException;

  /** Reads the version from the file at the given offset; returns bytes read. */
  long readFrom(OFile file, long offset, ORecordVersion version) throws IOException;

  /**
   * The same as {@link #writeTo(byte[], int, ORecordVersion)}, but uses platform dependent optimization to speed up writing.
   *
   *
   * @param iStream
   * @param pos
   * @param version
   * @return size of serialized object
   */
  int fastWriteTo(byte[] iStream, int pos, ORecordVersion version);

  /**
   * The same as {@link #readFrom(byte[], int, ORecordVersion)}, but uses platform dependent optimization to speed up reading.
   *
   *
   * @param iStream
   * @param pos
   * @param version
   * @return size of deserialized object
   */
  int fastReadFrom(byte[] iStream, int pos, ORecordVersion version);

  /**
   * Can use platform dependent optimization.
   *
   * @return serialized version
   * @param version
   */
  byte[] toByteArray(ORecordVersion version);

  String toString();

  /** Renders the given version in this serializer's textual form. */
  String toString(ORecordVersion version);

  /** Parses the textual form produced by {@link #toString(ORecordVersion)} into {@code version}. */
  void fromString(String string, ORecordVersion version);
}
| core_src_main_java_com_orientechnologies_orient_core_version_ORecordVersion.java |
/**
 * Central registry of object recyclers for the hot hppc collection types
 * used during search/aggregation. Each public field is a {@link Recycler}
 * for one collection type; the obtain-style methods hand out recycled
 * (cleared) instances sized by a sizing hint. The recycling strategy
 * (thread-local, queue, concurrent, soft-referenced, or none) is chosen via
 * the "type" setting; "limit" bounds each pool and "smart_size" enables
 * size-aware reuse.
 */
@SuppressWarnings("unchecked")
public class CacheRecycler extends AbstractComponent {

    public final Recycler<ObjectObjectOpenHashMap> hashMap;
    public final Recycler<ObjectOpenHashSet> hashSet;
    public final Recycler<DoubleObjectOpenHashMap> doubleObjectMap;
    public final Recycler<LongObjectOpenHashMap> longObjectMap;
    public final Recycler<LongLongOpenHashMap> longLongMap;
    public final Recycler<IntIntOpenHashMap> intIntMap;
    public final Recycler<FloatIntOpenHashMap> floatIntMap;
    public final Recycler<DoubleIntOpenHashMap> doubleIntMap;
    public final Recycler<LongIntOpenHashMap> longIntMap;
    public final Recycler<ObjectIntOpenHashMap> objectIntMap;
    public final Recycler<IntObjectOpenHashMap> intObjectMap;
    public final Recycler<ObjectFloatOpenHashMap> objectFloatMap;

    // Releases every pool; must be called on shutdown.
    public void close() {
        hashMap.close();
        hashSet.close();
        doubleObjectMap.close();
        longObjectMap.close();
        longLongMap.close();
        intIntMap.close();
        floatIntMap.close();
        doubleIntMap.close();
        longIntMap.close();
        objectIntMap.close();
        intObjectMap.close();
        objectFloatMap.close();
    }

    @Inject
    public CacheRecycler(Settings settings) {
        super(settings);
        final Type type = Type.parse(settings.get("type"));
        int limit = settings.getAsInt("limit", 10);
        int smartSize = settings.getAsInt("smart_size", 1024);
        final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);

        // One recycler per collection type; each Recycler.C supplies
        // construction (with a sizing hint) and clearing for reuse.
        hashMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectObjectOpenHashMap>() {
            @Override
            public ObjectObjectOpenHashMap newInstance(int sizing) {
                return new ObjectObjectOpenHashMap(size(sizing));
            }

            @Override
            public void clear(ObjectObjectOpenHashMap value) {
                value.clear();
            }
        });
        hashSet = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectOpenHashSet>() {
            @Override
            public ObjectOpenHashSet newInstance(int sizing) {
                // note the lower load factor used only for the hash set
                return new ObjectOpenHashSet(size(sizing), 0.5f);
            }

            @Override
            public void clear(ObjectOpenHashSet value) {
                value.clear();
            }
        });
        doubleObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<DoubleObjectOpenHashMap>() {
            @Override
            public DoubleObjectOpenHashMap newInstance(int sizing) {
                return new DoubleObjectOpenHashMap(size(sizing));
            }

            @Override
            public void clear(DoubleObjectOpenHashMap value) {
                value.clear();
            }
        });
        longObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongObjectOpenHashMap>() {
            @Override
            public LongObjectOpenHashMap newInstance(int sizing) {
                return new LongObjectOpenHashMap(size(sizing));
            }

            @Override
            public void clear(LongObjectOpenHashMap value) {
                value.clear();
            }
        });
        longLongMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongLongOpenHashMap>() {
            @Override
            public LongLongOpenHashMap newInstance(int sizing) {
                return new LongLongOpenHashMap(size(sizing));
            }

            @Override
            public void clear(LongLongOpenHashMap value) {
                value.clear();
            }
        });
        intIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<IntIntOpenHashMap>() {
            @Override
            public IntIntOpenHashMap newInstance(int sizing) {
                return new IntIntOpenHashMap(size(sizing));
            }

            @Override
            public void clear(IntIntOpenHashMap value) {
                value.clear();
            }
        });
        floatIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<FloatIntOpenHashMap>() {
            @Override
            public FloatIntOpenHashMap newInstance(int sizing) {
                return new FloatIntOpenHashMap(size(sizing));
            }

            @Override
            public void clear(FloatIntOpenHashMap value) {
                value.clear();
            }
        });
        doubleIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<DoubleIntOpenHashMap>() {
            @Override
            public DoubleIntOpenHashMap newInstance(int sizing) {
                return new DoubleIntOpenHashMap(size(sizing));
            }

            @Override
            public void clear(DoubleIntOpenHashMap value) {
                value.clear();
            }
        });
        longIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongIntOpenHashMap>() {
            @Override
            public LongIntOpenHashMap newInstance(int sizing) {
                return new LongIntOpenHashMap(size(sizing));
            }

            @Override
            public void clear(LongIntOpenHashMap value) {
                value.clear();
            }
        });
        objectIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectIntOpenHashMap>() {
            @Override
            public ObjectIntOpenHashMap newInstance(int sizing) {
                return new ObjectIntOpenHashMap(size(sizing));
            }

            @Override
            public void clear(ObjectIntOpenHashMap value) {
                value.clear();
            }
        });
        intObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<IntObjectOpenHashMap>() {
            @Override
            public IntObjectOpenHashMap newInstance(int sizing) {
                return new IntObjectOpenHashMap(size(sizing));
            }

            @Override
            public void clear(IntObjectOpenHashMap value) {
                value.clear();
            }
        });
        objectFloatMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectFloatOpenHashMap>() {
            @Override
            public ObjectFloatOpenHashMap newInstance(int sizing) {
                return new ObjectFloatOpenHashMap(size(sizing));
            }

            @Override
            public void clear(ObjectFloatOpenHashMap value) {
                value.clear();
            }
        });
    }

    // Typed obtain methods: each returns a recycled wrapper around a cleared
    // collection sized by the hint (unchecked casts bridge the raw pools to
    // the callers' generic element types).
    public <K, V> Recycler.V<ObjectObjectOpenHashMap<K, V>> hashMap(int sizing) {
        return (Recycler.V) hashMap.obtain(sizing);
    }

    public <T> Recycler.V<ObjectOpenHashSet<T>> hashSet(int sizing) {
        return (Recycler.V) hashSet.obtain(sizing);
    }

    public <T> Recycler.V<DoubleObjectOpenHashMap<T>> doubleObjectMap(int sizing) {
        return (Recycler.V) doubleObjectMap.obtain(sizing);
    }

    public <T> Recycler.V<LongObjectOpenHashMap<T>> longObjectMap(int sizing) {
        return (Recycler.V) longObjectMap.obtain(sizing);
    }

    public Recycler.V<LongLongOpenHashMap> longLongMap(int sizing) {
        return longLongMap.obtain(sizing);
    }

    public Recycler.V<IntIntOpenHashMap> intIntMap(int sizing) {
        return intIntMap.obtain(sizing);
    }

    public Recycler.V<FloatIntOpenHashMap> floatIntMap(int sizing) {
        return floatIntMap.obtain(sizing);
    }

    public Recycler.V<DoubleIntOpenHashMap> doubleIntMap(int sizing) {
        return doubleIntMap.obtain(sizing);
    }

    public Recycler.V<LongIntOpenHashMap> longIntMap(int sizing) {
        return longIntMap.obtain(sizing);
    }

    public <T> Recycler.V<ObjectIntOpenHashMap<T>> objectIntMap(int sizing) {
        return (Recycler.V) objectIntMap.obtain(sizing);
    }

    public <T> Recycler.V<IntObjectOpenHashMap<T>> intObjectMap(int sizing) {
        return (Recycler.V) intObjectMap.obtain(sizing);
    }

    public <T> Recycler.V<ObjectFloatOpenHashMap<T>> objectFloatMap(int sizing) {
        return (Recycler.V) objectFloatMap.obtain(sizing);
    }

    // Non-positive sizing hints fall back to a default capacity of 256.
    static int size(int sizing) {
        return sizing > 0 ? sizing : 256;
    }

    // Builds a recycler of the configured strategy, optionally wrapped with
    // size-aware dispatch when smart_size is enabled.
    private <T> Recycler<T> build(Type type, int limit, int smartSize, int availableProcessors, Recycler.C<T> c) {
        Recycler<T> recycler;
        try {
            recycler = type.build(c, limit, availableProcessors);
            if (smartSize > 0) {
                recycler = sizing(recycler, none(c), smartSize);
            }
        } catch (IllegalArgumentException ex) {
            throw new ElasticsearchIllegalArgumentException("no type support [" + type + "] for recycler");
        }
        return recycler;
    }

    // Available pooling strategies; each knows how to assemble its recycler.
    public static enum Type {
        SOFT_THREAD_LOCAL {
            @Override
            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
                return threadLocal(softFactory(dequeFactory(c, limit)));
            }
        },
        THREAD_LOCAL {
            @Override
            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
                return threadLocal(dequeFactory(c, limit));
            }
        },
        QUEUE {
            @Override
            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
                return concurrentDeque(c, limit);
            }
        },
        SOFT_CONCURRENT {
            @Override
            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
                return concurrent(softFactory(dequeFactory(c, limit)), availableProcessors);
            }
        },
        CONCURRENT {
            @Override
            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
                return concurrent(dequeFactory(c, limit), availableProcessors);
            }
        },
        NONE {
            @Override
            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
                return none(c);
            }
        };

        // Parses the "type" setting; defaults to SOFT_CONCURRENT when unset.
        public static Type parse(String type) {
            if (Strings.isNullOrEmpty(type)) {
                return SOFT_CONCURRENT;
            }
            try {
                return Type.valueOf(type.toUpperCase(Locale.ROOT));
            } catch (IllegalArgumentException e) {
                throw new ElasticsearchIllegalArgumentException("no type support [" + type + "]");
            }
        }

        abstract <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors);
    }
}
| src_main_java_org_elasticsearch_cache_recycler_CacheRecycler.java |
1,406 | public class ODateHelper {
public static final String DEF_DATE_FORMAT = "yyyy-MM-dd";
public static final String DEF_DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss:SSS";
public static Calendar getDatabaseCalendar() {
return Calendar.getInstance(getDatabaseTimeZone());
}
public static TimeZone getDatabaseTimeZone() {
final ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
if (db != null && !db.isClosed())
return ODatabaseRecordThreadLocal.INSTANCE.get().getStorage().getConfiguration().getTimeZone();
return TimeZone.getDefault();
}
public static DateFormat getDateFormatInstance() {
final ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
if (db != null && !db.isClosed())
return db.getStorage().getConfiguration().getDateFormatInstance();
else
return new SimpleDateFormat(DEF_DATE_FORMAT);
}
public static String getDateFormat() {
final ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
if (db != null && !db.isClosed())
return db.getStorage().getConfiguration().getDateFormat();
else
return DEF_DATE_FORMAT;
}
public static DateFormat getDateTimeFormatInstance() {
final ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
if (db != null && !db.isClosed())
return db.getStorage().getConfiguration().getDateTimeFormatInstance();
else
return new SimpleDateFormat(DEF_DATETIME_FORMAT);
}
public static String getDateTimeFormat() {
final ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
if (db != null && !db.isClosed())
return db.getStorage().getConfiguration().getDateTimeFormat();
else
return DEF_DATETIME_FORMAT;
}
public static Date now() {
return getDatabaseCalendar().getTime();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_util_ODateHelper.java |
/**
 * Extension hook points for {@code OrderDao}: modules may attach extra data
 * to a freshly created cart, or further filter the orders returned by a
 * named-order lookup.
 */
public interface OrderDaoExtensionHandler extends ExtensionHandler {

    // Invoked after a new cart is created for the customer.
    public ExtensionResultStatusType attachAdditionalDataToNewCart(Customer customer, Order cart);

    // Invoked to (optionally) filter the in-place list of looked-up orders.
    public ExtensionResultStatusType applyAdditionalOrderLookupFilter(Customer customer, String name, List<Order> orders);
}
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_dao_OrderDaoExtensionHandler.java |
240 | public class ModuleConfigurationType implements BroadleafEnumerationType, Serializable {
private static final long serialVersionUID = 1L;
private static final Map<String, ModuleConfigurationType> TYPES = new LinkedHashMap<String, ModuleConfigurationType>();
public static final ModuleConfigurationType FULFILLMENT_PRICING = new ModuleConfigurationType("FULFILLMENT_PRICING", "Fulfillment Pricing Module");
public static final ModuleConfigurationType TAX_CALCULATION = new ModuleConfigurationType("TAX_CALCULATION", "Tax Calculation Module");
public static final ModuleConfigurationType ADDRESS_VERIFICATION = new ModuleConfigurationType("ADDRESS_VERIFICATION", "Address Verification Module");
public static final ModuleConfigurationType PAYMENT_PROCESSOR = new ModuleConfigurationType("PAYMENT_PROCESSOR", "Payment Processor Module");
public static final ModuleConfigurationType CDN_PROVIDER = new ModuleConfigurationType("CDN_PROVIDER", "Content Delivery Network Module");
public static ModuleConfigurationType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public ModuleConfigurationType() {
//do nothing
}
public ModuleConfigurationType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
@Override
public String getType() {
return type;
}
@Override
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ModuleConfigurationType other = (ModuleConfigurationType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_config_service_type_ModuleConfigurationType.java |
/**
 * Implemented by SPI elements (bindings, injection points, etc.) that can
 * report the dependencies they require to be satisfied.
 */
public interface HasDependencies {

    /**
     * Returns the known dependencies for this type. If this has dependencies whose values are not
     * known statically, a dependency for the {@link org.elasticsearch.common.inject.Injector Injector} will be
     * included in the returned set.
     *
     * @return a possibly empty set
     */
    Set<Dependency<?>> getDependencies();
}
| src_main_java_org_elasticsearch_common_inject_spi_HasDependencies.java |
/**
 * No-op base implementation of {@link OrderServiceExtensionHandler}.
 * Concrete handlers extend this class and override only the callbacks they
 * care about; the defaults report NOT_HANDLED so the extension manager
 * proceeds to other handlers.
 */
public abstract class AbstractOrderServiceExtensionHandler extends AbstractExtensionHandler implements
        OrderServiceExtensionHandler {

    // Default: contribute nothing to newly created named carts.
    public ExtensionResultStatusType attachAdditionalDataToNewNamedCart(Customer customer, Order cart) {
        return ExtensionResultStatusType.NOT_HANDLED;
    }
}
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_AbstractOrderServiceExtensionHandler.java |
/**
 * Test double for {@link XaLogicalLog} that, instead of serializing commands
 * to disk, routes every written command through a verifying visitor so the
 * test can assert on exactly which commands a transaction produces.
 */
private static class VerifyingXaLogicalLog extends XaLogicalLog
{
    private final Visitor<XaCommand, RuntimeException> verifier;

    public VerifyingXaLogicalLog( FileSystemAbstraction fs, Visitor<XaCommand, RuntimeException> verifier )
    {
        // Throwaway collaborators for the superclass; only writeCommand is
        // intercepted below. 25 MB rotation threshold, no pruning.
        super( new File( "log" ), null, null, null, fs, new Monitors(), new SingleLoggingService( DEV_NULL ),
                LogPruneStrategies.NO_PRUNING, null, mock( KernelHealth.class ), 25*1024*1024, ALLOW_ALL );
        this.verifier = verifier;
    }

    @Override
    public synchronized void writeCommand( XaCommand command, int identifier ) throws IOException
    {
        // Divert to the verifier instead of writing to the log file.
        this.verifier.visit( command );
    }
}
| community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java |
3,722 | private static final Comparator<ScheduledEntry> SCHEDULED_ENTRIES_COMPARATOR = new Comparator<ScheduledEntry>() {
@Override
public int compare(ScheduledEntry o1, ScheduledEntry o2) {
if (o1.getScheduleStartTimeInNanos() > o2.getScheduleStartTimeInNanos()) {
return 1;
} else if (o1.getScheduleStartTimeInNanos() < o2.getScheduleStartTimeInNanos()) {
return -1;
}
return 0;
}
}; | 1no label
| hazelcast_src_main_java_com_hazelcast_util_scheduler_SecondsBasedEntryTaskScheduler.java |
175 | public class NullURLProcessor implements URLProcessor {
private static NullURLProcessor _instance = new NullURLProcessor();
public static NullURLProcessor getInstance() {
return _instance;
}
/**
* Always returns true.
*
* @param requestURI
*
* @return true if this URLProcessor is able to process the passed in request
*/
@Override
public boolean canProcessURL(String requestURI) {
return true;
}
/**
* The processURL method should not be called on the NullURLProcessor. This class provides a cacheable
* instance of URLProcessor that indicates to the controlling program (@see BroadleafProcessURLFilter)
* that the current URL cannot be processed.
*
* @param requestURI The requestURI with the context path trimmed off
* @return true if the processor was able to process the passed in URL.
* @throws UnsupportedOperationException
*/
public boolean processURL(String requestURI) {
throw new UnsupportedOperationException();
}
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_web_NullURLProcessor.java |
// Poll until the map reaches the expected size; assertTrueEventually retries the
// assertion to absorb asynchronous eviction/population timing.
assertTrueEventually(new AssertTask() {
    @Override
    public void run() throws Exception {
        assertEquals(nsize, map.size());
    }
});
| hazelcast_src_test_java_com_hazelcast_map_EvictionTest.java |
/**
 * Base class for components with a start/stop/close lifecycle. State transitions are
 * tracked in a {@link Lifecycle}; registered {@link LifecycleListener}s are notified
 * before and after each transition, and subclasses implement the doStart/doStop/doClose
 * template methods.
 */
public abstract class AbstractLifecycleComponent<T> extends AbstractComponent implements LifecycleComponent<T> {

    protected final Lifecycle lifecycle = new Lifecycle();

    // Copy-on-write so listeners may be added/removed concurrently with notification.
    private final List<LifecycleListener> listeners = new CopyOnWriteArrayList<LifecycleListener>();

    protected AbstractLifecycleComponent(Settings settings) {
        super(settings);
    }

    protected AbstractLifecycleComponent(Settings settings, Class customClass) {
        super(settings, customClass);
    }

    protected AbstractLifecycleComponent(Settings settings, Class loggerClass, Class componentClass) {
        super(settings, loggerClass, componentClass);
    }

    protected AbstractLifecycleComponent(Settings settings, String prefixSettings) {
        super(settings, prefixSettings);
    }

    protected AbstractLifecycleComponent(Settings settings, String prefixSettings, Class customClass) {
        super(settings, prefixSettings, customClass);
    }

    protected AbstractLifecycleComponent(Settings settings, String prefixSettings, Class loggerClass, Class componentClass) {
        super(settings, prefixSettings, loggerClass, componentClass);
    }

    /** @return the current lifecycle state of this component */
    @Override
    public Lifecycle.State lifecycleState() {
        return this.lifecycle.state();
    }

    @Override
    public void addLifecycleListener(LifecycleListener listener) {
        listeners.add(listener);
    }

    @Override
    public void removeLifecycleListener(LifecycleListener listener) {
        listeners.remove(listener);
    }

    /**
     * Starts the component if the lifecycle allows it; no-op otherwise.
     * Listeners are notified before doStart() and after the state moves to started.
     */
    @SuppressWarnings({"unchecked"})
    @Override
    public T start() throws ElasticsearchException {
        if (!lifecycle.canMoveToStarted()) {
            return (T) this;
        }
        for (LifecycleListener listener : listeners) {
            listener.beforeStart();
        }
        doStart();
        lifecycle.moveToStarted();
        for (LifecycleListener listener : listeners) {
            listener.afterStart();
        }
        return (T) this;
    }

    protected abstract void doStart() throws ElasticsearchException;

    /**
     * Stops the component if the lifecycle allows it; no-op otherwise.
     * NOTE(review): unlike start(), the state is moved to stopped BEFORE doStop()
     * runs — presumably so in-flight work observes the stopped state; confirm intent.
     */
    @SuppressWarnings({"unchecked"})
    @Override
    public T stop() throws ElasticsearchException {
        if (!lifecycle.canMoveToStopped()) {
            return (T) this;
        }
        for (LifecycleListener listener : listeners) {
            listener.beforeStop();
        }
        lifecycle.moveToStopped();
        doStop();
        for (LifecycleListener listener : listeners) {
            listener.afterStop();
        }
        return (T) this;
    }

    protected abstract void doStop() throws ElasticsearchException;

    /**
     * Closes the component, stopping it first if it is still started.
     */
    @Override
    public void close() throws ElasticsearchException {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.canMoveToClosed()) {
            return;
        }
        for (LifecycleListener listener : listeners) {
            listener.beforeClose();
        }
        lifecycle.moveToClosed();
        doClose();
        for (LifecycleListener listener : listeners) {
            listener.afterClose();
        }
    }

    protected abstract void doClose() throws ElasticsearchException;
}
| src_main_java_org_elasticsearch_common_component_AbstractLifecycleComponent.java |
/**
 * Verifies the client-side {@link PartitionService} proxy: migration-listener
 * operations are unsupported from a client, and partition lookups agree with the
 * server's view. One member and one client are shared across all tests.
 */
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class PartitionServiceProxyTest {

    static HazelcastInstance client;
    static HazelcastInstance server;

    @BeforeClass
    public static void init(){
        server = Hazelcast.newHazelcastInstance();
        client = HazelcastClient.newHazelcastClient(null);
    }

    @AfterClass
    public static void destroy(){
        client.shutdown();
        server.shutdown();
    }

    // Clients cannot register migration listeners; the proxy must reject the call.
    @Test(expected = UnsupportedOperationException.class)
    public void testAddMigrationListener() throws Exception {
        PartitionService p = client.getPartitionService();
        p.addMigrationListener(new DumMigrationListener());
    }

    @Test(expected = UnsupportedOperationException.class)
    public void testRemoveMigrationListener() throws Exception {
        PartitionService p = client.getPartitionService();
        p.removeMigrationListener("");
    }

    @Test
    public void testRandomPartitionKeyNotNull() {
        PartitionService p = client.getPartitionService();
        String key = p.randomPartitionKey();
        assertNotNull(key);
    }

    // The client must map a key to the same partition id as the server.
    @Test
    public void testGetPartition() {
        String key = "Key";
        PartitionService clientPartitionService = client.getPartitionService();
        Partition clientPartition = clientPartitionService.getPartition(key);
        PartitionService serverPartitionService = server.getPartitionService();
        Partition serverPartition = serverPartitionService.getPartition(key);
        assertEquals(clientPartition.getPartitionId(), serverPartition.getPartitionId());
    }

    @Test
    public void testGetPartitions() {
        String key = "Key";
        PartitionService clientPartitionService = client.getPartitionService();
        Set<Partition> clientPartitions = clientPartitionService.getPartitions();
        PartitionService serverPartitionService = server.getPartitionService();
        Set<Partition> serverPartitions = serverPartitionService.getPartitions();
        assertEquals(clientPartitions.size(), serverPartitions.size());
    }

    /** Inert listener used only to trigger the unsupported-operation path. */
    class DumMigrationListener implements MigrationListener {
        @Override
        public void migrationStarted(MigrationEvent migrationEvent) {
        }

        @Override
        public void migrationCompleted(MigrationEvent migrationEvent) {
        }

        @Override
        public void migrationFailed(MigrationEvent migrationEvent) {
        }
    }
}
| hazelcast-client_src_test_java_com_hazelcast_client_partitionservice_PartitionServiceProxyTest.java |
185 | @Test
public class OMultiKeyTest {
@Test
public void testEqualsDifferentSize() {
final OMultiKey multiKey = new OMultiKey(Collections.singletonList("a"));
final OMultiKey anotherMultiKey = new OMultiKey(Arrays.asList(new String[]{"a", "b"}));
assertFalse(multiKey.equals(anotherMultiKey));
}
@Test
public void testEqualsDifferentItems() {
final OMultiKey multiKey = new OMultiKey(Arrays.asList(new String[]{"b", "c"}));
final OMultiKey anotherMultiKey = new OMultiKey(Arrays.asList(new String[]{"a", "b"}));
assertFalse(multiKey.equals(anotherMultiKey));
}
@Test
public void testEqualsTheSame() {
final OMultiKey multiKey = new OMultiKey(Collections.singletonList("a"));
assertTrue(multiKey.equals(multiKey));
}
@Test
public void testEqualsNull() {
final OMultiKey multiKey = new OMultiKey(Collections.singletonList("a"));
assertFalse(multiKey.equals(null));
}
@Test
public void testEqualsDifferentClass() {
final OMultiKey multiKey = new OMultiKey(Collections.singletonList("a"));
assertFalse(multiKey.equals("a"));
}
@Test
public void testEmptyKeyEquals() {
final Map<OMultiKey, Object> multiKeyMap = new HashMap<OMultiKey, Object>();
final OMultiKey multiKey = new OMultiKey(Collections.emptyList());
multiKeyMap.put(multiKey, new Object());
final OMultiKey anotherMultiKey = new OMultiKey(Collections.emptyList());
final Object mapResult = multiKeyMap.get(anotherMultiKey);
assertNotNull(mapResult);
}
@Test
public void testOneKeyMap() {
final Map<OMultiKey, Object> multiKeyMap = new HashMap<OMultiKey, Object>();
final OMultiKey multiKey = new OMultiKey(Collections.singletonList("a"));
multiKeyMap.put(multiKey, new Object());
final OMultiKey anotherMultiKey = new OMultiKey(Collections.singletonList("a"));
final Object mapResult = multiKeyMap.get(anotherMultiKey);
assertNotNull(mapResult);
}
@Test
public void testOneKeyNotInMap() {
final Map<OMultiKey, Object> multiKeyMap = new HashMap<OMultiKey, Object>();
final OMultiKey multiKey = new OMultiKey(Collections.singletonList("a"));
multiKeyMap.put(multiKey, new Object());
final OMultiKey anotherMultiKey = new OMultiKey(Collections.singletonList("b"));
final Object mapResult = multiKeyMap.get(anotherMultiKey);
assertNull(mapResult);
}
@Test
public void testTwoKeyMap() {
final Map<OMultiKey, Object> multiKeyMap = new HashMap<OMultiKey, Object>();
final OMultiKey multiKey = new OMultiKey(Arrays.asList(new String[]{"a", "b"}));
multiKeyMap.put(multiKey, new Object());
final OMultiKey anotherMultiKey = new OMultiKey(Arrays.asList(new String[]{"a", "b"}));
final Object mapResult = multiKeyMap.get(anotherMultiKey);
assertNotNull(mapResult);
}
@Test
public void testTwoKeyMapReordered() {
final Map<OMultiKey, Object> multiKeyMap = new HashMap<OMultiKey, Object>();
final OMultiKey multiKey = new OMultiKey(Arrays.asList(new String[]{"a", "b"}));
multiKeyMap.put(multiKey, new Object());
final OMultiKey anotherMultiKey = new OMultiKey(Arrays.asList(new String[]{"b", "a"}));
final Object mapResult = multiKeyMap.get(anotherMultiKey);
assertNotNull(mapResult);
}
} | 0true
| commons_src_test_java_com_orientechnologies_common_util_OMultiKeyTest.java |
/**
 * Label provider for Ceylon search results: renders images and styled text for
 * Ceylon elements, Java members (IType/IField/IMethod) and the synthetic
 * "archive matches" group node. Unrecognized elements fall back to the superclass.
 */
public class SearchResultsLabelProvider extends CeylonLabelProvider {

    @Override
    public Image getImage(Object element) {
        // Unwrap the source-folder decorator before dispatching on the element type.
        if (element instanceof WithSourceFolder) {
            element = ((WithSourceFolder) element).element;
        }
        String key;
        int decorations;
        if (element instanceof ArchiveMatches) {
            key = RUNTIME_OBJ;
            decorations = 0;
        }
        else if (element instanceof CeylonElement) {
            key = ((CeylonElement) element).getImageKey();
            decorations = ((CeylonElement) element).getDecorations();
        }
        else if (element instanceof IType ||
                element instanceof IField ||
                element instanceof IMethod) {
            key = getImageKeyForDeclaration((IJavaElement) element);
            decorations = 0;
        }
        else {
            key = super.getImageKey(element);
            decorations = super.getDecorationAttributes(element);
        }
        return getDecoratedImage(key, decorations, false);
    }

    @Override
    public StyledString getStyledText(Object element) {
        if (element instanceof WithSourceFolder) {
            element = ((WithSourceFolder) element).element;
        }
        if (element instanceof ArchiveMatches) {
            return new StyledString("Source Archive Matches");
        }
        else if (element instanceof CeylonElement) {
            return getStyledLabelForSearchResult((CeylonElement) element);
        }
        else if (element instanceof IType ||
                element instanceof IField||
                element instanceof IMethod) {
            return getStyledLabelForSearchResult((IJavaElement) element);
        }
        else {
            return super.getStyledText(element);
        }
    }

    /** Renders "label - package - path" for a Ceylon element. */
    private StyledString getStyledLabelForSearchResult(CeylonElement ce) {
        StyledString styledString = new StyledString();
        IFile file = ce.getFile();
        // Workspace-external results have no IFile; use the virtual-file path instead.
        String path = file==null ?
                ce.getVirtualFile().getPath() :
                file.getFullPath().toString();
        styledString.append(ce.getLabel())
            .append(" - " + ce.getPackageLabel(), PACKAGE_STYLER)
            .append(" - " + path, COUNTER_STYLER);
        return styledString;
    }

    /**
     * Renders a Ceylon-style label for a Java member: keyword, name, parameter list,
     * optional return type (per preference), then package and file path.
     * Exceptions from JDT lookups are logged and the affected fragment skipped.
     */
    private StyledString getStyledLabelForSearchResult(IJavaElement je) {
        StyledString styledString = new StyledString();
        String name = je.getElementName();
        if (je instanceof IMethod) {
            try {
                String returnType = ((IMethod) je).getReturnType();
                // "V" is the JVM signature for void.
                if (returnType.equals("V")) {
                    styledString.append("void", Highlights.KW_STYLER);
                }
                else {
                    styledString.append("method", KW_STYLER);
                    /*styleJavaType(styledString,
                            getSignatureSimpleName(returnType));*/
                }
            }
            catch (Exception e) {
                e.printStackTrace();
            }
            styledString.append(' ').append(name, ID_STYLER);
            try {
                styledString.append('(');
                String[] parameterTypes = ((IMethod) je).getParameterTypes();
                String[] parameterNames = ((IMethod) je).getParameterNames();
                boolean first = true;
                for (int i=0; i<parameterTypes.length && i<parameterNames.length; i++) {
                    if (first) {
                        first = false;
                    }
                    else {
                        styledString.append(", ");
                    }
                    styleJavaType(styledString,
                            getSignatureSimpleName(parameterTypes[i]));
                    styledString.append(' ')
                        .append(parameterNames[i], ID_STYLER);
                }
                styledString.append(')');
            }
            catch (Exception e) {
                e.printStackTrace();
            }
            // Optionally append "∊ ReturnType" when the user enabled return-type display.
            if (EditorsUI.getPreferenceStore().getBoolean(DISPLAY_RETURN_TYPES)) {
                try {
                    String returnType = ((IMethod) je).getReturnType();
                    styledString.append(" ∊ ");
                    styleJavaType(styledString,
                            getSignatureSimpleName(returnType),
                            ARROW_STYLER);
                }
                catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
        else if (je instanceof IField) {
            styledString.append("field", KW_STYLER);
            /*try {
                String type = ((IField) je).getTypeSignature();
                styleJavaType(styledString,
                        getSignatureSimpleName(type));
            }
            catch (Exception e) {
                e.printStackTrace();
            }*/
            styledString.append(' ').append(name, ID_STYLER);
            if (EditorsUI.getPreferenceStore().getBoolean(DISPLAY_RETURN_TYPES)) {
                try {
                    String type = ((IField) je).getTypeSignature();
                    styledString.append(" ∊ ");
                    styleJavaType(styledString,
                            getSignatureSimpleName(type),
                            ARROW_STYLER);
                }
                catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
        else if (je instanceof IType) {
            IType type = (IType) je;
            try {
                if (type.isAnnotation()) {
                    styledString.append('@').append("interface ", KW_STYLER);
                }
                else if (type.isInterface()) {
                    styledString.append("interface ", KW_STYLER);
                }
                else if (type.isClass()) {
                    styledString.append("class ", KW_STYLER);
                }
                else if (type.isEnum()) {
                    styledString.append("enum ", KW_STYLER);
                }
            }
            catch (Exception e) {
                e.printStackTrace();
            }
            styledString.append(name, TYPE_ID_STYLER);
        }
        IJavaElement pkg = ((IJavaElement) je.getOpenable()).getParent();
        styledString.append(" - ", PACKAGE_STYLER)
            .append(pkg.getElementName(), PACKAGE_STYLER);
        IFile file = (IFile) je.getResource();
        if (file!=null) {
            styledString.append(" - " + file.getFullPath().toString(), COUNTER_STYLER);
        }
        return styledString;
    }

    /**
     * Maps a Java element kind + visibility to the corresponding Ceylon icon key;
     * public members get the shared icon, everything else the "local" variant.
     */
    private static String getImageKeyForDeclaration(IJavaElement e) {
        if (e==null) return null;
        boolean shared = false;
        if (e instanceof IMember) {
            try {
                shared = Flags.isPublic(((IMember) e).getFlags());
            }
            catch (JavaModelException jme) {
                jme.printStackTrace();
            }
        }
        switch(e.getElementType()) {
        case IJavaElement.METHOD:
            if (shared) {
                return CEYLON_METHOD;
            }
            else {
                return CEYLON_LOCAL_METHOD;
            }
        case IJavaElement.FIELD:
            if (shared) {
                return CEYLON_ATTRIBUTE;
            }
            else {
                return CEYLON_LOCAL_ATTRIBUTE;
            }
        case IJavaElement.TYPE:
            if (shared) {
                return CEYLON_CLASS;
            }
            else {
                return CEYLON_LOCAL_CLASS;
            }
        default:
            return null;
        }
    }
}
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_search_SearchResultsLabelProvider.java |
/**
 * Base class for stress-test worker threads: waits on the shared start latch,
 * runs {@link #doRun()}, and records any throwable for later assertion by the
 * coordinating (main) thread.
 */
public abstract class TestThread extends Thread {

    // volatile: written by this worker thread, read by the asserting thread.
    private volatile Throwable error;
    protected final Random random = new Random();

    public TestThread() {
        // NOTE(review): the empty-string separator looks unintentional — was "-" meant?
        setName(getClass().getName() + "" + ID_GENERATOR.getAndIncrement());
    }

    @Override
    public final void run() {
        try {
            // All workers start together once the coordinator releases the latch.
            startLatch.await();
            doRun();
        } catch (Throwable t) {
            // Optionally abort the whole test on first failure, then remember the error.
            if (stopOnError) {
                stopTest();
            }
            t.printStackTrace();
            this.error = t;
        }
    }

    /** Fails the calling test if this thread terminated with an error. */
    public final void assertNoError() {
        assertNull(getName() + " encountered an error", error);
    }

    /** Work performed by the thread; implementations may throw to fail the test. */
    public abstract void doRun() throws Exception;
}
| hazelcast-client_src_test_java_com_hazelcast_client_stress_StressTestSupport.java |
/**
 * Admin login-failure handler that redirects to a configurable failure URL and
 * propagates the page the user came from (Referer) as a {@code successUrl}
 * parameter so the flow can resume after re-authentication. Session timeouts
 * redirect with a {@code sessionTimeout=true} marker instead.
 */
public class BroadleafAdminAuthenticationFailureHandler extends SimpleUrlAuthenticationFailureHandler {

    private String defaultFailureUrl;

    public BroadleafAdminAuthenticationFailureHandler() {
        super();
    }

    public BroadleafAdminAuthenticationFailureHandler(String defaultFailureUrl) {
        super(defaultFailureUrl);
        this.defaultFailureUrl = defaultFailureUrl;
    }

    @Override
    public void onAuthenticationFailure(HttpServletRequest request, HttpServletResponse response, AuthenticationException exception) throws IOException, ServletException {
        String failureUrlParam = StringUtil.cleanseUrlString(request.getParameter("failureUrl"));
        String successUrlParam = StringUtil.cleanseUrlString(request.getParameter("successUrl"));
        String failureUrl = failureUrlParam==null?null:failureUrlParam.trim();
        Boolean sessionTimeout = (Boolean) request.getAttribute("sessionTimeout");
        // Fall back to the configured default unless this failure was a session timeout.
        if (StringUtils.isEmpty(failureUrl) && BooleanUtils.isNotTrue(sessionTimeout)) {
            failureUrl = defaultFailureUrl;
        }
        if (BooleanUtils.isTrue(sessionTimeout)) {
            failureUrl = "?sessionTimeout=true";
        }

        //Grab url the user, was redirected from
        // NOTE(review): this unconditionally overwrites the cleansed successUrl request
        // parameter read above with the raw Referer header — confirm that is intended
        // and that downstream validation of successUrl prevents an open redirect.
        successUrlParam = request.getHeader("referer");

        if (failureUrl != null) {
            if (!StringUtils.isEmpty(successUrlParam)) {
                if (!failureUrl.contains("?")) {
                    failureUrl += "?successUrl=" + successUrlParam;
                } else {
                    failureUrl += "&successUrl=" + successUrlParam;
                }
            }
            saveException(request, exception);
            getRedirectStrategy().sendRedirect(request, response, failureUrl);
        } else {
            super.onAuthenticationFailure(request, response, exception);
        }
    }
}
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_security_BroadleafAdminAuthenticationFailureHandler.java |
204 | public class OStorageRemote extends OStorageAbstract implements OStorageProxy, OChannelListener {
private static final String DEFAULT_HOST = "localhost";
private static final int DEFAULT_PORT = 2424;
// Separator between multiple server addresses in a connection URL.
private static final String ADDRESS_SEPARATOR = ";";

public static final String PARAM_MIN_POOL = "minpool";
public static final String PARAM_MAX_POOL = "maxpool";
public static final String PARAM_DB_TYPE = "dbtype";

private static final String DRIVER_NAME = "OrientDB Java";

// Runs asynchronous-mode response callbacks off the caller thread.
private final ExecutorService asynchExecutor;
private OContextConfiguration clientConfiguration;
private int connectionRetry;
private int connectionRetryDelay;

// Pool of binary channels to the server; guarded by networkPoolLock.
private final List<OChannelBinaryAsynchClient> networkPool = new ArrayList<OChannelBinaryAsynchClient>();
private final OLock networkPoolLock = new OAdaptiveLock();
private int networkPoolCursor = 0;

protected final List<String> serverURLs = new ArrayList<String>();
private OCluster[] clusters = new OCluster[0];
protected final Map<String, OCluster> clusterMap = new ConcurrentHashMap<String, OCluster>();
private int defaultClusterId;
private int minPool;
private int maxPool;
private final ODocument clusterConfiguration = new ODocument();

private ORemoteServerEventListener asynchEventListener;
// Credentials/options captured at open() time for reconnects.
private String connectionDbType;
private String connectionUserName;
private String connectionUserPassword;
private Map<String, Object> connectionOptions;
private final String clientId;
private final int maxReadQueue;
/**
 * Creates a remote storage proxy for the given URL: parses the server list and
 * prepares the asynchronous response executor. No connection is opened yet.
 */
public OStorageRemote(final String iClientId, final String iURL, final String iMode) throws IOException {
    super(iURL, iURL, iMode, 0, new OCacheLevelTwoLocatorRemote()); // NO TIMEOUT @SINCE 1.5
    clientId = iClientId;
    configuration = null;

    clientConfiguration = new OContextConfiguration();
    connectionRetry = clientConfiguration.getValueAsInteger(OGlobalConfiguration.NETWORK_SOCKET_RETRY);
    connectionRetryDelay = clientConfiguration.getValueAsInteger(OGlobalConfiguration.NETWORK_SOCKET_RETRY_DELAY);
    asynchEventListener = new OStorageRemoteAsynchEventListener(this);
    parseServerURLs();

    asynchExecutor = Executors.newSingleThreadScheduledExecutor();

    maxReadQueue = Runtime.getRuntime().availableProcessors() - 1;
}
/** @return the session id bound to the current thread (thread-local). */
public int getSessionId() {
    return OStorageRemoteThreadLocal.INSTANCE.get().sessionId.intValue();
}
/** @return the server URL bound to the current thread (thread-local). */
public String getServerURL() {
    return OStorageRemoteThreadLocal.INSTANCE.get().serverURL;
}
/** Binds the given server URL and session id to the current thread. */
public void setSessionId(final String iServerURL, final int iSessionId) {
    final OStorageRemoteSession tl = OStorageRemoteThreadLocal.INSTANCE.get();
    tl.serverURL = iServerURL;
    tl.sessionId = iSessionId;
}
/** @return the listener notified of asynchronous server push events, or null. */
public ORemoteServerEventListener getAsynchEventListener() {
    return asynchEventListener;
}
/** Replaces the listener notified of asynchronous server push events. */
public void setAsynchEventListener(final ORemoteServerEventListener iListener) {
    asynchEventListener = iListener;
}
/** Clears the asynchronous server event listener. */
public void removeRemoteServerEventListener() {
    asynchEventListener = null;
}
/**
 * Opens the remote database with the given credentials. Credentials and options
 * are retained (copied) for later transparent reconnects. On failure the storage
 * is closed unless STORAGE_KEEP_OPEN is set, and the error is rethrown.
 */
public void open(final String iUserName, final String iUserPassword, final Map<String, Object> iOptions) {
    addUser();

    lock.acquireExclusiveLock();
    try {
        connectionUserName = iUserName;
        connectionUserPassword = iUserPassword;
        connectionOptions = iOptions != null ? new HashMap<String, Object>(iOptions) : null; // CREATE A COPY TO AVOID USER
        // MANIPULATION
        // POST OPEN
        openRemoteDatabase();

        configuration = new OStorageConfiguration(this);
        configuration.load();

    } catch (Exception e) {
        if (!OGlobalConfiguration.STORAGE_KEEP_OPEN.getValueAsBoolean())
            close();

        if (e instanceof RuntimeException)
            // PASS THROUGH
            throw (RuntimeException) e;
        else
            throw new OStorageException("Cannot open the remote storage: " + name, e);

    } finally {
        lock.releaseExclusiveLock();
    }
}
/**
 * Re-reads the database/cluster information from the server, retrying the
 * request-response round trip until it succeeds (handleException decides
 * whether to retry or rethrow).
 */
public void reload() {
    checkConnection();

    lock.acquireExclusiveLock();
    try {
        OChannelBinaryAsynchClient network = null;
        do {
            try {
                try {
                    network = beginRequest(OChannelBinaryProtocol.REQUEST_DB_RELOAD);
                } finally {
                    endRequest(network);
                }

                try {
                    beginResponse(network);
                    readDatabaseInformation(network);
                    break;
                } finally {
                    endResponse(network);
                }

            } catch (Exception e) {
                handleException(network, "Error on reloading database information", e);
            }
        } while (true);

    } finally {
        lock.releaseExclusiveLock();
    }
}
/** Unsupported over the remote protocol; databases are created via OServerAdmin/console. */
public void create(final Map<String, Object> iOptions) {
    throw new UnsupportedOperationException(
            "Cannot create a database in a remote server. Please use the console or the OServerAdmin class.");
}
/** Unsupported over the remote protocol; use OServerAdmin/console to check existence. */
public boolean exists() {
    throw new UnsupportedOperationException(
            "Cannot check the existance of a database in a remote server. Please use the console or the OServerAdmin class.");
}
/**
 * Sends a DB_CLOSE request (if any pooled channel exists), clears the thread-local
 * session, and — unless still referenced and not forced — closes every pooled
 * channel, shuts down the level-2 cache and unregisters the storage.
 */
public void close(final boolean iForce) {
    OChannelBinaryAsynchClient network = null;

    lock.acquireExclusiveLock();
    try {
        networkPoolLock.lock();
        try {
            if (networkPool.size() > 0) {
                try {
                    network = beginRequest(OChannelBinaryProtocol.REQUEST_DB_CLOSE);
                } finally {
                    endRequest(network);
                }
            }
        } finally {
            networkPoolLock.unlock();
        }

        setSessionId(null, -1);

        if (!checkForClose(iForce))
            return;

        networkPoolLock.lock();
        try {
            // Iterate over a snapshot since OChannelBinaryAsynchClient.close() may
            // remove channels from the pool via the channel listener.
            for (OChannelBinaryAsynchClient n : new ArrayList<OChannelBinaryAsynchClient>(networkPool))
                n.close();
            networkPool.clear();
        } finally {
            networkPoolLock.unlock();
        }

        level2Cache.shutdown();
        super.close(iForce);
        status = STATUS.CLOSED;

        Orient.instance().unregisterStorage(this);
    } catch (Exception e) {
        // NOTE(review): network may still be null here if beginRequest() was never
        // reached (empty pool) or threw — this close() call can NPE; confirm.
        OLogManager.instance().debug(this, "Error on closing remote connection: %s", network);
        network.close();
    } finally {
        lock.releaseExclusiveLock();
    }
}
/** Unsupported over the remote protocol; databases are deleted via OServerAdmin/console. */
public void delete() {
    throw new UnsupportedOperationException(
            "Cannot delete a database in a remote server. Please use the console or the OServerAdmin class.");
}
/** @return a defensive snapshot of the known cluster names, taken under the shared lock. */
public Set<String> getClusterNames() {
    lock.acquireSharedLock();
    try {

        return new HashSet<String>(clusterMap.keySet());

    } finally {
        lock.releaseSharedLock();
    }
}
/**
 * Creates a record on the server. Mode 0 waits synchronously for the assigned
 * position/version; mode 1 handles the response asynchronously through iCallback;
 * mode 2 (or mode 1 without a callback) expects no answer. Retries on network
 * errors via handleException, and waits out DB freezes.
 */
public OStorageOperationResult<OPhysicalPosition> createRecord(final int iDataSegmentId, final ORecordId iRid,
        final byte[] iContent, ORecordVersion iRecordVersion, final byte iRecordType, int iMode,
        final ORecordCallback<OClusterPosition> iCallback) {
    checkConnection();

    if (iMode == 1 && iCallback == null)
        // ASYNCHRONOUS MODE NO ANSWER
        iMode = 2;

    final OPhysicalPosition ppos = new OPhysicalPosition(iDataSegmentId, -1, iRecordType);

    OChannelBinaryAsynchClient lastNetworkUsed = null;
    do {
        try {
            final OChannelBinaryAsynchClient network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_CREATE);
            lastNetworkUsed = network;

            try {
                if (network.getSrvProtocolVersion() >= 10)
                    // SEND THE DATA SEGMENT ID
                    network.writeInt(iDataSegmentId);
                network.writeShort((short) iRid.clusterId);
                network.writeBytes(iContent);
                network.writeByte(iRecordType);
                network.writeByte((byte) iMode);

            } finally {
                endRequest(network);
            }

            switch (iMode) {
            case 0:
                // SYNCHRONOUS
                try {
                    beginResponse(network);
                    iRid.clusterPosition = network.readClusterPosition();
                    ppos.clusterPosition = iRid.clusterPosition;
                    if (network.getSrvProtocolVersion() >= 11) {
                        ppos.recordVersion = network.readVersion();
                    } else
                        ppos.recordVersion = OVersionFactory.instance().createVersion();
                    return new OStorageOperationResult<OPhysicalPosition>(ppos);
                } finally {
                    endResponse(network);
                }

            case 1:
                // ASYNCHRONOUS
                if (iCallback != null) {
                    // Capture the session id so the executor thread reads the response
                    // under the same session as the caller, then invoke the callback.
                    final int sessionId = getSessionId();
                    Callable<Object> response = new Callable<Object>() {
                        public Object call() throws Exception {
                            final OClusterPosition result;

                            try {
                                OStorageRemoteThreadLocal.INSTANCE.get().sessionId = sessionId;
                                beginResponse(network);
                                result = network.readClusterPosition();
                                if (network.getSrvProtocolVersion() >= 11)
                                    network.readVersion();
                            } finally {
                                endResponse(network);
                                OStorageRemoteThreadLocal.INSTANCE.get().sessionId = -1;
                            }
                            iCallback.call(iRid, result);
                            return null;
                        }

                    };
                    asynchExecutor.submit(new FutureTask<Object>(response));
                }
            }
            return new OStorageOperationResult<OPhysicalPosition>(ppos);

        } catch (OModificationOperationProhibitedException mope) {
            handleDBFreeze();
        } catch (Exception e) {
            handleException(lastNetworkUsed, "Error on create record in cluster: " + iRid.clusterId, e);

        }
    } while (true);
}
/** Replica updates are not supported over the remote protocol. */
@Override
public boolean updateReplica(int dataSegmentId, ORecordId rid, byte[] content, ORecordVersion recordVersion, byte recordType)
        throws IOException {
    throw new UnsupportedOperationException("updateReplica()");
}
/** Record-lock callbacks are not supported over the remote protocol. */
@Override
public <V> V callInRecordLock(Callable<V> iCallable, ORID rid, boolean iExclusiveLock) {
    throw new UnsupportedOperationException("callInRecordLock()");
}
/**
 * Fetches the rid and version of a record from the server, retrying on network
 * errors via handleException.
 */
@Override
public ORecordMetadata getRecordMetadata(final ORID rid) {
    OChannelBinaryAsynchClient network = null;
    do {
        try {
            try {
                network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_METADATA);
                network.writeRID(rid);
            } finally {
                endRequest(network);
            }

            try {
                beginResponse(network);
                final ORID responseRid = network.readRID();
                final ORecordVersion responseVersion = network.readVersion();

                return new ORecordMetadata(responseRid, responseVersion);
            } finally {
                endResponse(network);
            }
        } catch (Exception e) {
            handleException(network, "Error on read record " + rid, e);
        }
    } while (true);
}
/**
 * Reads a record (honoring fetch plan and cache flags for newer protocol
 * versions). Any records pre-fetched by the server beyond the main one are
 * pushed into the client level-1 cache. Returns an empty result while another
 * network command is executing on this thread. Retries on network errors.
 */
public OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRid, final String iFetchPlan, final boolean iIgnoreCache,
        final ORecordCallback<ORawBuffer> iCallback, boolean loadTombstones) {
    checkConnection();

    if (OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting)
        // PENDING NETWORK OPERATION, CAN'T EXECUTE IT NOW
        return new OStorageOperationResult<ORawBuffer>(null);

    OChannelBinaryAsynchClient network = null;
    do {
        try {

            try {
                network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_LOAD);
                network.writeRID(iRid);
                network.writeString(iFetchPlan != null ? iFetchPlan : "");
                if (network.getSrvProtocolVersion() >= 9)
                    network.writeByte((byte) (iIgnoreCache ? 1 : 0));

                if (network.getSrvProtocolVersion() >= 13)
                    network.writeByte(loadTombstones ? (byte) 1 : (byte) 0);

            } finally {
                endRequest(network);
            }

            try {
                beginResponse(network);

                // Status byte 0 means record not found.
                if (network.readByte() == 0)
                    return new OStorageOperationResult<ORawBuffer>(null);

                final ORawBuffer buffer = new ORawBuffer(network.readBytes(), network.readVersion(), network.readByte());

                final ODatabaseRecord database = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
                ORecordInternal<?> record;
                // Status byte 2 marks an additional pre-fetched record following.
                while (network.readByte() == 2) {
                    record = (ORecordInternal<?>) OChannelBinaryProtocol.readIdentifiable(network);

                    if (database != null)
                        // PUT IN THE CLIENT LOCAL CACHE
                        database.getLevel1Cache().updateRecord(record);
                }
                return new OStorageOperationResult<ORawBuffer>(buffer);

            } finally {
                endResponse(network);
            }

        } catch (Exception e) {
            handleException(network, "Error on read record " + iRid, e);

        }
    } while (true);
}
/**
 * Updates a record with optimistic version checking. Mode 0 waits synchronously
 * for the new version; mode 1 reads it asynchronously and invokes iCallback;
 * mode 2 (or 1 without callback) expects no answer and the passed-in version is
 * returned as-is. Retries on network errors and waits out DB freezes.
 */
public OStorageOperationResult<ORecordVersion> updateRecord(final ORecordId iRid, final byte[] iContent,
        final ORecordVersion iVersion, final byte iRecordType, int iMode, final ORecordCallback<ORecordVersion> iCallback) {
    checkConnection();

    if (iMode == 1 && iCallback == null)
        // ASYNCHRONOUS MODE NO ANSWER
        iMode = 2;

    OChannelBinaryAsynchClient lastNetworkUsed = null;
    do {
        try {
            final OChannelBinaryAsynchClient network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_UPDATE);
            lastNetworkUsed = network;

            try {
                network.writeRID(iRid);
                network.writeBytes(iContent);
                network.writeVersion(iVersion);
                network.writeByte(iRecordType);
                network.writeByte((byte) iMode);

            } finally {
                endRequest(network);
            }

            switch (iMode) {
            case 0:
                // SYNCHRONOUS
                try {
                    beginResponse(network);
                    return new OStorageOperationResult<ORecordVersion>(network.readVersion());
                } finally {
                    endResponse(network);
                }

            case 1:
                // ASYNCHRONOUS
                if (iCallback != null) {
                    // Preserve the caller's session id on the executor thread while
                    // reading the deferred response, then notify the callback.
                    final int sessionId = getSessionId();
                    Callable<Object> response = new Callable<Object>() {
                        public Object call() throws Exception {
                            ORecordVersion result;

                            try {
                                OStorageRemoteThreadLocal.INSTANCE.get().sessionId = sessionId;
                                beginResponse(network);
                                result = network.readVersion();
                            } finally {
                                endResponse(network);
                                OStorageRemoteThreadLocal.INSTANCE.get().sessionId = -1;
                            }

                            iCallback.call(iRid, result);
                            return null;
                        }

                    };
                    asynchExecutor.submit(new FutureTask<Object>(response));
                }
            }
            return new OStorageOperationResult<ORecordVersion>(iVersion);

        } catch (OModificationOperationProhibitedException mope) {
            handleDBFreeze();
        } catch (Exception e) {
            handleException(lastNetworkUsed, "Error on update record " + iRid, e);

        }
    } while (true);
}
/**
 * Deletes a record (version-checked) via the shared deleteRecord helper on a
 * RECORD_DELETE request. Retries on network errors and waits out DB freezes.
 */
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId iRid, final ORecordVersion iVersion, int iMode,
        final ORecordCallback<Boolean> iCallback) {
    checkConnection();

    if (iMode == 1 && iCallback == null)
        // ASYNCHRONOUS MODE NO ANSWER
        iMode = 2;

    OChannelBinaryAsynchClient network = null;
    do {
        try {
            network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_DELETE);
            return new OStorageOperationResult<Boolean>(deleteRecord(iRid, iVersion, iMode, iCallback, network));

        } catch (OModificationOperationProhibitedException mope) {
            handleDBFreeze();
        } catch (Exception e) {
            handleException(network, "Error on delete record " + iRid, e);

        }
    } while (true);
}
/**
 * Physically cleans out a record via the shared deleteRecord helper on a
 * RECORD_CLEAN_OUT request. Retries on network errors and waits out DB freezes.
 */
@Override
public boolean cleanOutRecord(ORecordId recordId, ORecordVersion recordVersion, int iMode, ORecordCallback<Boolean> callback) {
    checkConnection();

    if (iMode == 1 && callback == null)
        // ASYNCHRONOUS MODE NO ANSWER
        iMode = 2;

    OChannelBinaryAsynchClient network = null;
    do {
        try {
            network = beginRequest(OChannelBinaryProtocol.REQUEST_RECORD_CLEAN_OUT);
            return deleteRecord(recordId, recordVersion, iMode, callback, network);

        } catch (OModificationOperationProhibitedException mope) {
            handleDBFreeze();
        } catch (Exception e) {
            handleException(network, "Error on clean out record " + recordId, e);

        }
    } while (true);
}
/** Backup is not supported over the remote protocol. */
@Override
public void backup(OutputStream out, Map<String, Object> options, Callable<Object> callable) throws IOException {
    throw new UnsupportedOperationException("backup");
}
/** Restore is not supported over the remote protocol. */
@Override
public void restore(InputStream in, Map<String, Object> options, Callable<Object> callable) throws IOException {
    throw new UnsupportedOperationException("restore");
}
/** Counts records in a single cluster by delegating to the multi-cluster variant. */
public long count(final int iClusterId) {
    return count(new int[] { iClusterId });
}
/** Counts records in a single cluster, optionally including tombstones. */
@Override
public long count(int iClusterId, boolean countTombstones) {
    return count(new int[] { iClusterId }, countTombstones);
}
  /**
   * Asks the server for the data range of a cluster.
   *
   * @return a two-element array holding the two cluster positions read from the wire —
   *         presumably [first, last] entry position; confirm against the binary protocol spec
   */
  public OClusterPosition[] getClusterDataRange(final int iClusterId) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_DATARANGE);
          network.writeShort((short) iClusterId);
        } finally {
          // Always flush/release the write lock, even if the request failed mid-write
          endRequest(network);
        }
        try {
          beginResponse(network);
          return new OClusterPosition[] { network.readClusterPosition(), network.readClusterPosition() };
        } finally {
          endResponse(network);
        }
      } catch (Exception e) {
        // Reconnects and retries, or rethrows if not recoverable
        handleException(network, "Error on getting last entry position count in cluster: " + iClusterId, e);
      }
    } while (true);
  }
  /**
   * Returns the physical positions strictly greater than the given one within a cluster
   * (REQUEST_POSITIONS_HIGHER). Empty array when none exist.
   */
  @Override
  public OPhysicalPosition[] higherPhysicalPositions(int iClusterId, OPhysicalPosition iClusterPosition) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_POSITIONS_HIGHER);
          network.writeInt(iClusterId);
          network.writeClusterPosition(iClusterPosition.clusterPosition);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          final int positionsCount = network.readInt();
          if (positionsCount == 0) {
            return new OPhysicalPosition[0];
          } else {
            return readPhysicalPositions(network, positionsCount);
          }
        } finally {
          endResponse(network);
        }
      } catch (Exception e) {
        handleException(network, "Error on retrieving higher positions after " + iClusterPosition.clusterPosition, e);
      }
    } while (true);
  }
  /**
   * Returns the physical positions greater than or equal to the given one within a cluster
   * (REQUEST_POSITIONS_CEILING). Empty array when none exist.
   */
  @Override
  public OPhysicalPosition[] ceilingPhysicalPositions(int clusterId, OPhysicalPosition physicalPosition) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_POSITIONS_CEILING);
          network.writeInt(clusterId);
          network.writeClusterPosition(physicalPosition.clusterPosition);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          final int positionsCount = network.readInt();
          if (positionsCount == 0) {
            return new OPhysicalPosition[0];
          } else {
            return readPhysicalPositions(network, positionsCount);
          }
        } finally {
          endResponse(network);
        }
      } catch (Exception e) {
        handleException(network, "Error on retrieving ceiling positions after " + physicalPosition.clusterPosition, e);
      }
    } while (true);
  }
private OPhysicalPosition[] readPhysicalPositions(OChannelBinaryAsynchClient network, int positionsCount) throws IOException {
final OPhysicalPosition[] physicalPositions = new OPhysicalPosition[positionsCount];
for (int i = 0; i < physicalPositions.length; i++) {
final OPhysicalPosition position = new OPhysicalPosition();
position.clusterPosition = network.readClusterPosition();
position.dataSegmentId = network.readInt();
position.dataSegmentPos = network.readLong();
position.recordSize = network.readInt();
position.recordVersion = network.readVersion();
physicalPositions[i] = position;
}
return physicalPositions;
}
  /**
   * Returns the physical positions strictly lower than the given one within a cluster
   * (REQUEST_POSITIONS_LOWER). Empty array when none exist.
   */
  @Override
  public OPhysicalPosition[] lowerPhysicalPositions(int iClusterId, OPhysicalPosition physicalPosition) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_POSITIONS_LOWER);
          network.writeInt(iClusterId);
          network.writeClusterPosition(physicalPosition.clusterPosition);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          final int positionsCount = network.readInt();
          if (positionsCount == 0) {
            return new OPhysicalPosition[0];
          } else {
            return readPhysicalPositions(network, positionsCount);
          }
        } finally {
          endResponse(network);
        }
      } catch (Exception e) {
        handleException(network, "Error on retrieving lower positions after " + physicalPosition.clusterPosition, e);
      }
    } while (true);
  }
  /**
   * Returns the physical positions lower than or equal to the given one within a cluster
   * (REQUEST_POSITIONS_FLOOR). Empty array when none exist.
   */
  @Override
  public OPhysicalPosition[] floorPhysicalPositions(int clusterId, OPhysicalPosition physicalPosition) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_POSITIONS_FLOOR);
          network.writeInt(clusterId);
          network.writeClusterPosition(physicalPosition.clusterPosition);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          final int positionsCount = network.readInt();
          if (positionsCount == 0) {
            return new OPhysicalPosition[0];
          } else {
            return readPhysicalPositions(network, positionsCount);
          }
        } finally {
          endResponse(network);
        }
      } catch (Exception e) {
        handleException(network, "Error on retrieving floor positions after " + physicalPosition.clusterPosition, e);
      }
    } while (true);
  }
  /** Asks the server for the database size in bytes (REQUEST_DB_SIZE). */
  public long getSize() {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_DB_SIZE);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          return network.readLong();
        } finally {
          endResponse(network);
        }
      } catch (Exception e) {
        handleException(network, "Error on read database size", e);
      }
    } while (true);
  }
  /** Asks the server for the total number of records in the database (REQUEST_DB_COUNTRECORDS). */
  @Override
  public long countRecords() {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_DB_COUNTRECORDS);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          return network.readLong();
        } finally {
          endResponse(network);
        }
      } catch (Exception e) {
        handleException(network, "Error on read database record count", e);
      }
    } while (true);
  }
public long count(final int[] iClusterIds) {
return count(iClusterIds, false);
}
  /**
   * Counts the records contained in the given clusters (REQUEST_DATACLUSTER_COUNT).
   * The tombstone flag is only understood by servers at protocol version &gt;= 13.
   */
  public long count(final int[] iClusterIds, boolean countTombstones) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_COUNT);
          network.writeShort((short) iClusterIds.length);
          for (int iClusterId : iClusterIds)
            network.writeShort((short) iClusterId);
          // @COMPATIBILITY: older servers do not accept the tombstone flag
          if (network.getSrvProtocolVersion() >= 13)
            network.writeByte(countTombstones ? (byte) 1 : (byte) 0);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          return network.readLong();
        } finally {
          endResponse(network);
        }
      } catch (Exception e) {
        handleException(network, "Error on read record count in clusters: " + Arrays.toString(iClusterIds), e);
      }
    } while (true);
  }
  /**
   * Execute the command remotely and get the results back.
   * <p>
   * Asynchronous commands stream records one at a time to the request's result listener;
   * synchronous commands read a single typed reply: 'n' null, 'r' one record, 'l' a list of
   * records, 'a' a serialized simple value. Records received either way are pushed into the
   * client level-1 cache. I/O errors are handled by {@link #handleException}, which either
   * reconnects (the outer loop retries) or rethrows.
   *
   * @throws OCommandExecutionException if the command is not serializable for the wire
   */
  public Object command(final OCommandRequestText iCommand) {
    checkConnection();
    if (!(iCommand instanceof OSerializableStream))
      throw new OCommandExecutionException("Cannot serialize the command to be executed to the server side.");
    OSerializableStream command = iCommand;
    Object result = null;
    final ODatabaseRecord database = ODatabaseRecordThreadLocal.INSTANCE.get();
    OChannelBinaryAsynchClient network = null;
    do {
      // Flag prevents re-entrant command execution on the same thread while on the wire
      OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting = true;
      try {
        final OCommandRequestText aquery = iCommand;
        final boolean asynch = iCommand instanceof OCommandRequestAsynch && ((OCommandRequestAsynch) iCommand).isAsynchronous();
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_COMMAND);
          network.writeByte((byte) (asynch ? 'a' : 's')); // ASYNC / SYNC
          network.writeBytes(OStreamSerializerAnyStreamable.INSTANCE.toStream(command));
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          if (asynch) {
            byte status;
            // ASYNCH: READ ONE RECORD AT TIME
            while ((status = network.readByte()) > 0) {
              final ORecordInternal<?> record = (ORecordInternal<?>) OChannelBinaryProtocol.readIdentifiable(network);
              if (record == null)
                continue;
              switch (status) {
              case 1:
                // PUT AS PART OF THE RESULT SET. INVOKE THE LISTENER
                try {
                  if (!aquery.getResultListener().result(record)) {
                    // EMPTY THE INPUT CHANNEL
                    while (network.in.available() > 0)
                      network.in.read();
                    break;
                  }
                } catch (Throwable t) {
                  // ABSORBE ALL THE USER EXCEPTIONS
                  t.printStackTrace();
                }
                database.getLevel1Cache().updateRecord(record);
                break;
              case 2:
                // PUT IN THE CLIENT LOCAL CACHE
                database.getLevel1Cache().updateRecord(record);
              }
            }
          } else {
            final byte type = network.readByte();
            switch (type) {
            case 'n':
              result = null;
              break;
            case 'r':
              // Single record result
              result = OChannelBinaryProtocol.readIdentifiable(network);
              if (result instanceof ORecord<?>)
                database.getLevel1Cache().updateRecord((ORecordInternal<?>) result);
              break;
            case 'l':
              // List of identifiables
              final int tot = network.readInt();
              final Collection<OIdentifiable> list = new ArrayList<OIdentifiable>(tot);
              for (int i = 0; i < tot; ++i) {
                final OIdentifiable resultItem = OChannelBinaryProtocol.readIdentifiable(network);
                if (resultItem instanceof ORecord<?>)
                  database.getLevel1Cache().updateRecord((ORecordInternal<?>) resultItem);
                list.add(resultItem);
              }
              result = list;
              break;
            case 'a':
              // Simple value serialized as a string
              final String value = new String(network.readBytes());
              result = ORecordSerializerStringAbstract.fieldTypeFromStream(null, ORecordSerializerStringAbstract.getType(value),
                  value);
              break;
            default:
              OLogManager.instance().warn(this, "Received unexpected result from query: %d", type);
            }
            if (network.getSrvProtocolVersion() >= 17) {
              // LOAD THE FETCHED RECORDS IN CACHE
              byte status;
              while ((status = network.readByte()) > 0) {
                final ORecordInternal<?> record = (ORecordInternal<?>) OChannelBinaryProtocol.readIdentifiable(network);
                if (record != null && status == 2)
                  // PUT IN THE CLIENT LOCAL CACHE
                  database.getLevel1Cache().updateRecord(record);
              }
            }
          }
          // Success: leave the retry loop
          break;
        } finally {
          if (aquery.getResultListener() != null) {
            aquery.getResultListener().end();
          }
          endResponse(network);
        }
      } catch (OModificationOperationProhibitedException mope) {
        handleDBFreeze();
      } catch (Exception e) {
        handleException(network, "Error on executing command: " + iCommand, e);
      } finally {
        OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting = false;
      }
    } while (true);
    return result;
  }
  /**
   * Sends the whole transaction to the server (REQUEST_TX_COMMIT) and applies the returned
   * identity and version changes back onto the local transaction.
   * <p>
   * Record entries are drained in rounds because serializing an entry can register new
   * entries on the transaction (e.g. via hooks) — presumably; confirm against the tx
   * implementation. Already-committed entries are re-sent on retry after a reconnection.
   */
  public void commit(final OTransaction iTx, Runnable callback) {
    checkConnection();
    final List<ORecordOperation> committedEntries = new ArrayList<ORecordOperation>();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting = true;
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_TX_COMMIT);
          network.writeInt(iTx.getId());
          network.writeByte((byte) (iTx.isUsingLog() ? 1 : 0));
          final List<ORecordOperation> tmpEntries = new ArrayList<ORecordOperation>();
          if (iTx.getCurrentRecordEntries().iterator().hasNext()) {
            // Drain in rounds: sending an entry may append new entries to the tx
            while (iTx.getCurrentRecordEntries().iterator().hasNext()) {
              for (ORecordOperation txEntry : iTx.getCurrentRecordEntries())
                tmpEntries.add(txEntry);
              iTx.clearRecordEntries();
              if (tmpEntries.size() > 0) {
                for (ORecordOperation txEntry : tmpEntries) {
                  commitEntry(network, txEntry);
                  committedEntries.add(txEntry);
                }
                tmpEntries.clear();
              }
            }
          } else if (committedEntries.size() > 0) {
            // Retry after reconnection: replay the entries already drained from the tx
            for (ORecordOperation txEntry : committedEntries)
              commitEntry(network, txEntry);
          }
          // END OF RECORD ENTRIES
          network.writeByte((byte) 0);
          // SEND INDEX ENTRIES
          network.writeBytes(iTx.getIndexChanges().toStream());
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          // Map temporary client-side RIDs to the definitive ones assigned by the server
          final int createdRecords = network.readInt();
          ORecordId currentRid;
          ORecordId createdRid;
          for (int i = 0; i < createdRecords; i++) {
            currentRid = network.readRID();
            createdRid = network.readRID();
            iTx.updateIdentityAfterCommit(currentRid, createdRid);
          }
          // Apply the new server-side versions to the updated records
          final int updatedRecords = network.readInt();
          ORecordId rid;
          for (int i = 0; i < updatedRecords; ++i) {
            rid = network.readRID();
            ORecordOperation rop = iTx.getRecordEntry(rid);
            if (rop != null)
              rop.getRecord().getRecordVersion().copyFrom(network.readVersion());
          }
          committedEntries.clear();
        } finally {
          endResponse(network);
        }
        // SET ALL THE RECORDS AS UNDIRTY
        for (ORecordOperation txEntry : iTx.getAllRecordEntries())
          txEntry.getRecord().unload();
        // UPDATE THE CACHE ONLY IF THE ITERATOR ALLOWS IT. USE THE STRATEGY TO ALWAYS REMOVE ALL THE RECORDS SINCE THEY COULD BE
        // CHANGED AS CONTENT IN CASE OF TREE AND GRAPH DUE TO CROSS REFERENCES
        OTransactionAbstract.updateCacheFromEntries(iTx, iTx.getAllRecordEntries(), false);
        break;
      } catch (OModificationOperationProhibitedException mope) {
        handleDBFreeze();
      } catch (Exception e) {
        handleException(network, "Error on commit", e);
      } finally {
        OStorageRemoteThreadLocal.INSTANCE.get().commandExecuting = false;
      }
    } while (true);
  }
  /**
   * Intentional no-op on the client side: nothing was sent to the server before commit,
   * so there is nothing remote to undo. Local tx state is handled by the caller.
   */
  public void rollback(OTransaction iTx) {
  }
public int getClusterIdByName(final String iClusterName) {
checkConnection();
if (iClusterName == null)
return -1;
if (Character.isDigit(iClusterName.charAt(0)))
return Integer.parseInt(iClusterName);
final OCluster cluster = clusterMap.get(iClusterName.toLowerCase());
if (cluster == null)
return -1;
return cluster.getId();
}
public String getClusterTypeByName(final String iClusterName) {
checkConnection();
if (iClusterName == null)
return null;
final OCluster cluster = clusterMap.get(iClusterName.toLowerCase());
if (cluster == null)
return null;
return cluster.getType();
}
  /** Returns the id of the default cluster as read from the server at open time. */
  public int getDefaultClusterId() {
    return defaultClusterId;
  }
  /** Convenience overload: adds a cluster letting the server assign the id (-1 = auto). */
  public int addCluster(final String iClusterType, final String iClusterName, final String iLocation,
      final String iDataSegmentName, boolean forceListBased, final Object... iArguments) {
    return addCluster(iClusterType, iClusterName, -1, iLocation, iDataSegmentName, forceListBased, iArguments);
  }
  /**
   * Creates a new cluster on the server (REQUEST_DATACLUSTER_ADD) and mirrors it in the
   * local cluster array/map. Fields sent vary with the server protocol version
   * (@COMPATIBILITY: location &gt;= 10 or PHYSICAL, data segment &gt;= 10, requested id &gt;= 18).
   *
   * @return the id assigned to the new cluster by the server
   */
  public int addCluster(String iClusterType, String iClusterName, int iRequestedId, String iLocation, String iDataSegmentName,
      boolean forceListBased, Object... iParameters) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_ADD);
          network.writeString(iClusterType.toString());
          network.writeString(iClusterName);
          if (network.getSrvProtocolVersion() >= 10 || iClusterType.equalsIgnoreCase("PHYSICAL"))
            network.writeString(iLocation);
          if (network.getSrvProtocolVersion() >= 10)
            network.writeString(iDataSegmentName);
          else
            network.writeInt(-1);
          if (network.getSrvProtocolVersion() >= 18)
            network.writeShort((short) iRequestedId);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          final int clusterId = network.readShort();
          // Register the new cluster locally, growing the array if needed
          final OClusterRemote cluster = new OClusterRemote();
          cluster.setType(iClusterType);
          cluster.configure(this, clusterId, iClusterName.toLowerCase(), null, 0);
          if (clusters.length <= clusterId)
            clusters = Arrays.copyOf(clusters, clusterId + 1);
          clusters[cluster.getId()] = cluster;
          clusterMap.put(cluster.getName().toLowerCase(), cluster);
          return clusterId;
        } finally {
          endResponse(network);
        }
      } catch (OModificationOperationProhibitedException mphe) {
        handleDBFreeze();
      } catch (Exception e) {
        handleException(network, "Error on add new cluster", e);
      }
    } while (true);
  }
  /**
   * Drops a cluster on the server (REQUEST_DATACLUSTER_DROP); on success removes it from
   * the local structures, drops its configuration entry and frees its level-2 cache.
   *
   * @return true if the server reported the cluster as removed
   */
  public boolean dropCluster(final int iClusterId, final boolean iTruncate) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_DATACLUSTER_DROP);
          network.writeShort((short) iClusterId);
        } finally {
          endRequest(network);
        }
        byte result = 0;
        try {
          beginResponse(network);
          result = network.readByte();
        } finally {
          endResponse(network);
        }
        if (result == 1) {
          // REMOVE THE CLUSTER LOCALLY
          final OCluster cluster = clusters[iClusterId];
          clusters[iClusterId] = null;
          clusterMap.remove(cluster.getName());
          if (configuration.clusters.size() > iClusterId)
            configuration.dropCluster(iClusterId); // endResponse must be called before this line, which call updateRecord
          getLevel2Cache().freeCluster(iClusterId);
          return true;
        }
        return false;
      } catch (OModificationOperationProhibitedException mope) {
        handleDBFreeze();
      } catch (Exception e) {
        handleException(network, "Error on removing of cluster", e);
      }
    } while (true);
  }
  /** Convenience overload: adds a data segment with no explicit location. */
  public int addDataSegment(final String iDataSegmentName) {
    return addDataSegment(iDataSegmentName, null);
  }
  /**
   * Creates a new data segment on the server (REQUEST_DATASEGMENT_ADD).
   *
   * @return the id the server assigned to the new segment
   */
  public int addDataSegment(final String iSegmentName, final String iLocation) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_DATASEGMENT_ADD);
          network.writeString(iSegmentName).writeString(iLocation);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          return network.readInt();
        } finally {
          endResponse(network);
        }
      } catch (OModificationOperationProhibitedException mphe) {
        handleDBFreeze();
      } catch (Exception e) {
        handleException(network, "Error on add new data segment", e);
      }
    } while (true);
  }
  /**
   * Drops a data segment on the server (REQUEST_DATASEGMENT_DROP).
   *
   * @return true if the server reported the segment as removed
   */
  public boolean dropDataSegment(final String iSegmentName) {
    checkConnection();
    OChannelBinaryAsynchClient network = null;
    do {
      try {
        try {
          network = beginRequest(OChannelBinaryProtocol.REQUEST_DATASEGMENT_DROP);
          network.writeString(iSegmentName);
        } finally {
          endRequest(network);
        }
        try {
          beginResponse(network);
          return network.readByte() == 1;
        } finally {
          endResponse(network);
        }
      } catch (OModificationOperationProhibitedException mope) {
        handleDBFreeze();
      } catch (Exception e) {
        handleException(network, "Error on remove data segment", e);
      }
    } while (true);
  }
  /** Intentional no-op: flushing to disk is the remote server's responsibility. */
  public void synch() {
  }
public String getPhysicalClusterNameById(final int iClusterId) {
lock.acquireSharedLock();
try {
if (iClusterId >= clusters.length)
return null;
final OCluster cluster = clusters[iClusterId];
return cluster != null ? cluster.getName() : null;
} finally {
lock.releaseSharedLock();
}
}
  /**
   * Returns the NUMBER of registered clusters, not the map itself — the name is
   * misleading but kept for interface compatibility.
   */
  public int getClusterMap() {
    return clusterMap.size();
  }
  /**
   * Returns a view over the cluster array (may contain null slots for dropped
   * clusters). Read under the shared lock; the returned list wraps the live array.
   */
  public Collection<OCluster> getClusterInstances() {
    lock.acquireSharedLock();
    try {
      return Arrays.asList(clusters);
    } finally {
      lock.releaseSharedLock();
    }
  }
public OCluster getClusterById(int iClusterId) {
lock.acquireSharedLock();
try {
if (iClusterId == ORID.CLUSTER_ID_INVALID)
// GET THE DEFAULT CLUSTER
iClusterId = defaultClusterId;
return clusters[iClusterId];
} finally {
lock.releaseSharedLock();
}
}
  /** Storage version is not exposed by the remote protocol; always throws. */
  @Override
  public long getVersion() {
    throw new UnsupportedOperationException("getVersion");
  }
  /** Returns the live cluster-configuration document (updated by {@link #updateClusterConfiguration}). */
  public ODocument getClusterConfiguration() {
    return clusterConfiguration;
  }
  /**
   * Handles exceptions. In case of IO errors retries to reconnect until the configured retry times has reached.
   * <p>
   * Rethrow rules: {@link OTimeoutException} and other {@link OException}s propagate as-is;
   * any non-IOException is wrapped in {@link OStorageException}. Only IOExceptions trigger the
   * reconnection loop; when reconnection succeeds this method RETURNS normally, which is what
   * lets the callers' {@code do/while(true)} loops retry the failed request transparently.
   * In cluster mode a single immediate retry is attempted (other nodes are expected to serve).
   *
   * @param message
   * @param exception
   */
  protected void handleException(final OChannelBinaryAsynchClient iNetwork, final String message, final Exception exception) {
    if (exception instanceof OTimeoutException)
      // TIMEOUT, AVOID LOOP, RE-THROW IT
      throw (OTimeoutException) exception;
    else if (exception instanceof OException)
      // RE-THROW IT
      throw (OException) exception;
    else if (!(exception instanceof IOException))
      throw new OStorageException(message, exception);
    if (status != STATUS.OPEN)
      // STORAGE CLOSED: DON'T HANDLE RECONNECTION
      return;
    OLogManager.instance().warn(this, "Caught I/O errors from %s (local socket=%s), trying to reconnect (error: %s)", iNetwork,
        iNetwork.socket.getLocalSocketAddress(), exception);
    try {
      iNetwork.close();
    } catch (Exception e) {
      // IGNORE ANY EXCEPTION
    }
    final long lostConnectionTime = System.currentTimeMillis();
    final int currentMaxRetry;
    final int currentRetryDelay;
    synchronized (clusterConfiguration) {
      if (!clusterConfiguration.isEmpty()) {
        // IN CLUSTER: NO RETRY AND 0 SLEEP TIME BETWEEN NODES
        currentMaxRetry = 1;
        currentRetryDelay = 0;
      } else {
        currentMaxRetry = connectionRetry;
        currentRetryDelay = connectionRetryDelay;
      }
    }
    for (int retry = 0; retry < currentMaxRetry; ++retry) {
      // WAIT THE DELAY BEFORE TO RETRY
      if (currentRetryDelay > 0)
        try {
          Thread.sleep(currentRetryDelay);
        } catch (InterruptedException e) {
          // THREAD INTERRUPTED: RETURN EXCEPTION
          Thread.currentThread().interrupt();
          break;
        }
      try {
        if (OLogManager.instance().isDebugEnabled())
          OLogManager.instance()
              .debug(this, "Retrying to connect to remote server #" + (retry + 1) + "/" + currentMaxRetry + "...");
        // FORCE RESET OF THREAD DATA (SERVER URL + SESSION ID)
        setSessionId(null, -1);
        if (createConnectionPool() == 0)
          // NO CONNECTION!
          break;
        // REACQUIRE DB SESSION ID
        openRemoteDatabase();
        OLogManager.instance().warn(this,
            "Connection re-acquired transparently after %dms and %d retries: no errors will be thrown at application level",
            System.currentTimeMillis() - lostConnectionTime, retry + 1);
        // RECONNECTED!
        return;
      } catch (Throwable t) {
        // DO NOTHING BUT CONTINUE IN THE LOOP
      }
    }
    // RECONNECTION FAILED: THROW+LOG THE ORIGINAL EXCEPTION
    throw new OStorageException(message, exception);
  }
  /**
   * Opens the database session on the server (REQUEST_DB_OPEN): sends client info and
   * credentials, stores the returned session id, reads cluster layout and cluster
   * configuration, then flips status to OPEN. Pool min/max sizes and db type may be
   * overridden via connection options. Tries every available pooled channel before
   * giving up.
   *
   * @return the channel on which the session was established
   * @throws OStorageException when no connection could be established
   */
  protected OChannelBinaryAsynchClient openRemoteDatabase() throws IOException {
    minPool = OGlobalConfiguration.CLIENT_CHANNEL_MIN_POOL.getValueAsInteger();
    maxPool = OGlobalConfiguration.CLIENT_CHANNEL_MAX_POOL.getValueAsInteger();
    connectionDbType = ODatabaseDocument.TYPE;
    if (connectionOptions != null && connectionOptions.size() > 0) {
      // Connection options override the global pool/type defaults
      if (connectionOptions.containsKey(PARAM_MIN_POOL))
        minPool = Integer.parseInt(connectionOptions.get(PARAM_MIN_POOL).toString());
      if (connectionOptions.containsKey(PARAM_MAX_POOL))
        maxPool = Integer.parseInt(connectionOptions.get(PARAM_MAX_POOL).toString());
      if (connectionOptions.containsKey(PARAM_DB_TYPE))
        connectionDbType = connectionOptions.get(PARAM_DB_TYPE).toString();
    }
    boolean availableConnections = true;
    OChannelBinaryAsynchClient network = null;
    while (availableConnections) {
      try {
        network = getAvailableNetwork();
        try {
          network.writeByte(OChannelBinaryProtocol.REQUEST_DB_OPEN);
          network.writeInt(getSessionId());
          // @SINCE 1.0rc8
          sendClientInfo(network);
          network.writeString(name);
          if (network.getSrvProtocolVersion() >= 8)
            network.writeString(connectionDbType);
          network.writeString(connectionUserName);
          network.writeString(connectionUserPassword);
        } finally {
          endRequest(network);
        }
        final int sessionId;
        try {
          beginResponse(network);
          sessionId = network.readInt();
          setSessionId(network.getServerURL(), sessionId);
          OLogManager.instance().debug(this, "Client connected to %s with session id=%d", network.getServerURL(), sessionId);
          readDatabaseInformation(network);
          // READ CLUSTER CONFIGURATION
          updateClusterConfiguration(network.readBytes());
          // read OrientDB release info
          if (network.getSrvProtocolVersion() >= 14)
            network.readString();
          status = STATUS.OPEN;
          return network;
        } finally {
          endResponse(network);
        }
      } catch (Exception e) {
        handleException(network, "Cannot create a connection to remote server address(es): " + serverURLs, e);
      }
      // Loop again only while the pool still has channels to try
      networkPoolLock.lock();
      try {
        availableConnections = !networkPool.isEmpty();
      } finally {
        networkPoolLock.unlock();
      }
    }
    throw new OStorageException("Cannot create a connection to remote server address(es): " + serverURLs);
  }
  /**
   * Sends driver name/version, protocol version and client id — only understood by
   * servers at protocol version &gt;= 7.
   */
  protected void sendClientInfo(OChannelBinaryAsynchClient network) throws IOException {
    if (network.getSrvProtocolVersion() >= 7) {
      // @COMPATIBILITY 1.0rc8
      network.writeString(DRIVER_NAME).writeString(OConstants.ORIENT_VERSION)
          .writeShort((short) OChannelBinaryProtocol.CURRENT_PROTOCOL_VERSION).writeString(clientId);
    }
  }
/**
* Parse the URL in the following formats:<br/>
*/
protected void parseServerURLs() {
int dbPos = url.indexOf('/');
if (dbPos == -1) {
// SHORT FORM
addHost(url);
name = url;
} else {
name = url.substring(url.lastIndexOf("/") + 1);
for (String host : url.substring(0, dbPos).split(ADDRESS_SEPARATOR))
addHost(host);
}
if (serverURLs.size() == 1 && OGlobalConfiguration.NETWORK_BINARY_DNS_LOADBALANCING_ENABLED.getValueAsBoolean()) {
// LOOK FOR LOAD BALANCING DNS TXT RECORD
final String primaryServer = serverURLs.get(0);
try {
final Hashtable<String, String> env = new Hashtable<String, String>();
env.put("java.naming.factory.initial", "com.sun.jndi.dns.DnsContextFactory");
env.put("com.sun.jndi.ldap.connect.timeout",
OGlobalConfiguration.NETWORK_BINARY_DNS_LOADBALANCING_TIMEOUT.getValueAsString());
final DirContext ictx = new InitialDirContext(env);
final String hostName = primaryServer.indexOf(":") == -1 ? primaryServer : primaryServer.substring(0,
primaryServer.indexOf(":"));
final Attributes attrs = ictx.getAttributes(hostName, new String[] { "TXT" });
final Attribute attr = attrs.get("TXT");
if (attr != null) {
String configuration = (String) attr.get();
if (configuration.startsWith(""))
configuration = configuration.substring(1, configuration.length() - 1);
if (configuration != null) {
final String[] parts = configuration.split(" ");
for (String part : parts) {
if (part.startsWith("s=")) {
addHost(part.substring("s=".length()));
}
}
}
}
} catch (NamingException e) {
}
}
}
/**
* Registers the remote server with port.
*/
protected String addHost(String host) {
if (host.startsWith("localhost"))
host = "127.0.0.1" + host.substring("localhost".length());
// REGISTER THE REMOTE SERVER+PORT
if (host.indexOf(":") == -1)
host += ":" + getDefaultPort();
if (!serverURLs.contains(host))
serverURLs.add(host);
return host;
}
  /** Host used when the URL does not specify one. */
  protected String getDefaultHost() {
    return DEFAULT_HOST;
  }
  /** Port used when a host entry does not specify one. */
  protected int getDefaultPort() {
    return DEFAULT_PORT;
  }
  /**
   * Opens a new channel: first tries the thread's current server URL, then walks the
   * configured server list. The first reachable server is promoted to the head of the
   * list to speed up later connections.
   *
   * @throws OIOException when no configured server is reachable
   */
  protected OChannelBinaryAsynchClient createNetworkConnection() throws IOException, UnknownHostException {
    final String currentServerURL = getServerURL();
    if (currentServerURL != null) {
      // TRY WITH CURRENT URL IF ANY
      try {
        return connect(currentServerURL);
      } catch (Exception e) {
        OLogManager.instance().debug(this, "Error on connecting to %s", e, currentServerURL);
      }
    }
    for (int serverIdx = 0; serverIdx < serverURLs.size(); ++serverIdx) {
      final String server = serverURLs.get(serverIdx);
      try {
        final OChannelBinaryAsynchClient ch = connect(server);
        if (serverIdx > 0) {
          // UPDATE SERVER LIST WITH THE REACHABLE ONE AS HEAD TO SPEED UP FURTHER CONNECTIONS
          serverURLs.remove(serverIdx);
          serverURLs.add(0, server);
          OLogManager.instance().debug(this, "New server list priority: %s...", serverURLs);
        }
        return ch;
      } catch (Exception e) {
        OLogManager.instance().debug(this, "Error on connecting to %s", e, server);
      }
    }
    // ERROR, NO URL IS REACHABLE
    final StringBuilder buffer = new StringBuilder();
    for (String server : serverURLs) {
      if (buffer.length() > 0)
        buffer.append(',');
      buffer.append(server);
    }
    throw new OIOException("Cannot connect to any configured remote nodes: " + buffer);
  }
  /**
   * Opens a binary channel to {@code host:port}, registers this storage as its close
   * listener and adds the channel to the pool.
   */
  protected OChannelBinaryAsynchClient connect(final String server) throws IOException {
    OLogManager.instance().debug(this, "Trying to connect to the remote host %s...", server);
    final int sepPos = server.indexOf(":");
    final String remoteHost = server.substring(0, sepPos);
    final int remotePort = Integer.parseInt(server.substring(sepPos + 1));
    final OChannelBinaryAsynchClient ch = new OChannelBinaryAsynchClient(remoteHost, remotePort, clientConfiguration,
        OChannelBinaryProtocol.CURRENT_PROTOCOL_VERSION, asynchEventListener);
    // REGISTER MYSELF AS LISTENER TO REMOVE THE CHANNEL FROM THE POOL IN CASE OF CLOSING
    ch.registerListener(this);
    // REGISTER IT IN THE POOL
    networkPool.add(ch);
    return ch;
  }
  /**
   * Intentionally a no-op: the pool-based check below was disabled — presumably because
   * connections are (re)created lazily by {@link #getAvailableNetwork()}; confirm before
   * re-enabling.
   */
  protected void checkConnection() {
    // lock.acquireSharedLock();
    //
    // try {
    // synchronized (networkPool) {
    //
    // if (networkPool.size() == 0)
    // throw new ODatabaseException("Connection is closed");
    // }
    //
    // } finally {
    // lock.releaseSharedLock();
    // }
  }
  /**
   * Acquire a network channel from the pool. Don't lock the write stream since the connection usage is exclusive.
   * Writes the command byte and the current session id as the request header; the caller
   * must finish with {@link #endRequest} to flush and release the channel's write lock.
   *
   * @param iCommand the protocol request code to send
   * @return the acquired channel, write-locked for this thread
   * @throws IOException
   */
  protected OChannelBinaryAsynchClient beginRequest(final byte iCommand) throws IOException {
    final OChannelBinaryAsynchClient network = getAvailableNetwork();
    network.writeByte(iCommand);
    network.writeInt(getSessionId());
    return network;
  }
  /**
   * Round-robin scan of the pool for a free channel pointing at the thread's current
   * server URL. If a full round finds none: grows the pool (up to maxPool), otherwise
   * prunes dead channels and blocks up to 5s waiting for one to be released by
   * {@link #endRequest}. The returned channel holds its write lock.
   */
  protected OChannelBinaryAsynchClient getAvailableNetwork() throws IOException, UnknownHostException {
    // FIND THE FIRST FREE CHANNEL AVAILABLE
    OChannelBinaryAsynchClient network = null;
    int beginCursor = networkPoolCursor;
    while (network == null) {
      networkPoolLock.lock();
      try {
        if (networkPoolCursor < 0)
          networkPoolCursor = 0;
        else if (networkPoolCursor >= networkPool.size())
          // RESTART FROM THE BEGINNING
          networkPoolCursor = 0;
        if (networkPool.size() == 0) {
          createConnectionPool();
          networkPoolCursor = 0;
        }
        if (networkPool.size() == 0)
          throw new ONetworkProtocolException("Connection pool closed");
        network = networkPool.get(networkPoolCursor);
        networkPoolCursor++;
        final String serverURL = getServerURL();
        // Only channels bound to the thread's server (or any, when none is pinned) qualify
        if (serverURL == null || network.getServerURL().equals(serverURL)) {
          if (network.getLockWrite().tryAcquireLock())
            // WAS UNLOCKED! USE THIS
            break;
        }
        network = null;
        if (beginCursor >= networkPool.size())
          // THE POOL HAS BEEN REDUCED: RSTART FROM CURRENT POSITION
          beginCursor = networkPoolCursor;
        if (networkPoolCursor == beginCursor) {
          // COMPLETE ROUND AND NOT FREE CONNECTIONS FOUND
          if (networkPool.size() < maxPool) {
            // CREATE NEW CONNECTION
            network = createNetworkConnection();
            network.getLockWrite().lock();
          } else {
            OLogManager.instance().info(this,
                "Network connection pool is full (max=%d): increase max size to avoid such bottleneck on connections", maxPool);
            removeDeadConnections();
            final long startToWait = System.currentTimeMillis();
            // TEMPORARY UNLOCK
            networkPoolLock.unlock();
            try {
              synchronized (networkPool) {
                networkPool.wait(5000);
              }
            } catch (InterruptedException e) {
              // THREAD INTERRUPTED: RETURN EXCEPTION
              Thread.currentThread().interrupt();
              throw new OStorageException("Cannot acquire a connection because the thread has been interrupted");
            } finally {
              networkPoolLock.lock();
            }
            Orient
                .instance()
                .getProfiler()
                .stopChrono("system.network.connectionPool.waitingTime", "Waiting for a free connection from the pool of channels",
                    startToWait);
          }
        }
      } finally {
        networkPoolLock.unlock();
      }
    }
    return network;
  }
private void removeDeadConnections() {
// FREE DEAD CONNECTIONS
int removedDeadConnections = 0;
for (OChannelBinaryAsynchClient n : new ArrayList<OChannelBinaryAsynchClient>(networkPool)) {
if (n != null && !n.isConnected()) //Fixed issue with removing of network connections though connection is active.
{
try {
n.close();
} catch (Exception e) {
}
networkPool.remove(n);
removedDeadConnections++;
}
}
OLogManager.instance().debug(this, "Found and removed %d dead connections from the network pool", removedDeadConnections);
}
  /**
   * Ends the request and unlock the write lock
   * <p>
   * Flushes the channel; on flush failure the channel is closed and evicted from the
   * pool before rethrowing. In every case the write lock is released and waiters on the
   * pool monitor (see {@code getAvailableNetwork}) are notified.
   */
  public void endRequest(final OChannelBinaryAsynchClient iNetwork) throws IOException {
    if (iNetwork == null)
      return;
    try {
      iNetwork.flush();
    } catch (IOException e) {
      try {
        iNetwork.close();
      } catch (Exception e2) {
        // Ignore secondary failures while closing a broken channel
      } finally {
        networkPoolLock.lock();
        try {
          networkPool.remove(iNetwork);
        } finally {
          networkPoolLock.unlock();
        }
      }
      throw e;
    } finally {
      iNetwork.releaseWriteLock();
      networkPoolLock.lock();
      try {
        synchronized (networkPool) {
          networkPool.notifyAll();
        }
      } finally {
        networkPoolLock.unlock();
      }
    }
  }
  /**
   * Starts listening the response.
   * Blocks until the server reply for this thread's session id is available.
   */
  protected void beginResponse(final OChannelBinaryAsynchClient iNetwork) throws IOException {
    iNetwork.beginResponse(getSessionId());
  }
  /**
   * End response reached: release the channel in the pool to being reused
   */
  public void endResponse(final OChannelBinaryAsynchClient iNetwork) {
    iNetwork.endResponse();
  }
  /** This storage acquires channels per-request; it never owns one permanently. */
  public boolean isPermanentRequester() {
    return false;
  }
  /** Consumes a response with no payload: begin and immediately end it. */
  protected void getResponse(final OChannelBinaryAsynchClient iNetwork) throws IOException {
    try {
      beginResponse(iNetwork);
    } finally {
      endResponse(iNetwork);
    }
  }
  /**
   * Applies a cluster-configuration document pushed by the server: re-parses the
   * original URL's hosts, then registers every member's binary-protocol listener
   * address. Synchronized on the configuration document itself.
   *
   * @param obj serialized configuration document; ignored when null
   */
  @SuppressWarnings("unchecked")
  public void updateClusterConfiguration(final byte[] obj) {
    if (obj == null)
      return;
    // UPDATE IT
    synchronized (clusterConfiguration) {
      clusterConfiguration.fromStream(obj);
      final List<ODocument> members = clusterConfiguration.field("members");
      if (members != null) {
        // Rebuild the URL list from scratch: configured hosts first, then cluster members
        serverURLs.clear();
        parseServerURLs();
        for (ODocument m : members)
          if (m != null && !serverURLs.contains((String) m.field("name"))) {
            for (Map<String, Object> listener : ((Collection<Map<String, Object>>) m.field("listeners"))) {
              if (((String) listener.get("protocol")).equals("ONetworkProtocolBinary")) {
                String url = (String) listener.get("listen");
                if (!serverURLs.contains(url))
                  addHost(url);
              }
            }
          }
      }
    }
  }
  /**
   * Writes one transaction entry on the wire: marker byte 1, operation type, RID, record
   * type, then the type-specific payload (stream for CREATED; version+stream for UPDATED;
   * version only for DELETED). LOADED entries are skipped. A serialization failure is
   * signalled to the server with a -1 marker so it aborts the commit.
   */
  private void commitEntry(final OChannelBinaryAsynchClient iNetwork, final ORecordOperation txEntry) throws IOException {
    if (txEntry.type == ORecordOperation.LOADED)
      // JUMP LOADED OBJECTS
      return;
    // SERIALIZE THE RECORD IF NEEDED. THIS IS DONE HERE TO CATCH EXCEPTION AND SEND A -1 AS ERROR TO THE SERVER TO SIGNAL THE ABORT
    // OF TX COMMIT
    byte[] stream = null;
    try {
      switch (txEntry.type) {
      case ORecordOperation.CREATED:
      case ORecordOperation.UPDATED:
        stream = txEntry.getRecord().toStream();
        break;
      }
    } catch (Exception e) {
      // ABORT TX COMMIT
      iNetwork.writeByte((byte) -1);
      throw new OTransactionException("Error on transaction commit", e);
    }
    iNetwork.writeByte((byte) 1);
    iNetwork.writeByte(txEntry.type);
    iNetwork.writeRID(txEntry.getRecord().getIdentity());
    iNetwork.writeByte(txEntry.getRecord().getRecordType());
    switch (txEntry.type) {
    case ORecordOperation.CREATED:
      iNetwork.writeBytes(stream);
      break;
    case ORecordOperation.UPDATED:
      iNetwork.writeVersion(txEntry.getRecord().getRecordVersion());
      iNetwork.writeBytes(stream);
      break;
    case ORecordOperation.DELETED:
      iNetwork.writeVersion(txEntry.getRecord().getRecordVersion());
      break;
    }
  }
/**
 * Ensures the connection pool holds at least {@code minPool} connections,
 * creating the first one eagerly when the pool is empty.
 *
 * @return the pool size after creation
 * @throws IOException          if a connection cannot be established
 * @throws UnknownHostException if the server host cannot be resolved
 */
protected int createConnectionPool() throws IOException, UnknownHostException {
  networkPoolLock.lock();
  try {

    if (networkPool.isEmpty())
      // ALWAYS CREATE THE FIRST CONNECTION
      createNetworkConnection();

    // CREATE THE MINIMUM POOL
    for (int i = networkPool.size(); i < minPool; ++i)
      createNetworkConnection();

    return networkPool.size();

  } finally {
    networkPoolLock.unlock();
  }
}
/**
 * Invoked when the remote database is frozen: waits the configured release
 * timeout before telling the caller whether to retry.
 *
 * @return {@code true} if the caller should retry the operation,
 *         {@code false} if the wait was interrupted
 */
private boolean handleDBFreeze() {
  OLogManager.instance().warn(this,
      "DB is frozen will wait for " + OGlobalConfiguration.CLIENT_DB_RELEASE_WAIT_TIMEOUT.getValue() + " ms. and then retry.");
  try {
    Thread.sleep(OGlobalConfiguration.CLIENT_DB_RELEASE_WAIT_TIMEOUT.getValueAsInteger());
  } catch (InterruptedException ie) {
    // preserve the interrupt status and give up instead of retrying
    Thread.currentThread().interrupt();
    return false;
  }
  return true;
}
/**
 * Reads the cluster list sent by the server after a database open and rebuilds
 * the local cluster array and name->cluster map.
 *
 * @param network the channel positioned at the cluster payload
 * @throws IOException if reading from the channel fails
 */
private void readDatabaseInformation(final OChannelBinaryAsynchClient network) throws IOException {
  // @COMPATIBILITY 1.0rc8
  final int tot = network.getSrvProtocolVersion() >= 7 ? network.readShort() : network.readInt();

  clusters = new OCluster[tot];
  clusterMap.clear();

  for (int i = 0; i < tot; ++i) {
    final OClusterRemote cluster = new OClusterRemote();
    String clusterName = network.readString();
    if (clusterName != null)
      // cluster lookups are case-insensitive: normalize names to lower case
      clusterName = clusterName.toLowerCase();
    final int clusterId = network.readShort();
    final String clusterType = network.readString();
    // data segment id was added in protocol version 12
    final int dataSegmentId = network.getSrvProtocolVersion() >= 12 ? (int) network.readShort() : 0;

    cluster.setType(clusterType);
    cluster.configure(this, clusterId, clusterName, null, dataSegmentId);

    if (clusterId >= clusters.length)
      // cluster ids may be sparse: grow the array to fit the highest id seen
      clusters = Arrays.copyOf(clusters, clusterId + 1);
    clusters[clusterId] = cluster;
    clusterMap.put(clusterName, cluster);
  }

  defaultClusterId = clusterMap.get(CLUSTER_DEFAULT_NAME).getId();
}
/**
 * @return the storage URL prefixed with the remote engine name
 */
@Override
public String getURL() {
  return OEngineRemote.NAME + ":" + url;
}
/**
 * @return the identifier assigned to this client connection
 */
public String getClientId() {
  return clientId;
}
/**
 * Data segments are not supported by the remote storage: only a {@code null}
 * name (the default segment) is tolerated.
 *
 * @param iName the data segment name, or {@code null} for the default segment
 * @return 0 when {@code iName} is {@code null}
 * @throws UnsupportedOperationException for any non-null segment name
 */
public int getDataSegmentIdByName(final String iName) {
  if (iName != null)
    throw new UnsupportedOperationException("getDataSegmentIdByName()");
  return 0;
}
/**
 * Data segments are not supported by the remote storage.
 *
 * @throws UnsupportedOperationException always
 */
public ODataSegment getDataSegmentById(final int iDataSegmentId) {
  throw new UnsupportedOperationException("getDataSegmentById()");
}
/**
 * @return the number of clusters currently known to this storage
 */
public int getClusters() {
  return clusterMap.size();
}
/**
 * Sets the id of the cluster used when no cluster is explicitly specified.
 */
public void setDefaultClusterId(int defaultClusterId) {
  this.defaultClusterId = defaultClusterId;
}
/**
 * @return the engine type of this storage (the remote engine name)
 */
@Override
public String getType() {
  return OEngineRemote.NAME;
}
/**
 * Channel-close callback: removes the closed channel from the connection pool
 * under the pool lock so it is never handed out again.
 */
@Override
public void onChannelClose(final OChannel iChannel) {
  networkPoolLock.lock();
  try {

    networkPool.remove(iChannel);

  } finally {
    networkPoolLock.unlock();
  }
}
/**
 * Writes a delete-record request on the given channel and handles the response
 * according to {@code iMode}: 0 = synchronous (waits for and returns the
 * result), 1 = asynchronous (the result is read on a worker thread and handed
 * to {@code iCallback}, if any).
 *
 * @param iRid      identity of the record to delete
 * @param iVersion  expected record version (MVCC check on the server)
 * @param iMode     0 for synchronous, 1 for asynchronous
 * @param iCallback invoked with the result in asynchronous mode; may be null
 * @param network   the channel the request has already been begun on
 * @return the server result in synchronous mode; {@code false} otherwise
 * @throws IOException if the request cannot be written or the response read
 */
private boolean deleteRecord(final ORecordId iRid, ORecordVersion iVersion, int iMode, final ORecordCallback<Boolean> iCallback,
    final OChannelBinaryAsynchClient network) throws IOException {
  try {

    network.writeRID(iRid);
    network.writeVersion(iVersion);
    network.writeByte((byte) iMode);

  } finally {
    endRequest(network);
  }

  switch (iMode) {
  case 0:
    // SYNCHRONOUS
    try {
      beginResponse(network);
      return network.readByte() == 1;
    } finally {
      endResponse(network);
    }

  case 1:
    // ASYNCHRONOUS
    if (iCallback != null) {
      // capture the session id now: the response is read on another thread
      final int sessionId = getSessionId();
      Callable<Object> response = new Callable<Object>() {
        public Object call() throws Exception {
          Boolean result;

          try {
            // bind this worker thread to the caller's session before reading
            OStorageRemoteThreadLocal.INSTANCE.get().sessionId = sessionId;
            beginResponse(network);
            result = network.readByte() == 1;
          } finally {
            endResponse(network);
            OStorageRemoteThreadLocal.INSTANCE.get().sessionId = -1;
          }

          iCallback.call(iRid, result);
          return null;
        }

      };

      asynchExecutor.submit(new FutureTask<Object>(response));
    }
  }
  // asynchronous mode (or unknown mode): no synchronous result available
  return false;
}
} | 1no label
| client_src_main_java_com_orientechnologies_orient_client_remote_OStorageRemote.java |
// Walks the compilation unit adding folding regions: the import list, code
// bodies, named argument lists and the module descriptor.
new Visitor() {
    @Override
    public void visit(Tree.ImportList importList) {
        super.visit(importList);
        if (!importList.getImports().isEmpty()) {
            ProjectionAnnotation ann = foldIfNecessary(importList);
            // honor the user's "collapse imports automatically" preference
            if (autofoldImports && ann!=null) {
                ann.markCollapsed();
            }
        }
    }
    /*@Override
    public void visit(Tree.Import that) {
        super.visit(that);
        foldIfNecessary(that);
    }*/
    @Override
    public void visit(Tree.Body that) {
        super.visit(that);
        // a Body with no token is synthetic (e.g. "else if"); skip it
        if (that.getToken()!=null) { //for "else if"
            foldIfNecessary(that);
        }
    }
    @Override
    public void visit(Tree.NamedArgumentList that) {
        super.visit(that);
        foldIfNecessary(that);
    }
    @Override
    public void visit(Tree.ModuleDescriptor that) {
        super.visit(that);
        foldIfNecessary(that);
    }
}.visit(cu);
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_ProjectionAnnotationManager.java |
/**
 * Generic callable that takes a single argument and returns a value.
 *
 * @param <RET> the result type
 * @param <PAR> the argument type
 */
public interface OCallable<RET, PAR> {
  /**
   * Executes the callable against the given argument.
   *
   * @param iArgument the input value
   * @return the computed result
   */
  public RET call(PAR iArgument);
}
| commons_src_main_java_com_orientechnologies_common_util_OCallable.java |
1,964 | public static class GenericArrayTypeImpl
implements GenericArrayType, Serializable, CompositeType {
private final Type componentType;
public GenericArrayTypeImpl(Type componentType) {
this.componentType = canonicalize(componentType);
}
public Type getGenericComponentType() {
return componentType;
}
public boolean isFullySpecified() {
return MoreTypes.isFullySpecified(componentType);
}
@Override
public boolean equals(Object o) {
return o instanceof GenericArrayType
&& MoreTypes.equals(this, (GenericArrayType) o);
}
@Override
public int hashCode() {
return MoreTypes.hashCode(this);
}
@Override
public String toString() {
return MoreTypes.toString(this);
}
private static final long serialVersionUID = 0;
} | 0true
| src_main_java_org_elasticsearch_common_inject_internal_MoreTypes.java |
/**
 * A {@link java.io.FilterInputStream} that Base64-encodes or Base64-decodes
 * the bytes read from the underlying stream, depending on the options passed
 * at construction time.
 */
public static class InputStream extends java.io.FilterInputStream {

    private boolean encode; // Encoding or decoding
    private int position; // Current position in the buffer; -1 means "buffer empty, refill needed"
    private byte[] buffer; // Small buffer holding converted data
    private int bufferLength; // Length of buffer (3 or 4)
    private int numSigBytes; // Number of meaningful bytes in the buffer
    private int lineLength; // Bytes emitted on the current output line (encode mode only)
    private boolean breakLines; // Break lines at less than 80 characters
    private int options; // Record options used to create the stream.
    private byte[] decodabet; // Local copies to avoid extra method calls

    /**
     * Constructs a {@link Base64.InputStream} in DECODE mode.
     *
     * @param in the <tt>java.io.InputStream</tt> from which to read data.
     * @since 1.3
     */
    public InputStream(java.io.InputStream in) {
        this(in, DECODE);
    } // end constructor

    /**
     * Constructs a {@link Base64.InputStream} in
     * either ENCODE or DECODE mode.
     * <p/>
     * Valid options:<pre>
     *   ENCODE or DECODE: Encode or Decode as data is read.
     *   DO_BREAK_LINES: break lines at 76 characters
     *     (only meaningful when encoding)</i>
     * </pre>
     * <p/>
     * Example: <code>new Base64.InputStream( in, Base64.DECODE )</code>
     *
     * @param in      the <tt>java.io.InputStream</tt> from which to read data.
     * @param options Specified options
     * @see Base64#ENCODE
     * @see Base64#DECODE
     * @see Base64#DO_BREAK_LINES
     * @since 2.0
     */
    public InputStream(java.io.InputStream in, int options) {

        super(in);
        this.options = options; // Record for later
        this.breakLines = (options & DO_BREAK_LINES) > 0;
        this.encode = (options & ENCODE) > 0;
        // encoding inflates 3 raw bytes to 4; decoding deflates 4 to 3
        this.bufferLength = encode ? 4 : 3;
        this.buffer = new byte[bufferLength];
        this.position = -1;
        this.lineLength = 0;
        this.decodabet = getDecodabet(options);
    }   // end constructor

    /**
     * Reads enough of the input stream to convert
     * to/from Base64 and returns the next byte.
     *
     * @return next byte
     * @since 1.3
     */
    @Override
    public int read() throws java.io.IOException {

        // Do we need to get data?
        if (position < 0) {
            if (encode) {
                byte[] b3 = new byte[3];
                int numBinaryBytes = 0;
                for (int i = 0; i < 3; i++) {
                    int b = in.read();

                    // If end of stream, b is -1.
                    if (b >= 0) {
                        b3[i] = (byte) b;
                        numBinaryBytes++;
                    } else {
                        break; // out of for loop
                    }   // end else: end of stream

                }   // end for: each needed input byte

                if (numBinaryBytes > 0) {
                    encode3to4(b3, 0, numBinaryBytes, buffer, 0, options);
                    position = 0;
                    numSigBytes = 4;
                }   // end if: got data
                else {
                    return -1;  // Must be end of stream
                }   // end else
            }   // end if: encoding

            // Else decoding
            else {
                byte[] b4 = new byte[4];
                int i = 0;
                for (i = 0; i < 4; i++) {
                    // Read four "meaningful" bytes (skip whitespace/line breaks):
                    int b = 0;
                    do {
                        b = in.read();
                    }
                    while (b >= 0 && decodabet[b & 0x7f] <= WHITE_SPACE_ENC);

                    if (b < 0) {
                        break; // Reads a -1 if end of stream
                    }   // end if: end of stream

                    b4[i] = (byte) b;
                }   // end for: each needed input byte

                if (i == 4) {
                    numSigBytes = decode4to3(b4, 0, buffer, 0, options);
                    position = 0;
                }   // end if: got four characters
                else if (i == 0) {
                    // clean end of stream: no partial quantum was started
                    return -1;
                }   // end else if
                else {
                    // Must have broken out from above: a quantum was cut short.
                    throw new java.io.IOException("Improperly padded Base64 input.");
                }   // end

            }   // end else: decode
        }   // end else: get data

        // Got data?
        if (position >= 0) {
            // End of relevant data?
            if ( /*!encode &&*/ position >= numSigBytes) {
                return -1;
            }   // end if: got data

            if (encode && breakLines && lineLength >= MAX_LINE_LENGTH) {
                lineLength = 0;
                return '\n';
            }   // end if
            else {
                lineLength++;   // This isn't important when decoding
                // but throwing an extra "if" seems
                // just as wasteful.

                int b = buffer[position++];

                if (position >= bufferLength) {
                    position = -1; // buffer exhausted: refill on next read()
                }   // end if: end

                return b & 0xFF; // This is how you "cast" a byte that's
                // intended to be unsigned.
            }   // end else
        }   // end if: position >= 0

        // Else error
        else {
            throw new java.io.IOException("Error in Base64 code reading stream.");
        }   // end else
    }   // end read

    /**
     * Calls {@link #read()} repeatedly until the end of stream
     * is reached or <var>len</var> bytes are read.
     * Returns number of bytes read into array or -1 if
     * end of stream is encountered.
     *
     * @param dest array to hold values
     * @param off  offset for array
     * @param len  max number of bytes to read into array
     * @return bytes read into array or -1 if end of stream is encountered.
     * @since 1.3
     */
    @Override
    public int read(byte[] dest, int off, int len)
            throws java.io.IOException {
        int i;
        int b;
        for (i = 0; i < len; i++) {
            b = read();

            if (b >= 0) {
                dest[off + i] = (byte) b;
            } else if (i == 0) {
                return -1;
            } else {
                break; // Out of 'for' loop
            } // Out of 'for' loop
        }   // end for: each byte read
        return i;
    }   // end read

}   // end inner class InputStream
| src_main_java_org_elasticsearch_common_Base64.java |
// Shared marker instance returned when a soft-lock attempt succeeds;
// toString() is overridden purely to make logs/debug output readable.
private static final SoftLock LOCK_SUCCESS = new SoftLock() {
    @Override
    public String toString() {
        return "Lock::Success";
    }
};
| hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_local_LocalRegionCache.java |
/**
 * Service managing distributed lock state: one {@link LockStoreContainer} per
 * partition. Also handles lease eviction scheduling, partition migration
 * (replication, commit, rollback), and automatic release of locks held by
 * members or clients that leave the cluster.
 */
public final class LockServiceImpl implements LockService, ManagedService, RemoteService, MembershipAwareService,
        MigrationAwareService, ClientAwareService {

    private final NodeEngine nodeEngine;
    // one lock-store container per partition, indexed by partition id
    private final LockStoreContainer[] containers;
    // per-namespace schedulers used to expire locks acquired with a lease time
    private final ConcurrentMap<ObjectNamespace, EntryTaskScheduler> evictionProcessors
            = new ConcurrentHashMap<ObjectNamespace, EntryTaskScheduler>();
    // per-service factories that describe how to create a LockStoreInfo for a namespace
    private final ConcurrentMap<String, ConstructorFunction<ObjectNamespace, LockStoreInfo>> constructors
            = new ConcurrentHashMap<String, ConstructorFunction<ObjectNamespace, LockStoreInfo>>();

    // lazily builds the eviction scheduler for a namespace
    private final ConstructorFunction<ObjectNamespace, EntryTaskScheduler> schedulerConstructor =
            new ConstructorFunction<ObjectNamespace, EntryTaskScheduler>() {
                @Override
                public EntryTaskScheduler createNew(ObjectNamespace namespace) {
                    LockEvictionProcessor entryProcessor = new LockEvictionProcessor(nodeEngine, namespace);
                    ScheduledExecutorService scheduledExecutor =
                            nodeEngine.getExecutionService().getDefaultScheduledExecutor();
                    return EntryTaskSchedulerFactory
                            .newScheduler(scheduledExecutor, entryProcessor, ScheduleType.POSTPONE);
                }
            };

    public LockServiceImpl(NodeEngine nodeEngine) {
        this.nodeEngine = nodeEngine;
        this.containers = new LockStoreContainer[nodeEngine.getPartitionService().getPartitionCount()];
        for (int i = 0; i < containers.length; i++) {
            containers[i] = new LockStoreContainer(this, i);
        }
    }

    /**
     * ManagedService lifecycle: registers the default lock-store constructor
     * (backup count 1, no async backups) for this service's own namespaces.
     */
    @Override
    public void init(NodeEngine nodeEngine, Properties properties) {
        registerLockStoreConstructor(SERVICE_NAME, new ConstructorFunction<ObjectNamespace, LockStoreInfo>() {
            public LockStoreInfo createNew(ObjectNamespace key) {
                return new LockStoreInfo() {
                    @Override
                    public int getBackupCount() {
                        return 1;
                    }

                    @Override
                    public int getAsyncBackupCount() {
                        return 0;
                    }
                };
            }
        });
    }

    /** Clears every lock store in every partition. */
    @Override
    public void reset() {
        for (LockStoreContainer container : containers) {
            for (LockStoreImpl lockStore : container.getLockStores()) {
                lockStore.clear();
            }
        }
    }

    /** Drops all lock-store containers on shutdown. */
    @Override
    public void shutdown(boolean terminate) {
        for (LockStoreContainer container : containers) {
            container.clear();
        }
    }

    /**
     * Registers a lock-store constructor for a service; at most one per service.
     *
     * @throws IllegalArgumentException if a constructor is already registered for the service
     */
    @Override
    public void registerLockStoreConstructor(String serviceName,
                                             ConstructorFunction<ObjectNamespace, LockStoreInfo> constructorFunction) {
        boolean put = constructors.putIfAbsent(serviceName, constructorFunction) == null;
        if (!put) {
            throw new IllegalArgumentException("LockStore constructor for service[" + serviceName + "] "
                    + "is already registered!");
        }
    }

    /**
     * Gets the constructor for the given service, or null if the constructor doesn't exist.
     *
     * @param serviceName the name of the constructor to look up.
     * @return the found ConstructorFunction.
     */
    ConstructorFunction<ObjectNamespace, LockStoreInfo> getConstructor(String serviceName) {
        return constructors.get(serviceName);
    }

    /** Creates (or reuses) the lock store for the namespace and wraps it in a proxy. */
    @Override
    public LockStore createLockStore(int partitionId, ObjectNamespace namespace) {
        final LockStoreContainer container = getLockContainer(partitionId);
        container.getOrCreateLockStore(namespace);
        return new LockStoreProxy(container, namespace);
    }

    @Override
    public void clearLockStore(int partitionId, ObjectNamespace namespace) {
        LockStoreContainer container = getLockContainer(partitionId);
        container.clearLockStore(namespace);
    }

    // Schedules eviction of a leased lock after the given delay.
    void scheduleEviction(ObjectNamespace namespace, Data key, long delay) {
        EntryTaskScheduler scheduler = getOrPutSynchronized(
                evictionProcessors, namespace, evictionProcessors, schedulerConstructor);
        scheduler.schedule(delay, key, null);
    }

    // Cancels a previously scheduled eviction (e.g. when the lock is released early).
    void cancelEviction(ObjectNamespace namespace, Data key) {
        EntryTaskScheduler scheduler = getOrPutSynchronized(
                evictionProcessors, namespace, evictionProcessors, schedulerConstructor);
        scheduler.cancel(key);
    }

    public LockStoreContainer getLockContainer(int partitionId) {
        return containers[partitionId];
    }

    public LockStoreImpl getLockStore(int partitionId, ObjectNamespace namespace) {
        return getLockContainer(partitionId).getOrCreateLockStore(namespace);
    }

    @Override
    public void memberAdded(MembershipServiceEvent event) {
    }

    /** Releases all non-transactional locks held by the member that left. */
    @Override
    public void memberRemoved(MembershipServiceEvent event) {
        final MemberImpl member = event.getMember();
        final String uuid = member.getUuid();
        releaseLocksOf(uuid);
    }

    @Override
    public void memberAttributeChanged(MemberAttributeServiceEvent event) {
    }

    private void releaseLocksOf(String uuid) {
        for (LockStoreContainer container : containers) {
            for (LockStoreImpl lockStore : container.getLockStores()) {
                releaseLock(uuid, container, lockStore);
            }
        }
    }

    // Unlocks every non-transactional lock in the store owned by the given uuid.
    private void releaseLock(String uuid, LockStoreContainer container, LockStoreImpl lockStore) {
        Collection<LockResource> locks = lockStore.getLocks();
        for (LockResource lock : locks) {
            Data key = lock.getKey();
            if (uuid.equals(lock.getOwner()) && !lock.isTransactional()) {
                sendUnlockOperation(container, lockStore, key);
            }
        }
    }

    // Dispatches a forced UnlockOperation (as async backup, no response expected)
    // to the partition owning the key.
    private void sendUnlockOperation(LockStoreContainer container, LockStoreImpl lockStore, Data key) {
        UnlockOperation op = new UnlockOperation(lockStore.getNamespace(), key, -1, true);
        op.setAsyncBackup(true);
        op.setNodeEngine(nodeEngine);
        op.setServiceName(SERVICE_NAME);
        op.setService(LockServiceImpl.this);
        op.setResponseHandler(ResponseHandlerFactory.createEmptyResponseHandler());
        op.setPartitionId(container.getPartitionId());
        nodeEngine.getOperationService().executeOperation(op);
    }

    /** @return a snapshot of every lock across all partitions */
    @Override
    public Collection<LockResource> getAllLocks() {
        final Collection<LockResource> locks = new LinkedList<LockResource>();
        for (LockStoreContainer container : containers) {
            for (LockStoreImpl lockStore : container.getLockStores()) {
                locks.addAll(lockStore.getLocks());
            }
        }
        return locks;
    }

    @Override
    public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
    }

    /** Builds the replication operation for a migrating partition, or null when there is nothing to replicate. */
    @Override
    public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
        int partitionId = event.getPartitionId();
        LockStoreContainer container = containers[partitionId];
        int replicaIndex = event.getReplicaIndex();
        LockReplicationOperation op = new LockReplicationOperation(container, partitionId, replicaIndex);
        if (op.isEmpty()) {
            return null;
        } else {
            return op;
        }
    }

    /** On commit, the migration source drops its (now stale) copy of the partition's locks. */
    @Override
    public void commitMigration(PartitionMigrationEvent event) {
        if (event.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
            clearPartition(event.getPartitionId());
        }
    }

    private void clearPartition(int partitionId) {
        final LockStoreContainer container = containers[partitionId];
        for (LockStoreImpl ls : container.getLockStores()) {
            ls.clear();
        }
    }

    /** On rollback, the migration destination drops the partially received partition data. */
    @Override
    public void rollbackMigration(PartitionMigrationEvent event) {
        if (event.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
            clearPartition(event.getPartitionId());
        }
    }

    @Override
    public void clearPartitionReplica(int partitionId) {
        clearPartition(partitionId);
    }

    @Override
    public DistributedObject createDistributedObject(String objectId) {
        return new LockProxy(nodeEngine, this, objectId);
    }

    /** Force-unlocks the object's key in every partition when the distributed object is destroyed. */
    @Override
    public void destroyDistributedObject(String objectId) {
        Data key = nodeEngine.getSerializationService().toData(objectId);
        for (LockStoreContainer container : containers) {
            InternalLockNamespace namespace = new InternalLockNamespace(objectId);
            LockStoreImpl lockStore = container.getOrCreateLockStore(namespace);
            lockStore.forceUnlock(key);
        }
    }

    /** Releases all non-transactional locks held by a disconnected client. */
    @Override
    public void clientDisconnected(String clientUuid) {
        releaseLocksOf(clientUuid);
    }
}
| hazelcast_src_main_java_com_hazelcast_concurrent_lock_LockServiceImpl.java |
1,591 | public class ReplicaAfterPrimaryActiveAllocationDecider extends AllocationDecider {
@Inject
public ReplicaAfterPrimaryActiveAllocationDecider(Settings settings) {
super(settings);
}
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return canAllocate(shardRouting, allocation);
}
public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
if (shardRouting.primary()) {
return allocation.decision(Decision.YES, "shard is primary");
}
MutableShardRouting primary = allocation.routingNodes().activePrimary(shardRouting);
if (primary == null) {
return allocation.decision(Decision.NO, "primary shard is not yet active");
}
return allocation.decision(Decision.YES, "primary is already active");
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_allocation_decider_ReplicaAfterPrimaryActiveAllocationDecider.java |
3,053 | public interface PostingsFormatProvider {
public static final String POSTINGS_FORMAT_SETTINGS_PREFIX = "index.codec.postings_format";
/**
* A helper class to lookup {@link PostingsFormatProvider providers} by their unique {@link PostingsFormatProvider#name() name}
*/
public static class Helper {
/**
* Looks up and creates {@link PostingsFormatProvider} for the given name.
* <p>
* The settings for the created {@link PostingsFormatProvider} is taken from the given index settings.
* All settings with the {@value PostingsFormatProvider#POSTINGS_FORMAT_SETTINGS_PREFIX} prefix
* and the formats name as the key are passed to the factory.
* </p>
*
* @param indexSettings the index settings to configure the postings format
* @param name the name of the postings format to lookup
* @param postingFormatFactories the factory mapping to lookup the {@link Factory} to create the {@link PostingsFormatProvider}
* @return a fully configured {@link PostingsFormatProvider} for the given name.
* @throws org.elasticsearch.ElasticsearchIllegalArgumentException
* if the no {@link PostingsFormatProvider} for the given name parameter could be found.
*/
public static PostingsFormatProvider lookup(@IndexSettings Settings indexSettings, String name, Map<String, Factory> postingFormatFactories) throws ElasticsearchIllegalArgumentException {
Factory factory = postingFormatFactories.get(name);
if (factory == null) {
throw new ElasticsearchIllegalArgumentException("failed to find postings_format [" + name + "]");
}
Settings settings = indexSettings.getGroups(POSTINGS_FORMAT_SETTINGS_PREFIX).get(name);
if (settings == null) {
settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
}
return factory.create(name, settings);
}
}
/**
* Returns this providers {@link PostingsFormat} instance.
*/
PostingsFormat get();
/**
* Returns the name of this providers {@link PostingsFormat}
*/
String name();
/**
* A simple factory used to create {@link PostingsFormatProvider} used by
* delegating providers like {@link BloomFilterLucenePostingsFormatProvider} or
* {@link PulsingPostingsFormatProvider}. Those providers wrap other
* postings formats to enrich their capabilities.
*/
public interface Factory {
PostingsFormatProvider create(String name, Settings settings);
}
} | 0true
| src_main_java_org_elasticsearch_index_codec_postingsformat_PostingsFormatProvider.java |
111 | @RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ClientNearCacheConfigTest {
@Test
public void testSpecificNearCacheConfig_whenAsteriskAtTheEnd(){
final ClientConfig clientConfig = new ClientConfig();
final NearCacheConfig genericNearCacheConfig = new NearCacheConfig();
genericNearCacheConfig.setName("map*");
clientConfig.addNearCacheConfig(genericNearCacheConfig);
final NearCacheConfig specificNearCacheConfig = new NearCacheConfig();
specificNearCacheConfig.setName("mapStudent*");
clientConfig.addNearCacheConfig(specificNearCacheConfig);
final NearCacheConfig mapFoo = clientConfig.getNearCacheConfig("mapFoo");
final NearCacheConfig mapStudentFoo = clientConfig.getNearCacheConfig("mapStudentFoo");
assertEquals(genericNearCacheConfig, mapFoo);
assertEquals(specificNearCacheConfig, mapStudentFoo);
}
@Test
public void testSpecificNearCacheConfig_whenAsteriskAtTheBeginning(){
final ClientConfig clientConfig = new ClientConfig();
final NearCacheConfig genericNearCacheConfig = new NearCacheConfig();
genericNearCacheConfig.setName("*Map");
clientConfig.addNearCacheConfig(genericNearCacheConfig);
final NearCacheConfig specificNearCacheConfig = new NearCacheConfig();
specificNearCacheConfig.setName("*MapStudent");
clientConfig.addNearCacheConfig(specificNearCacheConfig);
final NearCacheConfig mapFoo = clientConfig.getNearCacheConfig("fooMap");
final NearCacheConfig mapStudentFoo = clientConfig.getNearCacheConfig("fooMapStudent");
assertEquals(genericNearCacheConfig, mapFoo);
assertEquals(specificNearCacheConfig, mapStudentFoo);
}
@Test
public void testSpecificNearCacheConfig_whenAsteriskInTheMiddle(){
final ClientConfig clientConfig = new ClientConfig();
final NearCacheConfig genericNearCacheConfig = new NearCacheConfig();
genericNearCacheConfig.setName("map*Bar");
clientConfig.addNearCacheConfig(genericNearCacheConfig);
final NearCacheConfig specificNearCacheConfig = new NearCacheConfig();
specificNearCacheConfig.setName("mapStudent*Bar");
clientConfig.addNearCacheConfig(specificNearCacheConfig);
final NearCacheConfig mapFoo = clientConfig.getNearCacheConfig("mapFooBar");
final NearCacheConfig mapStudentFoo = clientConfig.getNearCacheConfig("mapStudentFooBar");
assertEquals(genericNearCacheConfig, mapFoo);
assertEquals(specificNearCacheConfig, mapStudentFoo);
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_ClientNearCacheConfigTest.java |
/**
 * Extension handler hooks invoked by the Solr search service while indexing
 * products and while building and executing search queries.
 */
public interface SolrSearchServiceExtensionHandler extends ExtensionHandler {

    /**
     * Populates {@code prefixList} with any prefix required for the passed in searchable facet field.
     *
     * @param field the facet field being processed
     * @param prefixList mutable list the handler appends prefixes to
     * @return whether this handler handled the call
     */
    public ExtensionResultStatusType buildPrefixListForSearchableFacet(Field field, List<String> prefixList);

    /**
     * Populates {@code prefixList} with any prefix required for the passed in searchable field.
     *
     * @param field the searchable field being processed
     * @param searchableFieldType the field type being indexed/queried
     * @param prefixList mutable list the handler appends prefixes to
     * @return whether this handler handled the call
     */
    public ExtensionResultStatusType buildPrefixListForSearchableField(Field field, FieldType searchableFieldType,
            List<String> prefixList);

    /**
     * Filters/builds the search facet ranges for the provided dto.
     *
     * @param dto the facet whose ranges are being built
     * @param ranges mutable list of ranges the handler may modify
     * @return whether this handler handled the call
     */
    public ExtensionResultStatusType filterSearchFacetRanges(SearchFacetDTO dto, List<SearchFacetRange> ranges);

    /**
     * Given the input field, populates the values map with the entries needed for the
     * passed in field.
     *
     * For example, a handler might create multiple entries for the given passed in field.
     *
     * @param product the product being indexed
     * @param field the field to extract values for
     * @param fieldType the indexed type of the field
     * @param values mutable map of property name to extracted value
     * @param propertyName the property path being read from the product
     * @param locales locales to extract translated values for
     * @return whether this handler handled the call
     * @throws IllegalAccessException
     * @throws InvocationTargetException
     * @throws NoSuchMethodException
     */
    public ExtensionResultStatusType addPropertyValues(Product product, Field field, FieldType fieldType,
            Map<String, Object> values, String propertyName, List<Locale> locales)
            throws IllegalAccessException, InvocationTargetException, NoSuchMethodException;

    /**
     * Provides an extension point to modify the SolrQuery before it is executed.
     *
     * @param query the query being built
     * @param qualifiedSolrQuery the raw query string
     * @param facets facets participating in the search
     * @param searchCriteria the criteria driving the search
     * @param defaultSort the default sort to apply when none is specified
     * @return whether this handler handled the call
     */
    public ExtensionResultStatusType modifySolrQuery(SolrQuery query, String qualifiedSolrQuery,
            List<SearchFacetDTO> facets, ProductSearchCriteria searchCriteria, String defaultSort);

    /**
     * Allows the handler to attach additional fields to the document that are not configured via the DB.
     *
     * @param product the product being indexed
     * @param document the Solr document being built
     * @param shs helper service for field-name resolution
     * @return whether this handler handled the call
     */
    public ExtensionResultStatusType attachAdditionalBasicFields(Product product, SolrInputDocument document,
            SolrHelperService shs);
}
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_service_solr_SolrSearchServiceExtensionHandler.java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.