Unnamed: 0 (int64, 0-6.45k) | func (stringlengths 29-253k) | target (class label, 2 classes) | project (stringlengths 36-167) |
---|---|---|---|
1,771 | public class AdornedTargetRetrieval {
private PersistencePackage persistencePackage;
private PersistencePerspective persistencePerspective;
private Entity entity;
private AdornedTargetList adornedTargetList;
private Map<String, FieldMetadata> mergedProperties;
private List<Serializable> records;
private int index;
private List<FilterMapping> filterMappings;
private CriteriaTransferObject cto;
// This constructor is used by the update method
public AdornedTargetRetrieval(PersistencePackage persistencePackage, Entity entity, AdornedTargetList adornedTargetList) {
this(persistencePackage, adornedTargetList, new CriteriaTransferObject());
this.entity = entity;
}
// This constructor is used by the fetch method
public AdornedTargetRetrieval(PersistencePackage persistencePackage, AdornedTargetList adornedTargetList, CriteriaTransferObject cto) {
this.persistencePackage = persistencePackage;
this.persistencePerspective = persistencePackage.getPersistencePerspective();
this.adornedTargetList = adornedTargetList;
this.cto = cto;
}
public Map<String, FieldMetadata> getMergedProperties() {
return mergedProperties;
}
public List<Serializable> getRecords() {
return records;
}
public int getIndex() {
return index;
}
public List<FilterMapping> getFilterMappings() {
return filterMappings;
}
public AdornedTargetRetrieval invokeForFetch() throws ClassNotFoundException, NoSuchMethodException, IllegalAccessException, InvocationTargetException, FieldNotAvailableException, NoSuchFieldException {
invokeInternal();
return this;
}
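// Update path: fetch the collection filtered down to the linked entity, then locate the updated target's position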
public AdornedTargetRetrieval invokeForUpdate() throws ClassNotFoundException, NoSuchMethodException, IllegalAccessException, InvocationTargetException, FieldNotAvailableException, NoSuchFieldException {
FilterAndSortCriteria filterCriteria = cto.get(adornedTargetList.getCollectionFieldName());
filterCriteria.setFilterValue(entity.findProperty(adornedTargetList.getLinkedObjectPath() + "." + adornedTargetList.getLinkedIdProperty()).getValue());
invokeInternal();
index = 0;
Long myEntityId = Long.valueOf(entity.findProperty(adornedTargetList.getTargetObjectPath() + "." + adornedTargetList.getTargetIdProperty()).getValue());
FieldManager fieldManager = getFieldManager();
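// Walk the fetched records until the target id matches the updated entity, tracking its position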
for (Serializable record : records) {
Long targetId = (Long) fieldManager.getFieldValue(record, adornedTargetList.getTargetObjectPath() + "." + adornedTargetList.getTargetIdProperty());
if (myEntityId.equals(targetId)) {
break;
}
index++;
}
return this;
}
private void invokeInternal() throws ClassNotFoundException {
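// Carry the adorned target list's sort field (if any) over to the criteria transfer object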
if (adornedTargetList.getSortField() != null) {
FilterAndSortCriteria sortCriteria = cto.get(adornedTargetList.getSortField());
sortCriteria.setSortAscending(adornedTargetList.getSortAscending());
}
Class<?>[] entities = persistenceManager.getPolymorphicEntities(adornedTargetList
.getAdornedTargetEntityClassname());
mergedProperties = persistenceManager.getDynamicEntityDao().getMergedProperties(
adornedTargetList.getAdornedTargetEntityClassname(),
entities,
null,
new String[]{},
new ForeignKey[]{},
MergedPropertyType.ADORNEDTARGETLIST,
persistencePerspective.getPopulateToOneFields(),
persistencePerspective.getIncludeFields(),
persistencePerspective.getExcludeFields(),
persistencePerspective.getConfigurationKey(),
""
);
filterMappings = getAdornedTargetFilterMappings(persistencePerspective, cto, mergedProperties, adornedTargetList);
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
Class<?>[] entities2 = persistenceManager.getPolymorphicEntities(ceilingEntityFullyQualifiedClassname);
Map<String, FieldMetadata> mergedPropertiesTarget = persistenceManager.getDynamicEntityDao()
.getMergedProperties(
ceilingEntityFullyQualifiedClassname,
entities2,
null,
persistencePerspective.getAdditionalNonPersistentProperties(),
persistencePerspective.getAdditionalForeignKeys(),
MergedPropertyType.PRIMARY,
persistencePerspective.getPopulateToOneFields(),
persistencePerspective.getIncludeFields(),
persistencePerspective.getExcludeFields(),
persistencePerspective.getConfigurationKey(),
""
);
// We need to make sure that the target merged properties have the target object path prefix
Map<String, FieldMetadata> convertedMergedPropertiesTarget = new HashMap<String, FieldMetadata>();
String prefix = adornedTargetList.getTargetObjectPath();
for (Entry<String, FieldMetadata> entry : mergedPropertiesTarget.entrySet()) {
convertedMergedPropertiesTarget.put(prefix + "." + entry.getKey(), entry.getValue());
}
// We also need to make sure that the cto filter and sort criteria have the prefix
Map<String, FilterAndSortCriteria> convertedCto = new HashMap<String, FilterAndSortCriteria>();
for (Entry<String, FilterAndSortCriteria> entry : cto.getCriteriaMap().entrySet()) {
if (adornedTargetList.getSortField() != null && entry.getKey().equals(adornedTargetList.getSortField())) {
convertedCto.put(entry.getKey(), entry.getValue());
} else {
convertedCto.put(prefix + "." + entry.getKey(), entry.getValue());
}
}
cto.setCriteriaMap(convertedCto);
List<FilterMapping> filterMappings2 = getBasicFilterMappings(persistencePerspective, cto, convertedMergedPropertiesTarget, ceilingEntityFullyQualifiedClassname);
for (FilterMapping fm : filterMappings2) {
fm.setInheritedFromClass(entities[0]);
}
filterMappings.addAll(filterMappings2);
records = getPersistentRecords(adornedTargetList.getAdornedTargetEntityClassname(), filterMappings, cto.getFirstResult(), cto.getMaxResults());
}
} | 1 (no label)
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_persistence_module_AdornedTargetListPersistenceModule.java |
2,246 | public class VersionsTests extends ElasticsearchLuceneTestCase {
public static DirectoryReader reopen(DirectoryReader reader) throws IOException {
return reopen(reader, true);
}
public static DirectoryReader reopen(DirectoryReader reader, boolean newReaderExpected) throws IOException {
DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
if (newReader != null) {
reader.close();
} else {
assertFalse(newReaderExpected);
}
return newReader;
}
@Test
public void testVersions() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
DirectoryReader directoryReader = DirectoryReader.open(writer, true);
MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
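// Index a document with a uid but no version: the version should read back as NOT_SET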
Document doc = new Document();
doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
writer.addDocument(doc);
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_SET));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(Versions.NOT_SET));
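// Re-index the document with an explicit version doc value; it should now be readable as 1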
doc = new Document();
doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 1));
writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(1L));
doc = new Document();
Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE);
Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 2);
doc.add(uid);
doc.add(version);
writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(2L));
// test reuse of uid field
doc = new Document();
version.setLongValue(3);
doc.add(uid);
doc.add(version);
writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(3L));
writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
directoryReader.close();
writer.close();
dir.close();
}
@Test
public void testNestedDocuments() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
List<Document> docs = new ArrayList<Document>();
for (int i = 0; i < 4; ++i) {
// Nested
Document doc = new Document();
doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
docs.add(doc);
}
// Root
Document doc = new Document();
doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
NumericDocValuesField version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
doc.add(version);
docs.add(doc);
writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
DirectoryReader directoryReader = DirectoryReader.open(writer, true);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5L));
version.setLongValue(6L);
writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
version.setLongValue(7L);
writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(7L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(7L));
writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
directoryReader.close();
writer.close();
dir.close();
}
@Test
public void testBackwardCompatibility() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
DirectoryReader directoryReader = DirectoryReader.open(writer, true);
MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
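// Index two documents using the legacy encoding, where the version travels in the uid term's payload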
Document doc = new Document();
UidField uidAndVersion = new UidField("1", 1L);
doc.add(uidAndVersion);
writer.addDocument(doc);
uidAndVersion.uid = "2";
uidAndVersion.version = 2;
writer.addDocument(doc);
writer.commit();
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1L));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "2")), equalTo(2L));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "3")), equalTo(Versions.NOT_FOUND));
directoryReader.close();
writer.close();
dir.close();
}
// This is how versions used to be encoded
private static class UidField extends Field {
private static final FieldType FIELD_TYPE = new FieldType();
static {
FIELD_TYPE.setTokenized(true);
FIELD_TYPE.setIndexed(true);
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
FIELD_TYPE.setStored(true);
FIELD_TYPE.freeze();
}
String uid;
long version;
UidField(String uid, long version) {
super(UidFieldMapper.NAME, uid, FIELD_TYPE);
this.uid = uid;
this.version = version;
}
@Override
public TokenStream tokenStream(Analyzer analyzer) throws IOException {
return new TokenStream() {
boolean finished = true;
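// The stream starts exhausted; reset() re-arms it before consumption, per the TokenStream contract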
final CharTermAttribute term = addAttribute(CharTermAttribute.class);
final PayloadAttribute payload = addAttribute(PayloadAttribute.class);
@Override
public boolean incrementToken() throws IOException {
if (finished) {
return false;
}
term.setEmpty().append(uid);
payload.setPayload(new BytesRef(Numbers.longToBytes(version)));
finished = true;
return true;
}
@Override
public void reset() throws IOException {
finished = false;
}
};
}
}
@Test
public void testMergingOldIndices() throws Exception {
final IndexWriterConfig iwConf = new IndexWriterConfig(Lucene.VERSION, new KeywordAnalyzer());
iwConf.setMergePolicy(new IndexUpgraderMergePolicy(iwConf.getMergePolicy()));
final Directory dir = newDirectory();
final IndexWriter iw = new IndexWriter(dir, iwConf);
// 1st segment, no _version
Document document = new Document();
// Add a dummy field (enough to trigger #3237)
document.add(new StringField("a", "b", Store.NO));
StringField uid = new StringField(UidFieldMapper.NAME, "1", Store.YES);
document.add(uid);
iw.addDocument(document);
uid.setStringValue("2");
iw.addDocument(document);
iw.commit();
// 2nd segment, old layout
document = new Document();
UidField uidAndVersion = new UidField("3", 3L);
document.add(uidAndVersion);
iw.addDocument(document);
uidAndVersion.uid = "4";
uidAndVersion.version = 4L;
iw.addDocument(document);
iw.commit();
// 3rd segment new layout
document = new Document();
uid.setStringValue("5");
Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
document.add(uid);
document.add(version);
iw.addDocument(document);
uid.setStringValue("6");
version.setLongValue(6L);
iw.addDocument(document);
iw.commit();
final Map<String, Long> expectedVersions = ImmutableMap.<String, Long>builder()
.put("1", 0L).put("2", 0L).put("3", 0L).put("4", 4L).put("5", 5L).put("6", 6L).build();
// Force merge and check versions
iw.forceMerge(1);
final AtomicReader ir = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(iw.getDirectory()));
final NumericDocValues versions = ir.getNumericDocValues(VersionFieldMapper.NAME);
assertThat(versions, notNullValue());
for (int i = 0; i < ir.maxDoc(); ++i) {
final String uidValue = ir.document(i).get(UidFieldMapper.NAME);
final long expectedVersion = expectedVersions.get(uidValue);
assertThat(versions.get(i), equalTo(expectedVersion));
}
iw.close();
assertThat(IndexWriter.isLocked(iw.getDirectory()), is(false));
ir.close();
dir.close();
}
} | 0 (true)
| src_test_java_org_elasticsearch_common_lucene_uid_VersionsTests.java |
217 | public class OTableFormatter {
protected final static String MORE = "...";
protected final OConsoleApplication out;
protected int minColumnSize = 4;
protected int maxWidthSize = 132;
protected final static Set<String> prefixedColumns = new HashSet<String>(Arrays.asList(new String[] { "#", "@RID" }));
protected final SimpleDateFormat DEF_DATEFORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
public OTableFormatter(final OConsoleApplication iConsole) {
this.out = iConsole;
}
public OTableFormatter hideRID(final boolean iValue) {
if (iValue)
prefixedColumns.remove("@RID");
else
prefixedColumns.add("@RID");
return this;
}
public void writeRecords(final Collection<OIdentifiable> resultSet, final int limit) {
writeRecords(resultSet, limit, null);
}
public void writeRecords(final Collection<OIdentifiable> resultSet, final int limit,
final OCallable<Object, OIdentifiable> iAfterDump) {
final Map<String, Integer> columns = parseColumns(resultSet, limit);
int fetched = 0;
for (OIdentifiable record : resultSet) {
dumpRecordInTable(fetched++, record, columns);
if (iAfterDump != null)
iAfterDump.call(record);
if (limit > -1 && fetched >= limit) {
printHeaderLine(columns);
out.message("\nLIMIT EXCEEDED: resultset contains more items not displayed (limit=" + limit + ")");
return;
}
}
if (fetched > 0)
printHeaderLine(columns);
}
public int getMaxWidthSize() {
return maxWidthSize;
}
public OTableFormatter setMaxWidthSize(int maxWidthSize) {
this.maxWidthSize = maxWidthSize;
return this;
}
public void dumpRecordInTable(final int iIndex, final OIdentifiable iRecord, final Map<String, Integer> iColumns) {
if (iIndex == 0)
printHeader(iColumns);
// FORMAT THE LINE DYNAMICALLY
List<Object> vargs = new ArrayList<Object>();
try {
if (iRecord instanceof ODocument)
((ODocument) iRecord).setLazyLoad(false);
final StringBuilder format = new StringBuilder(maxWidthSize);
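// BUILD A PRINTF-STYLE ROW FORMAT, ONE "%-<width>s" SPECIFIER PER COLUMN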
for (Entry<String, Integer> col : iColumns.entrySet()) {
if (format.length() > 0)
format.append('|');
format.append("%-" + col.getValue() + "s");
Object value = getFieldValue(iIndex, iRecord, col.getKey());
if (value != null) {
value = value.toString();
if (((String) value).length() > col.getValue()) {
// APPEND ...
value = ((String) value).substring(0, col.getValue() - 3) + MORE;
}
}
vargs.add(value);
}
out.message(format.toString() + "\n", vargs.toArray());
} catch (Throwable t) {
out.message("%3d|%9s|%s\n", iIndex, iRecord.getIdentity(), "Error on loading record dued to: " + t);
}
}
private Object getFieldValue(final int iIndex, final OIdentifiable iRecord, final String iColumnName) {
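// RESOLVE THE CELL VALUE: SYNTHETIC COLUMNS FIRST, THEN DOCUMENT FIELDS, THEN TYPE-SPECIFIC RENDERING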
Object value = null;
if (iColumnName.equals("#"))
// RECORD NUMBER
value = iIndex;
else if (iColumnName.equals("@RID"))
// RID
value = iRecord.getIdentity().toString();
else if (iRecord instanceof ORecordSchemaAwareAbstract<?>)
value = ((ORecordSchemaAwareAbstract<?>) iRecord).field(iColumnName);
else if (iRecord instanceof ORecordBytes)
value = "<binary> (size=" + ((ORecordBytes) iRecord).toStream().length + " bytes)";
if (value instanceof OMultiCollectionIterator<?>)
value = "[" + ((OMultiCollectionIterator<?>) value).size() + "]";
else if (value instanceof Collection<?>)
value = "[" + ((Collection<?>) value).size() + "]";
else if (value instanceof ORecord<?>) {
if (((ORecord<?>) value).getIdentity().equals(ORecordId.EMPTY_RECORD_ID)) {
value = ((ORecord<?>) value).toString();
} else {
value = ((ORecord<?>) value).getIdentity().toString();
}
} else if (value instanceof Date) {
final ODatabaseRecord db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
if (db != null)
value = db.getStorage().getConfiguration().getDateTimeFormatInstance().format((Date) value);
else {
value = DEF_DATEFORMAT.format((Date) value);
}
} else if (value instanceof byte[])
value = "byte[" + ((byte[]) value).length + "]";
return value;
}
private void printHeader(final Map<String, Integer> iColumns) {
final StringBuilder buffer = new StringBuilder();
printHeaderLine(iColumns);
int i = 0;
for (Entry<String, Integer> column : iColumns.entrySet()) {
if (i++ > 0)
buffer.append('|');
String colName = column.getKey();
if (colName.length() > column.getValue())
colName = colName.substring(0, column.getValue());
buffer.append(String.format("%-" + column.getValue() + "s", colName));
}
buffer.append("\n");
out.message(buffer.toString());
printHeaderLine(iColumns);
}
private void printHeaderLine(final Map<String, Integer> iColumns) {
final StringBuilder buffer = new StringBuilder("\n");
if (iColumns.size() > 0) {
int i = 0;
for (Entry<String, Integer> col : iColumns.entrySet()) {
if (i++ > 0)
buffer.append("+");
for (int k = 0; k < col.getValue(); ++k)
buffer.append("-");
}
}
buffer.append("\n");
out.message(buffer.toString());
}
/**
 * Fills the column map, computing the maximum display width for each field.
 *
 * @param resultSet the records to inspect
 * @param limit the maximum number of records to scan, or -1 for no limit
 * @return an ordered map of column name to computed column width
 */
private Map<String, Integer> parseColumns(final Collection<OIdentifiable> resultSet, final int limit) {
final Map<String, Integer> columns = new LinkedHashMap<String, Integer>();
for (String c : prefixedColumns)
columns.put(c, minColumnSize);
int fetched = 0;
for (OIdentifiable id : resultSet) {
ORecord<?> rec = id.getRecord();
for (String c : prefixedColumns)
columns.put(c, getColumnSize(fetched, rec, c, columns.get(c)));
if (rec instanceof ODocument) {
((ODocument) rec).setLazyLoad(false);
// PARSE ALL THE DOCUMENT'S FIELDS
ODocument doc = (ODocument) rec;
for (String fieldName : doc.fieldNames()) {
columns.put(fieldName, getColumnSize(fetched, doc, fieldName, columns.get(fieldName)));
}
} else if (rec instanceof ORecordBytes) {
// UNIQUE BINARY FIELD
columns.put("value", maxWidthSize - 15);
}
if (limit > -1 && fetched++ >= limit)
break;
}
// COMPUTE MAXIMUM WIDTH
int width = 0;
for (Entry<String, Integer> col : columns.entrySet())
width += col.getValue();
if (width > maxWidthSize) {
// SCALE COLUMNS AUTOMATICALLY
final List<Map.Entry<String, Integer>> orderedColumns = new ArrayList<Map.Entry<String, Integer>>();
orderedColumns.addAll(columns.entrySet());
Collections.sort(orderedColumns, new Comparator<Map.Entry<String, Integer>>() {
public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
return o1.getValue().compareTo(o2.getValue());
}
});
// START CUTTING THE BIGGEST ONES
Collections.reverse(orderedColumns);
while (width > maxWidthSize) {
int oldWidth = width;
for (Map.Entry<String, Integer> entry : orderedColumns) {
final int redux = entry.getValue() * 10 / 100;
if (entry.getValue() - redux < minColumnSize)
// RESTART FROM THE LARGEST COLUMN
break;
entry.setValue(entry.getValue() - redux);
width -= redux;
if (width <= maxWidthSize)
break;
}
if (width == oldWidth)
// REACHED THE MINIMUM
break;
}
// POPULATE THE COLUMNS WITH THE REDUXED VALUES
columns.clear();
for (String c : prefixedColumns)
columns.put(c, minColumnSize);
Collections.reverse(orderedColumns);
for (Entry<String, Integer> col : orderedColumns)
// if (!col.getKey().equals("#") && !col.getKey().equals("@RID"))
columns.put(col.getKey(), col.getValue());
}
return columns;
}
private Integer getColumnSize(final Integer iIndex, final ORecord<?> iRecord, final String fieldName, final Integer origSize) {
Integer newColumnSize;
if (origSize == null)
// START FROM THE FIELD NAME SIZE
newColumnSize = fieldName.length();
else
newColumnSize = Math.max(origSize, fieldName.length());
final Object fieldValue = getFieldValue(iIndex, iRecord, fieldName);
if (fieldValue != null) {
final String fieldValueAsString = fieldValue.toString();
if (fieldValueAsString.length() > newColumnSize)
newColumnSize = fieldValueAsString.length();
}
if (newColumnSize < minColumnSize)
// SET THE MINIMUM SIZE
newColumnSize = minColumnSize;
return newColumnSize;
}
} | 0 (true)
| tools_src_main_java_com_orientechnologies_orient_console_OTableFormatter.java |
1,578 | public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase {
@Test
public void diskThresholdTest() {
Settings diskSettings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build();
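// Fractional watermarks are used-disk thresholds: no new shards above 70% used (low), shards must move off nodes above 80% used (high)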
Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
usages.put("node1", new DiskUsage("node1", 100, 10)); // 90% used
usages.put("node2", new DiskUsage("node2", 100, 35)); // 65% used
usages.put("node3", new DiskUsage("node3", 100, 60)); // 40% used
usages.put("node4", new DiskUsage("node4", 100, 80)); // 20% used
Map<String, Long> shardSizes = new HashMap<String, Long>();
shardSizes.put("[test][0][p]", 10L); // 10 bytes
shardSizes.put("[test][0][r]", 10L);
final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
new HashSet<AllocationDecider>(Arrays.asList(
new SameShardAllocationDecider(ImmutableSettings.EMPTY),
new DiskThresholdDecider(diskSettings))));
ClusterInfoService cis = new ClusterInfoService() {
@Override
public ClusterInfo getClusterInfo() {
logger.info("--> calling fake getClusterInfo");
return clusterInfo;
}
};
AllocationService strategy = new AllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build(), deciders, new ShardsAllocators(), cis);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Primary shard should be initializing, replica should not
assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Assert that we're able to start the primary
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
// Assert that node1 didn't get any shards because its disk usage is too high
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
logger.info("--> start the shards (replicas)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Assert that the replica couldn't be started since node1 doesn't have enough space
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
logger.info("--> adding node3");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Assert that the replica is initialized now that node3 is available with enough space
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (replicas)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Assert that both the primary and the replica are started now (the replica went to node3, not node1)
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
logger.info("--> changing decider settings");
// Set the low threshold to 60 instead of 70
// Set the high threshold to 70 instead of 80
// node2 now should not have new shards allocated to it, but shards can remain
diskSettings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.6)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.7).build();
deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
new HashSet<AllocationDecider>(Arrays.asList(
new SameShardAllocationDecider(ImmutableSettings.EMPTY),
new DiskThresholdDecider(diskSettings))));
strategy = new AllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build(), deciders, new ShardsAllocators(), cis);
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Shards remain started
assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
logger.info("--> changing settings again");
// Set the low threshold to 50 instead of 60
// Set the high threshold to 60 instead of 70
// node2 now should not have new shards allocated to it, and shards cannot remain
diskSettings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.5)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.6).build();
deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
new HashSet<AllocationDecider>(Arrays.asList(
new SameShardAllocationDecider(ImmutableSettings.EMPTY),
new DiskThresholdDecider(diskSettings))));
strategy = new AllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build(), deciders, new ShardsAllocators(), cis);
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Shards remain started
assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
// Shard hasn't been moved off of node2 yet because there's nowhere for it to go
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
logger.info("--> adding node4");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4"))
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Shards remain started
assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> apply INITIALIZING shards");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
// Node4 is available now, so the shard is moved off of node2
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
}
@Test
public void diskThresholdWithAbsoluteSizesTest() {
Settings diskSettings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "30b")
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "20b").build();
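// With absolute watermarks the values are minimum free space: allocate only on nodes with more than 30 bytes free; relocate off nodes with less than 20 bytes free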
Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
usages.put("node1", new DiskUsage("node1", 100, 10)); // 90% used
usages.put("node2", new DiskUsage("node2", 100, 35)); // 65% used
usages.put("node3", new DiskUsage("node3", 100, 60)); // 40% used
usages.put("node4", new DiskUsage("node4", 100, 80)); // 20% used
Map<String, Long> shardSizes = new HashMap<String, Long>();
shardSizes.put("[test][0][p]", 10L); // 10 bytes
shardSizes.put("[test][0][r]", 10L);
final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
new HashSet<AllocationDecider>(Arrays.asList(
new SameShardAllocationDecider(ImmutableSettings.EMPTY),
new DiskThresholdDecider(diskSettings))));
ClusterInfoService cis = new ClusterInfoService() {
@Override
public ClusterInfo getClusterInfo() {
logger.info("--> calling fake getClusterInfo");
return clusterInfo;
}
};
AllocationService strategy = new AllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build(), deciders, new ShardsAllocators(), cis);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Primary shard should be initializing, replica should not
assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (primaries)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Assert that we're able to start the primary
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
// Assert that node1 didn't get any shards because its disk usage is too high
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
logger.info("--> start the shards (replicas)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Assert that the replica couldn't be started since node1 doesn't have enough space
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
logger.info("--> adding node3");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Assert that the replica is initialized now that node3 is available with enough space
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
logger.info("--> start the shards (replicas)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Assert that both the primary and the replica are started now (the replica went to node3, not node1)
assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
logger.info("--> changing decider settings");
// Raise the low watermark to 40 bytes free (from 30)
// Raise the high watermark to 30 bytes free (from 20)
// node2 now should not have new shards allocated to it, but shards can remain
diskSettings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "40b")
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "30b").build();
deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
new HashSet<AllocationDecider>(Arrays.asList(
new SameShardAllocationDecider(ImmutableSettings.EMPTY),
new DiskThresholdDecider(diskSettings))));
strategy = new AllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build(), deciders, new ShardsAllocators(), cis);
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Shards remain started
assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
logger.info("--> changing settings again");
// Raise the low watermark to 50 bytes free (from 40)
// Raise the high watermark to 40 bytes free (from 30)
// node2 now should not have new shards allocated to it, and shards cannot remain
diskSettings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "50b")
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "40b").build();
deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
new HashSet<AllocationDecider>(Arrays.asList(
new SameShardAllocationDecider(ImmutableSettings.EMPTY),
new DiskThresholdDecider(diskSettings))));
strategy = new AllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build(), deciders, new ShardsAllocators(), cis);
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Shards remain started
assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
// Shard hasn't been moved off of node2 yet because there's nowhere for it to go
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
logger.info("--> adding node4");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4"))
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Shards remain started
assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
logger.info("--> apply INITIALIZING shards");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
// Node4 is available now, so the shard is moved off of node2
assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
}
@Test
public void diskThresholdWithShardSizes() {
Settings diskSettings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.71).build();
Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
usages.put("node1", new DiskUsage("node1", 100, 31)); // 69% used
usages.put("node2", new DiskUsage("node2", 100, 1)); // 99% used
Map<String, Long> shardSizes = new HashMap<String, Long>();
shardSizes.put("[test][0][p]", 10L); // 10 bytes
final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
new HashSet<AllocationDecider>(Arrays.asList(
new SameShardAllocationDecider(ImmutableSettings.EMPTY),
new DiskThresholdDecider(diskSettings))));
ClusterInfoService cis = new ClusterInfoService() {
@Override
public ClusterInfo getClusterInfo() {
logger.info("--> calling fake getClusterInfo");
return clusterInfo;
}
};
AllocationService strategy = new AllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build(), deciders, new ShardsAllocators(), cis);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding node1");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("--> start the shards (primaries)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Shard can't be allocated to node1 (or node2) because it would cause too much usage
assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
// No shards are started, no nodes have enough disk for allocation
assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
}
@Test
public void unknownDiskUsageTest() {
Settings diskSettings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.85).build();
Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
usages.put("node2", new DiskUsage("node2", 100, 50)); // 50% used
usages.put("node3", new DiskUsage("node3", 100, 0)); // 100% used
Map<String, Long> shardSizes = new HashMap<String, Long>();
shardSizes.put("[test][0][p]", 10L); // 10 bytes
shardSizes.put("[test][0][r]", 10L); // 10 bytes
final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
AllocationDeciders deciders = new AllocationDeciders(ImmutableSettings.EMPTY,
new HashSet<AllocationDecider>(Arrays.asList(
new SameShardAllocationDecider(ImmutableSettings.EMPTY),
new DiskThresholdDecider(diskSettings))));
ClusterInfoService cis = new ClusterInfoService() {
@Override
public ClusterInfo getClusterInfo() {
logger.info("--> calling fake getClusterInfo");
return clusterInfo;
}
};
AllocationService strategy = new AllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put("cluster.routing.allocation.allow_rebalance", "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build(), deciders, new ShardsAllocators(), cis);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding node1");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters
).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("--> start the shards (primaries)");
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Shard can't be allocated: node1's unknown usage averages to 75% used (> 70% low watermark) and node3 is full
assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
// No shards are started; neither node has enough free disk
assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
}
@Test
public void averageUsageUnitTest() {
RoutingNode rn = new RoutingNode("node1", newNode("node1"));
DiskThresholdDecider decider = new DiskThresholdDecider(ImmutableSettings.EMPTY);
Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
usages.put("node2", new DiskUsage("node2", 100, 50)); // 50% used
usages.put("node3", new DiskUsage("node3", 100, 0)); // 100% used
DiskUsage node1Usage = decider.averageUsage(rn, usages);
assertThat(node1Usage.getTotalBytes(), equalTo(100L));
assertThat(node1Usage.getFreeBytes(), equalTo(25L));
}
@Test
public void freeDiskPercentageAfterShardAssignedUnitTest() {
RoutingNode rn = new RoutingNode("node1", newNode("node1"));
DiskThresholdDecider decider = new DiskThresholdDecider(ImmutableSettings.EMPTY);
Map<String, DiskUsage> usages = new HashMap<String, DiskUsage>();
usages.put("node2", new DiskUsage("node2", 100, 50)); // 50% used
usages.put("node3", new DiskUsage("node3", 100, 0)); // 100% used
Double after = decider.freeDiskPercentageAfterShardAssigned(new DiskUsage("node2", 100, 30), 11L);
assertThat(after, equalTo(19.0));
}
public void logShardStates(ClusterState state) {
RoutingNodes rn = state.routingNodes();
logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",
rn.shards(new Predicate<MutableShardRouting>() {
@Override
public boolean apply(org.elasticsearch.cluster.routing.MutableShardRouting input) {
return true;
}
}).size(),
rn.shardsWithState(UNASSIGNED).size(),
rn.shardsWithState(INITIALIZING).size(),
rn.shardsWithState(RELOCATING).size(),
rn.shardsWithState(STARTED).size());
logger.info("--> unassigned: {}, initializing: {}, relocating: {}, started: {}",
rn.shardsWithState(UNASSIGNED),
rn.shardsWithState(INITIALIZING),
rn.shardsWithState(RELOCATING),
rn.shardsWithState(STARTED));
}
} | 0 (true)
| src_test_java_org_elasticsearch_cluster_routing_allocation_decider_DiskThresholdDeciderTests.java |
1,620 | public class OSQLCommandTask extends OAbstractReplicatedTask {
private static final long serialVersionUID = 1L;
protected String text;
public OSQLCommandTask() {
}
public OSQLCommandTask(final String iCommand) {
text = iCommand;
}
public Object execute(final OServer iServer, ODistributedServerManager iManager, final ODatabaseDocumentTx database)
throws Exception {
ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.IN, "execute command=%s db=%s",
text, database.getName());
return database.command(new OCommandSQL(text)).execute();
}
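// No quorum is required for distributed SQL command execution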
public QUORUM_TYPE getQuorumType() {
return QUORUM_TYPE.NONE;
}
@Override
public long getTimeout() {
return OGlobalConfiguration.DISTRIBUTED_COMMAND_TASK_SYNCH_TIMEOUT.getValueAsLong();
}
@Override
public OFixUpdateRecordTask getFixTask(ODistributedRequest iRequest, ODistributedResponse iBadResponse, ODistributedResponse iGoodResponse) {
return null;
}
@Override
public void writeExternal(final ObjectOutput out) throws IOException {
out.writeUTF(text);
}
@Override
public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
text = in.readUTF();
}
@Override
public String getName() {
return "command_sql";
}
@Override
public String toString() {
return super.toString() + "(" + text + ")";
}
@Override
public String getPayload() {
return text;
}
} | 0 (true)
| server_src_main_java_com_orientechnologies_orient_server_distributed_task_OSQLCommandTask.java |
265 | @SuppressWarnings("unchecked")
public abstract class OCommandExecutorAbstract extends OBaseParser implements OCommandExecutor {
protected OProgressListener progressListener;
protected int limit = -1;
protected Map<Object, Object> parameters;
protected OCommandContext context;
public OCommandExecutorAbstract init(final OCommandRequestText iRequest) {
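// Executing any command requires read permission on the generic command resource; keyword matching uses the upper-cased copy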
getDatabase().checkSecurity(ODatabaseSecurityResources.COMMAND, ORole.PERMISSION_READ);
parserText = iRequest.getText().trim();
parserTextUpperCase = parserText.toUpperCase(Locale.ENGLISH);
return this;
}
@Override
public String toString() {
return getClass().getSimpleName() + " [text=" + parserText + "]";
}
public OProgressListener getProgressListener() {
return progressListener;
}
public <RET extends OCommandExecutor> RET setProgressListener(final OProgressListener progressListener) {
this.progressListener = progressListener;
return (RET) this;
}
public int getLimit() {
return limit;
}
public <RET extends OCommandExecutor> RET setLimit(final int iLimit) {
this.limit = iLimit;
return (RET) this;
}
public Map<Object, Object> getParameters() {
return parameters;
}
@Override
public String getFetchPlan() {
return null;
}
public OCommandContext getContext() {
if (context == null)
context = new OBasicCommandContext();
return context;
}
public void setContext(final OCommandContext iContext) {
context = iContext;
}
public static ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
} | 0 (true)
| core_src_main_java_com_orientechnologies_orient_core_command_OCommandExecutorAbstract.java |
1,403 | public class OMVRBTreeRIDEntryProvider extends OMVRBTreeEntryDataProviderAbstract<OIdentifiable, OIdentifiable> {
private static final long serialVersionUID = 1L;
protected final static int OFFSET_TREESIZE = 0;
protected final static int OFFSET_NODESIZE = OFFSET_TREESIZE + OBinaryProtocol.SIZE_INT;
protected final static int OFFSET_COLOR = OFFSET_NODESIZE + OBinaryProtocol.SIZE_INT;
protected final static int OFFSET_PARENT = OFFSET_COLOR + OBinaryProtocol.SIZE_BYTE;
protected final static int OFFSET_LEFT = OFFSET_PARENT + ORecordId.PERSISTENT_SIZE;
protected final static int OFFSET_RIGHT = OFFSET_LEFT + ORecordId.PERSISTENT_SIZE;
protected final static int OFFSET_RIDLIST = OFFSET_RIGHT + ORecordId.PERSISTENT_SIZE;
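// FIXED NODE LAYOUT: TREE SIZE, NODE SIZE, COLOR FLAG, PARENT/LEFT/RIGHT RIDS, THEN THE INLINE RID LIST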
private int treeSize;
private final OIdentifiable[] rids;
public OMVRBTreeRIDEntryProvider(final OMVRBTreeRIDProvider iTreeDataProvider) {
super(iTreeDataProvider, OFFSET_RIDLIST + (iTreeDataProvider.getDefaultPageSize() * ORecordId.PERSISTENT_SIZE));
rids = OGlobalConfiguration.MVRBTREE_RID_NODE_SAVE_MEMORY.getValueAsBoolean() ? null : new OIdentifiable[pageSize];
}
public OMVRBTreeRIDEntryProvider(final OMVRBTreeRIDProvider iTreeDataProvider, final ORID iRID) {
super(iTreeDataProvider, iRID);
pageSize = treeDataProvider.getDefaultPageSize();
rids = OGlobalConfiguration.MVRBTREE_RID_NODE_SAVE_MEMORY.getValueAsBoolean() ? null : new OIdentifiable[pageSize];
}
/**
 * Lazily unmarshalls the RID at the given index if it is not already cached in memory.
 */
public OIdentifiable getKeyAt(final int iIndex) {
if (rids != null && rids[iIndex] != null)
return rids[iIndex];
final ORecordId rid = itemFromStream(iIndex);
if (rids != null)
rids[iIndex] = rid;
return rid;
}
/**
 * Returns the key itself: in a RID container the value coincides with the key.
 */
public OIdentifiable getValueAt(final int iIndex) {
return getKeyAt(iIndex);
}
public boolean setValueAt(int iIndex, final OIdentifiable iValue) {
if (iValue == null)
return false;
try {
itemToStream(iValue, iIndex);
} catch (IOException e) {
throw new OSerializationException("Cannot serialize entryRID object: " + this, e);
}
if (rids != null)
rids[iIndex] = iValue;
return setDirty();
}
public boolean insertAt(final int iIndex, final OIdentifiable iKey, final OIdentifiable iValue) {
if (iIndex < size) {
// MOVE RIGHT TO MAKE ROOM FOR THE ITEM
stream.move(getKeyPositionInStream(iIndex), ORecordId.PERSISTENT_SIZE);
if (rids != null)
System.arraycopy(rids, iIndex, rids, iIndex + 1, size - iIndex - 1);
}
try {
itemToStream(iKey, iIndex);
} catch (IOException e) {
throw new OSerializationException("Cannot serialize entryRID object: " + this, e);
}
if (rids != null)
rids[iIndex] = iKey;
size++;
return setDirty();
}
public boolean removeAt(final int iIndex) {
if (iIndex > -1 && iIndex < size - 1) {
// SHIFT LEFT THE VALUES
stream.move(getKeyPositionInStream(iIndex + 1), ORecordId.PERSISTENT_SIZE * -1);
if (rids != null)
System.arraycopy(rids, iIndex + 1, rids, iIndex, size - iIndex - 1);
}
size--;
// FREE RESOURCES
if (rids != null)
rids[size] = null;
return setDirty();
}
public boolean copyDataFrom(final OMVRBTreeEntryDataProvider<OIdentifiable, OIdentifiable> iFrom, final int iStartPosition) {
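// COPY THE TAIL OF THE SOURCE NODE, BOTH IN THE RAW STREAM AND IN THE IN-MEMORY RID CACHE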
size = iFrom.getSize() - iStartPosition;
final OMVRBTreeRIDEntryProvider from = (OMVRBTreeRIDEntryProvider) iFrom;
moveToIndex(0).copyFrom(from.moveToIndex(iStartPosition), size * ORecordId.PERSISTENT_SIZE);
if (rids != null)
System.arraycopy(from.rids, iStartPosition, rids, 0, size);
return setDirty();
}
public boolean truncate(final int iNewSize) {
moveToIndex(iNewSize).fill((size - iNewSize) * ORecordId.PERSISTENT_SIZE, (byte) 0);
if (rids != null)
Arrays.fill(rids, iNewSize, size, null);
size = iNewSize;
return setDirty();
}
public boolean copyFrom(final OMVRBTreeEntryDataProvider<OIdentifiable, OIdentifiable> iSource) {
final OMVRBTreeRIDEntryProvider source = (OMVRBTreeRIDEntryProvider) iSource;
stream = source.stream;
size = source.size;
return setDirty();
}
public OSerializableStream fromStream(final byte[] iStream) throws OSerializationException {
if (stream == null)
stream = new OMemoryStream(iStream);
else
stream.setSource(iStream);
treeSize = stream.jump(OFFSET_TREESIZE).getAsInteger();
size = stream.jump(OFFSET_NODESIZE).getAsInteger();
color = stream.jump(OFFSET_COLOR).getAsBoolean();
parentRid.fromStream(stream.jump(OFFSET_PARENT));
leftRid.fromStream(stream.jump(OFFSET_LEFT));
rightRid.fromStream(stream.jump(OFFSET_RIGHT));
if (rids != null)
// INVALIDATE THE IN-MEMORY RID CACHE SO ENTRIES ARE LAZILY RE-READ FROM THE STREAM
Arrays.fill(rids, null);
return this;
}
public byte[] toStream() throws OSerializationException {
if (stream == null)
stream = new OMemoryStream();
try {
stream.jump(OFFSET_TREESIZE).set(treeSize);
stream.jump(OFFSET_NODESIZE).set(size);
stream.jump(OFFSET_COLOR).set(color);
parentRid.toStream(stream.jump(OFFSET_PARENT));
leftRid.toStream(stream.jump(OFFSET_LEFT));
rightRid.toStream(stream.jump(OFFSET_RIGHT));
if (rids != null)
// STREAM RIDS
for (int i = 0; i < size; ++i)
if (rids[i] != null)
itemToStream(rids[i], i);
} catch (IOException e) {
throw new OSerializationException("Cannot serialize tree entry RID node: " + this, e);
}
// RETURN DIRECTLY THE UNDERLYING BUFFER SINCE IT'S FIXED
final byte[] buffer = stream.getInternalBuffer();
record.fromStream(buffer);
return buffer;
}
protected OMemoryStream moveToIndex(final int iIndex) {
return stream.jump(getKeyPositionInStream(iIndex));
}
protected int getKeyPositionInStream(final int iIndex) {
return OFFSET_RIDLIST + (iIndex * ORecordId.PERSISTENT_SIZE);
}
public int getTreeSize() {
return treeSize;
}
public boolean setTreeSize(final int treeSize) {
if (this.treeSize != treeSize) {
this.treeSize = treeSize;
setDirty();
return true;
}
return false;
}
protected ORecordId itemFromStream(final int iIndex) {
return new ORecordId().fromStream(moveToIndex(iIndex));
}
protected int itemToStream(final OIdentifiable iKey, final int iIndex) throws IOException {
return iKey.getIdentity().toStream(moveToIndex(iIndex));
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_type_tree_provider_OMVRBTreeRIDEntryProvider.java |
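The provider above keeps every RID at a fixed offset inside a single page buffer and only unmarshalls an entry on first access. A minimal sketch of that fixed-slot-plus-lazy-cache idea, independent of OrientDB; HEADER and SLOT_SIZE are hypothetical stand-ins for OFFSET_RIDLIST and ORecordId.PERSISTENT_SIZE:

import java.nio.ByteBuffer;

// Minimal sketch (not OrientDB code): fixed-slot page layout with a lazy cache.
class FixedSlotPage {
    private static final int HEADER = 16;   // hypothetical header size
    private static final int SLOT_SIZE = 8; // hypothetical persistent size per entry
    private final ByteBuffer stream;        // backing page buffer
    private final Long[] cache;             // null means "not unmarshalled yet"

    FixedSlotPage(ByteBuffer stream, int capacity) {
        this.stream = stream;
        this.cache = new Long[capacity];
    }

    // Mirrors getKeyPositionInStream(): every slot lives at a fixed offset
    private int slotPosition(int index) {
        return HEADER + index * SLOT_SIZE;
    }

    // Mirrors getKeyAt(): read from the page only on the first access
    long get(int index) {
        Long cached = cache[index];
        if (cached != null) {
            return cached;
        }
        long value = stream.getLong(slotPosition(index));
        cache[index] = value;
        return value;
    }
}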
278 | public class EmailPropertyType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, EmailPropertyType> TYPES = new LinkedHashMap<String, EmailPropertyType>();
public static final EmailPropertyType USER = new EmailPropertyType("user", "User");
public static final EmailPropertyType INFO = new EmailPropertyType("info", "Info");
public static final EmailPropertyType SERVERINFO = new EmailPropertyType("serverInfo", "Server Info");
public static EmailPropertyType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public EmailPropertyType() {
//do nothing
}
public EmailPropertyType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
EmailPropertyType other = (EmailPropertyType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_email_service_message_EmailPropertyType.java |
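This "extensible enum" pattern registers each constant in a shared static map as a side effect of construction, so other modules can add values without touching the base class. A minimal usage sketch; the MY_CUSTOM constant is hypothetical:

// Declaring a new constant registers it in the shared TYPES map via setType()
public class EmailPropertyTypeUsage {
    public static final EmailPropertyType MY_CUSTOM =
            new EmailPropertyType("myCustom", "My Custom");

    public static void main(String[] args) {
        // Lookup by the string key registered at class-initialization time
        EmailPropertyType resolved = EmailPropertyType.getInstance("serverInfo");
        System.out.println(resolved.getFriendlyType()); // prints "Server Info"
        System.out.println(EmailPropertyType.getInstance("myCustom").getFriendlyType());
    }
}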
741 | public class ListAddRequest extends CollectionAddRequest {
private int index;
public ListAddRequest() {
}
public ListAddRequest(String name, Data value, int index) {
super(name, value);
this.index = index;
}
@Override
protected Operation prepareOperation() {
return new ListAddOperation(name, index, value);
}
@Override
public int getClassId() {
return CollectionPortableHook.LIST_ADD;
}
public void write(PortableWriter writer) throws IOException {
writer.writeInt("i", index);
super.write(writer);
}
public void read(PortableReader reader) throws IOException {
index = reader.readInt("i");
super.read(reader);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_collection_client_ListAddRequest.java |
646 | public class PrecompressedArtifactFilter extends GenericFilterBean {
private boolean useWhileInDefaultEnvironment = true;
@Resource(name="blConfiguration")
RuntimeEnvironmentPropertiesConfigurer configurer;
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain chain) throws IOException, ServletException {
HttpServletRequest request = (HttpServletRequest) servletRequest;
HttpServletResponse response = (HttpServletResponse) servletResponse;
checkOutput: {
if (!configurer.determineEnvironment().equals(configurer.getDefaultEnvironment()) || useWhileInDefaultEnvironment) {
String path = getResourcePath(request);
String gzipPath = path + ".gz";
if (useGzipCompression(request, response, path, gzipPath)) {
File output = new File(getServletContext().getRealPath(gzipPath));
if (output.exists()) {
response.addHeader("Content-Encoding", "gzip");
ServletOutputStream sos = servletResponse.getOutputStream();
BufferedInputStream bis = null;
try {
bis = new BufferedInputStream(new FileInputStream(output));
boolean eof = false;
while (!eof) {
int temp = bis.read();
if (temp < 0) {
eof = true;
} else {
sos.write(temp);
}
}
} finally {
sos.flush();
try {
if (bis != null) {
bis.close();
}
} catch (Exception e) {
//do nothing
}
}
break checkOutput;
}
}
}
chain.doFilter(request, response);
}
}
/**
* Copied from Tomcat
*
* Return the page resource path from the request. For example:
* <pre class="codeHtml">
* <span class="blue">http://www.mycorp.com/banking/secure/login.htm</span> -> <span class="red">/secure/login.htm</span> </pre>
*
* @param request the page servlet request
* @return the page resource path from the request
*/
public String getResourcePath(HttpServletRequest request) {
// Adapted from VelocityViewServlet.handleRequest() method:
// If we get here from RequestDispatcher.include(), getServletPath()
// will return the original (wrong) URI requested. The following
// special attribute holds the correct path. See section 8.3 of the
// Servlet 2.3 specification.
String path = (String)
request.getAttribute("javax.servlet.include.servlet_path");
// Also take into account the PathInfo stated on
// SRV.4.4 Request Path Elements.
String info = (String)
request.getAttribute("javax.servlet.include.path_info");
if (path == null) {
path = request.getServletPath();
info = request.getPathInfo();
}
if (info != null) {
path += info;
}
return path;
}
/**
* Copied from Tomcat
*
* Return true if the response should be GZIP compressed.
*
* @param request the request to test
* @param response the response to test
* @param path the request path to test
* @return true if the response should be GZIP compressed
*/
protected boolean useGzipCompression(HttpServletRequest request, HttpServletResponse response, String path, String gzipPath) throws MalformedURLException {
if (gzipPath == null) {
return false;
}
String temp = path.toLowerCase();
if (temp.endsWith(".gif") || temp.endsWith(".png") || temp.endsWith(".jpg")) {
return false;
}
if (getServletContext().getResource(gzipPath) == null) {
return false;
}
// If Content-Encoding header is already set on response, skip compression
if (response.containsHeader("Content-Encoding")) {
return false;
}
// Are we allowed to compress ?
String s = request.getParameter("gzip");
if ("false".equals(s)) {
return false;
}
Enumeration<?> e = request.getHeaders("Accept-Encoding");
while (e.hasMoreElements()) {
String name = (String) e.nextElement();
if (name.contains("gzip")) {
return true;
}
}
// NO gzip TOKEN IN ANY Accept-Encoding HEADER: DEFAULT TO UNCOMPRESSED OUTPUT
return false;
}
public boolean isUseWhileInDefaultEnvironment() {
return useWhileInDefaultEnvironment;
}
public void setUseWhileInDefaultEnvironment(boolean useWhileInDefaultEnvironment) {
this.useWhileInDefaultEnvironment = useWhileInDefaultEnvironment;
}
public RuntimeEnvironmentPropertiesConfigurer getConfigurer() {
return configurer;
}
public void setConfigurer(RuntimeEnvironmentPropertiesConfigurer configurer) {
this.configurer = configurer;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_web_util_PrecompressedArtifactFilter.java |
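The Accept-Encoding loop above (with the default-deny fix applied) is easy to test in isolation. A minimal sketch of the same negotiation check, outside the servlet API:

import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;

public class GzipNegotiation {
    // Same rule as the filter: compress only if some Accept-Encoding value mentions gzip
    static boolean clientAcceptsGzip(Enumeration<String> acceptEncoding) {
        while (acceptEncoding.hasMoreElements()) {
            if (acceptEncoding.nextElement().contains("gzip")) {
                return true;
            }
        }
        return false; // default-deny: never force gzip on a client that did not ask
    }

    public static void main(String[] args) {
        Enumeration<String> headers =
                Collections.enumeration(Arrays.asList("identity", "gzip, deflate"));
        System.out.println(clientAcceptsGzip(headers)); // true
    }
}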
1,483 | public class JavaSearch {
public static SearchPattern createSearchPattern(
Declaration declaration, int limitTo) {
String pattern;
try {
pattern = getJavaNameOfDeclaration(declaration);
}
catch (IllegalArgumentException iae) {
return null;
}
if (declaration instanceof Method) {
return createPattern(pattern, METHOD, limitTo, R_EXACT_MATCH);
}
else if (declaration instanceof Value) {
int loc = pattern.lastIndexOf('.') + 1;
String setter = pattern.substring(0,loc) +
"set" + pattern.substring(loc+3);
SearchPattern getterPattern =
createPattern(pattern, METHOD, limitTo, R_EXACT_MATCH);
SearchPattern setterPattern =
createPattern(setter, METHOD, limitTo, R_EXACT_MATCH);
switch (limitTo) {
case IJavaSearchConstants.WRITE_ACCESSES:
return setterPattern;
case IJavaSearchConstants.READ_ACCESSES:
return getterPattern;
default:
return createOrPattern(getterPattern, setterPattern);
}
}
else {
SearchPattern searchPattern =
createPattern(pattern, CLASS_AND_INTERFACE,
limitTo, R_EXACT_MATCH);
//weirdly, ALL_OCCURRENCES doesn't return all occurrences
/*if (limitTo==IJavaSearchConstants.ALL_OCCURRENCES) {
searchPattern = createOrPattern(createPattern(pattern, CLASS_AND_INTERFACE,
IJavaSearchConstants.IMPLEMENTORS, R_EXACT_MATCH),
searchPattern);
}*/
return searchPattern;
}
}
public static IProject[] getProjectAndReferencingProjects(IProject project) {
IProject[] referencingProjects = project.getReferencingProjects();
IProject[] projects = new IProject[referencingProjects.length+1];
projects[0] = project;
System.arraycopy(referencingProjects, 0, projects, 1, referencingProjects.length);
return projects;
}
public static IProject[] getProjectAndReferencedProjects(IProject project) {
IProject[] referencedProjects;
try {
referencedProjects = project.getReferencedProjects();
IProject[] projects = new IProject[referencedProjects.length+1];
projects[0] = project;
System.arraycopy(referencedProjects, 0, projects, 1, referencedProjects.length);
return projects;
}
catch (Exception e) {
e.printStackTrace();
return new IProject[] { project };
}
}
public static void runSearch(IProgressMonitor pm, SearchEngine searchEngine,
SearchPattern searchPattern, IProject[] projects,
SearchRequestor requestor)
throws OperationCanceledException {
try {
searchEngine.search(searchPattern,
SearchUtils.getDefaultSearchParticipants(),
SearchEngine.createJavaSearchScope(projects),
requestor, pm);
}
catch (OperationCanceledException oce) {
throw oce;
}
catch (Exception e) {
e.printStackTrace();
}
}
public static String getQualifiedName(IMember dec) {
IPackageFragment packageFragment = (IPackageFragment)
dec.getAncestor(IJavaElement.PACKAGE_FRAGMENT);
IType type = (IType) dec.getAncestor(IJavaElement.TYPE);
String qualifier = packageFragment.getElementName();
String name = dec.getElementName();
if (dec instanceof IMethod && name.equals("get_")) {
return getQualifiedName(type);
}
else if (dec instanceof IType && name.endsWith("_")) {
return qualifier + '.' +
name.substring(0, name.length()-1);
}
if (dec instanceof IMethod) {
if (name.startsWith("$")) {
name = name.substring(1);
}
else if (name.startsWith("get") ||
name.startsWith("set")) {
name = Character.toLowerCase(name.charAt(3)) +
name.substring(4);
}
}
if (dec!=type) {
String typeName = type.getElementName();
if (typeName.endsWith(name + "_")) {
return qualifier + '.' + name;
}
else {
return qualifier + '.' +
type.getElementName() + '.' + name;
}
}
else {
return qualifier + '.' + name;
}
}
public static boolean isDeclarationOfLinkedElement(Declaration d,
IJavaElement javaElement) {
return d.getQualifiedNameString().replace("::", ".")
.equals(getQualifiedName((IMember) javaElement));
}
public static IProject[] getProjectsToSearch(IProject project) {
if (project.getName().equals("Ceylon Source Archives")) {
return CeylonBuilder.getProjects().toArray(new IProject[0]);
}
else {
return getProjectAndReferencingProjects(project);
}
}
} | 1no label
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_util_JavaSearch.java |
3,756 | private static class ResponseWrapper extends HttpServletResponseWrapper {
public ResponseWrapper(final HttpServletResponse original) {
super(original);
}
} | 1no label
| hazelcast-wm_src_main_java_com_hazelcast_web_WebFilter.java |
1,384 | public class EdgeListInputMapReduce {
public enum Counters {
EDGES_PROCESSED,
VERTICES_EMITTED,
IN_EDGES_CREATED,
OUT_EDGES_CREATED,
VERTICES_CREATED,
VERTEX_PROPERTIES_CREATED
}
public static class Map extends Mapper<NullWritable, FaunusElement, LongWritable, FaunusVertex> {
private final HashMap<Long, FaunusVertex> map = new HashMap<Long, FaunusVertex>();
private static final int MAX_MAP_SIZE = 5000;
private final LongWritable longWritable = new LongWritable();
private int counter = 0;
private Configuration faunusConf;
@Override
public void setup(Context context) {
faunusConf = ModifiableHadoopConfiguration.of(DEFAULT_COMPAT.getContextConfiguration(context));
}
@Override
public void map(final NullWritable key, final FaunusElement value, final Mapper<NullWritable, FaunusElement, LongWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
if (value instanceof StandardFaunusEdge) {
final long outId = ((StandardFaunusEdge) value).getVertexId(OUT);
final long inId = ((StandardFaunusEdge) value).getVertexId(IN);
FaunusVertex vertex = this.map.get(outId);
if (null == vertex) {
vertex = new FaunusVertex(faunusConf, outId);
this.map.put(outId, vertex);
}
vertex.addEdge(OUT, WritableUtils.clone((StandardFaunusEdge) value, context.getConfiguration()));
this.counter++;
vertex = this.map.get(inId);
if (null == vertex) {
vertex = new FaunusVertex(faunusConf, inId);
this.map.put(inId, vertex);
}
vertex.addEdge(IN, WritableUtils.clone((StandardFaunusEdge) value, context.getConfiguration()));
DEFAULT_COMPAT.incrementContextCounter(context, Counters.EDGES_PROCESSED, 1L);
this.counter++;
} else {
final long id = value.getLongId();
FaunusVertex vertex = this.map.get(id);
if (null == vertex) {
vertex = new FaunusVertex(faunusConf, id);
this.map.put(id, vertex);
}
vertex.addAllProperties(value.getPropertyCollection());
vertex.addEdges(BOTH, WritableUtils.clone((FaunusVertex) value, context.getConfiguration()));
this.counter++;
}
if (this.counter > MAX_MAP_SIZE)
this.flush(context);
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusElement, LongWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
this.flush(context);
}
private void flush(final Mapper<NullWritable, FaunusElement, LongWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
for (final FaunusVertex vertex : this.map.values()) {
this.longWritable.set(vertex.getLongId());
context.write(this.longWritable, vertex);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_EMITTED, 1L);
}
this.map.clear();
this.counter = 0;
}
}
public static class Combiner extends Reducer<LongWritable, FaunusVertex, LongWritable, FaunusVertex> {
private Configuration faunusConf;
@Override
public void setup(Context context) {
faunusConf = ModifiableHadoopConfiguration.of(DEFAULT_COMPAT.getContextConfiguration(context));
}
@Override
public void reduce(final LongWritable key, final Iterable<FaunusVertex> values, final Reducer<LongWritable, FaunusVertex, LongWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
final FaunusVertex vertex = new FaunusVertex(faunusConf, key.get());
for (final FaunusVertex value : values) {
vertex.addEdges(BOTH, value);
vertex.addAllProperties(value.getPropertyCollection());
}
context.write(key, vertex);
}
}
public static class Reduce extends Reducer<LongWritable, FaunusVertex, NullWritable, FaunusVertex> {
private Configuration faunusConf;
@Override
public void setup(Context context) {
faunusConf = ModifiableHadoopConfiguration.of(DEFAULT_COMPAT.getContextConfiguration(context));
}
@Override
public void reduce(final LongWritable key, final Iterable<FaunusVertex> values, final Reducer<LongWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
final FaunusVertex vertex = new FaunusVertex(faunusConf, key.get());
for (final FaunusVertex value : values) {
vertex.addEdges(BOTH, value);
vertex.addAllProperties(value.getPropertyCollection());
}
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_CREATED, 1L);
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTEX_PROPERTIES_CREATED, vertex.getPropertyCollection().size());
DEFAULT_COMPAT.incrementContextCounter(context, Counters.OUT_EDGES_CREATED, Iterables.size(vertex.getEdges(OUT)));
DEFAULT_COMPAT.incrementContextCounter(context, Counters.IN_EDGES_CREATED, Iterables.size(vertex.getEdges(IN)));
context.write(NullWritable.get(), vertex);
}
}
} | 1no label
| titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_edgelist_EdgeListInputMapReduce.java |
1,067 | execute(request, new ActionListener<MultiTermVectorsResponse>() {
@Override
public void onResponse(MultiTermVectorsResponse response) {
try {
channel.sendResponse(response);
} catch (Throwable t) {
onFailure(t);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Throwable t) {
logger.warn("Failed to send error response for action [" + MultiTermVectorsAction.NAME + "] and request ["
+ request + "]", t);
}
}
}); | 0true
| src_main_java_org_elasticsearch_action_termvector_TransportMultiTermVectorsAction.java |
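The snippet routes a failure while sending the response back through onFailure, so errors take a single path. A minimal sketch of that callback contract in plain Java; the Listener interface and names are hypothetical stand-ins for Elasticsearch's ActionListener:

interface Listener<T> {
    void onResponse(T response);
    void onFailure(Throwable t);
}

public class CallbackDemo {
    static void send(Listener<String> listener) {
        try {
            listener.onResponse("ok"); // may itself throw while writing to a channel
        } catch (Throwable t) {
            listener.onFailure(t);     // route late failures through the same path
        }
    }

    public static void main(String[] args) {
        send(new Listener<String>() {
            public void onResponse(String r) { System.out.println("response: " + r); }
            public void onFailure(Throwable t) { System.out.println("failed: " + t); }
        });
    }
}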
861 | public abstract class AtomicReferenceBackupAwareOperation extends AtomicReferenceBaseOperation
implements BackupAwareOperation {
protected boolean shouldBackup = true;
public AtomicReferenceBackupAwareOperation() {
}
public AtomicReferenceBackupAwareOperation(String name) {
super(name);
}
@Override
public boolean shouldBackup() {
return shouldBackup;
}
@Override
public int getSyncBackupCount() {
return 1;
}
@Override
public int getAsyncBackupCount() {
return 0;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_atomicreference_operations_AtomicReferenceBackupAwareOperation.java |
1,594 | static class NoLogger implements ILogger {
@Override
public void finest(String message) {
}
@Override
public void finest(String message, Throwable thrown) {
}
@Override
public void finest(Throwable thrown) {
}
@Override
public boolean isFinestEnabled() {
return false;
}
@Override
public void info(String message) {
}
@Override
public void severe(String message) {
}
@Override
public void severe(Throwable thrown) {
}
@Override
public void severe(String message, Throwable thrown) {
}
@Override
public void warning(String message) {
}
@Override
public void warning(Throwable thrown) {
}
@Override
public void warning(String message, Throwable thrown) {
}
@Override
public void log(Level level, String message) {
}
@Override
public void log(Level level, String message, Throwable thrown) {
}
@Override
public void log(LogEvent logEvent) {
}
@Override
public Level getLevel() {
return Level.OFF;
}
@Override
public boolean isLoggable(Level level) {
return false;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_logging_NoLogFactory.java |
1,138 | public class OSQLMethodAsList extends OAbstractSQLMethod {
public static final String NAME = "aslist";
public OSQLMethodAsList() {
super(NAME);
}
@SuppressWarnings("unchecked")
@Override
public Object execute(OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) {
if (ioResult instanceof List)
// ALREADY A LIST
return ioResult;
if (ioResult == null)
// NULL VALUE, RETURN AN EMPTY LIST
return new ArrayList<Object>();
if (ioResult instanceof Collection<?>)
return new ArrayList<Object>((Collection<Object>) ioResult);
else if (ioResult instanceof Iterable<?>)
ioResult = ((Iterable<?>) ioResult).iterator();
if (ioResult instanceof Iterator<?>) {
final List<Object> list = ioResult instanceof OSizeable ? new ArrayList<Object>(((OSizeable) ioResult).size())
: new ArrayList<Object>();
for (Iterator<Object> iter = (Iterator<Object>) ioResult; iter.hasNext();)
list.add(iter.next());
return list;
}
// SINGLE ITEM: WRAP IT IN A NEW SINGLE-ELEMENT LIST (a HashSet here contradicted the method's asList contract)
final List<Object> list = new ArrayList<Object>();
list.add(ioResult);
return list;
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodAsList.java |
1,062 | public interface FixedPriceFulfillmentOption extends FulfillmentOption, Serializable {
public Money getPrice();
public void setPrice(Money price);
public BroadleafCurrency getCurrency();
public void setCurrency(BroadleafCurrency currency);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_fulfillment_domain_FixedPriceFulfillmentOption.java |
1,930 | public class MapClearRequest extends AllPartitionsClientRequest implements Portable, RetryableRequest, SecureRequest {
private String name;
public MapClearRequest() {
}
public MapClearRequest(String name) {
this.name = name;
}
public String getServiceName() {
return MapService.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return MapPortableHook.F_ID;
}
public int getClassId() {
return MapPortableHook.CLEAR;
}
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
}
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("n");
}
@Override
protected OperationFactory createOperationFactory() {
return new ClearOperationFactory(name);
}
@Override
protected Object reduce(Map<Integer, Object> map) {
return null;
}
public Permission getRequiredPermission() {
return new MapPermission(name, ActionConstants.ACTION_REMOVE);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_map_client_MapClearRequest.java |
725 | public interface OTreeInternal<K, V> {
long size();
void loadEntriesMajor(K key, boolean inclusive, RangeResultListener<K, V> listener);
K firstKey();
V remove(K key);
/**
* @author <a href="mailto:[email protected]">Artem Orobets</a>
*/
interface RangeResultListener<K, V> {
boolean addResult(Map.Entry<K, V> entry);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_sbtree_OTreeInternal.java |
156 | public class TransactionInfo implements Comparable<TransactionInfo>
{
private final int identifier;
private final boolean trueForOnePhase;
private final long txId;
private final int master;
private final long checksum;
public TransactionInfo( int identifier, boolean trueForOnePhase, long txId, int master, long checksum )
{
super();
this.identifier = identifier;
this.trueForOnePhase = trueForOnePhase;
this.txId = txId;
this.master = master;
this.checksum = checksum;
}
public int getIdentifier()
{
return identifier;
}
public boolean isOnePhase()
{
return trueForOnePhase;
}
public long getTxId()
{
return txId;
}
public int getMaster()
{
return master;
}
public long getChecksum()
{
return checksum;
}
@Override
public int hashCode()
{
return identifier;
}
@Override
public boolean equals( Object obj )
{
return obj instanceof TransactionInfo && ((TransactionInfo)obj).identifier == identifier;
}
@Override
public int compareTo( TransactionInfo o )
{
return Long.valueOf( txId ).compareTo( Long.valueOf( o.txId ) );
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_TransactionInfo.java |
136 | public class NoSuchLogVersionException extends MissingLogDataException
{
private long version;
public NoSuchLogVersionException( long version )
{
super( "No such log version: '" + version + "'. This means we encountered a log file that we expected " +
"to find was missing. If you are unable to start the database due to this problem, please make " +
"sure that the correct logical log files are in the database directory." );
this.version = version;
}
public long getVersion()
{
return version;
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_NoSuchLogVersionException.java |
176 | static final class AdaptedRunnable<T> extends ForkJoinTask<T>
implements RunnableFuture<T> {
final Runnable runnable;
T result;
AdaptedRunnable(Runnable runnable, T result) {
if (runnable == null) throw new NullPointerException();
this.runnable = runnable;
this.result = result; // OK to set this even before completion
}
public final T getRawResult() { return result; }
public final void setRawResult(T v) { result = v; }
public final boolean exec() { runnable.run(); return true; }
public final void run() { invoke(); }
private static final long serialVersionUID = 5232453952276885070L;
} | 0true
| src_main_java_jsr166y_ForkJoinTask.java |
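AdaptedRunnable is the adapter behind the public ForkJoinTask.adapt(Runnable, T) factory: exec() runs the runnable and the preset result is returned on completion. A small usage sketch against the standard JDK API:

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;

public class AdaptDemo {
    public static void main(String[] args) {
        // Wraps the runnable in a ForkJoinTask that completes with the preset result
        ForkJoinTask<String> task = ForkJoinTask.adapt(
                () -> System.out.println("running"), "done");
        System.out.println(ForkJoinPool.commonPool().invoke(task)); // running, then done
    }
}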
3,247 | public class DoubleValuesComparatorSource extends IndexFieldData.XFieldComparatorSource {
private final IndexNumericFieldData<?> indexFieldData;
private final Object missingValue;
private final SortMode sortMode;
public DoubleValuesComparatorSource(IndexNumericFieldData<?> indexFieldData, @Nullable Object missingValue, SortMode sortMode) {
this.indexFieldData = indexFieldData;
this.missingValue = missingValue;
this.sortMode = sortMode;
}
@Override
public SortField.Type reducedType() {
return SortField.Type.DOUBLE;
}
@Override
public FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
assert fieldname.equals(indexFieldData.getFieldNames().indexName());
final double dMissingValue = (Double) missingObject(missingValue, reversed);
return new DoubleValuesComparator(indexFieldData, dMissingValue, numHits, sortMode);
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_fieldcomparator_DoubleValuesComparatorSource.java |
1,629 | @Deprecated
public interface OServerHandler extends OServerPlugin {
} | 0true
| server_src_main_java_com_orientechnologies_orient_server_handler_OServerHandler.java |
3,433 | private static class NoResponseHandler implements ResponseHandler {
@Override
public void sendResponse(final Object obj) {
}
@Override
public boolean isLocal() {
return false;
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_spi_impl_ResponseHandlerFactory.java |
1,665 | private static final class DeleteRecordNodeCall extends NodeCall<Boolean> {
private String storageName;
private ORecordId recordId;
private ORecordVersion version;
public DeleteRecordNodeCall() {
}
private DeleteRecordNodeCall(long nodeId, String uuid, String storageName, ORecordId iRecordId, ORecordVersion iVersion) {
super(nodeId, uuid);
this.storageName = storageName;
this.recordId = iRecordId;
this.version = iVersion;
}
@Override
protected Boolean call(ODHTNode node) {
return node.deleteRecord(storageName, recordId, version);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
super.writeExternal(out);
out.writeObject(storageName);
out.writeObject(recordId);
version.getSerializer().writeTo(out, version);
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
storageName = (String) in.readObject();
recordId = (ORecordId) in.readObject();
// version IS NULL AFTER THE NO-ARG CONSTRUCTOR AND MUST BE CREATED BEFORE DESERIALIZING
// INTO IT (OVersionFactory.instance().createVersion() IS ASSUMED AVAILABLE IN THIS CODEBASE)
version = OVersionFactory.instance().createVersion();
version.getSerializer().readFrom(in, version);
}
} | 0true
| distributed_src_main_java_com_orientechnologies_orient_server_hazelcast_oldsharding_hazelcast_OHazelcastDHTNodeProxy.java |
499 | public class TransportCloseIndexAction extends TransportMasterNodeOperationAction<CloseIndexRequest, CloseIndexResponse> {
private final MetaDataIndexStateService indexStateService;
private final DestructiveOperations destructiveOperations;
@Inject
public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataIndexStateService indexStateService, NodeSettingsService nodeSettingsService) {
super(settings, transportService, clusterService, threadPool);
this.indexStateService = indexStateService;
this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
}
@Override
protected String executor() {
// no need to use a thread pool, we go async right away
return ThreadPool.Names.SAME;
}
@Override
protected String transportAction() {
return CloseIndexAction.NAME;
}
@Override
protected CloseIndexRequest newRequest() {
return new CloseIndexRequest();
}
@Override
protected CloseIndexResponse newResponse() {
return new CloseIndexResponse();
}
@Override
protected void doExecute(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
destructiveOperations.failDestructive(request.indices());
super.doExecute(request, listener);
}
@Override
protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
}
@Override
protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) throws ElasticsearchException {
request.indices(state.metaData().concreteIndices(request.indices(), request.indicesOptions()));
CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(request.indices());
indexStateService.closeIndex(updateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new CloseIndexResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable t) {
logger.debug("failed to close indices [{}]", t, request.indices());
listener.onFailure(t);
}
});
}
} | 1no label
| src_main_java_org_elasticsearch_action_admin_indices_close_TransportCloseIndexAction.java |
340 | private static class IncrementorEntryProcessor extends AbstractEntryProcessor implements DataSerializable {
IncrementorEntryProcessor() {
super(true);
}
public Object process(Map.Entry entry) {
Integer value = (Integer) entry.getValue();
if (value == null) {
value = 0;
}
if (value == -1) {
entry.setValue(null);
return null;
}
value++;
entry.setValue(value);
return value;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
}
@Override
public void readData(ObjectDataInput in) throws IOException {
}
public void processBackup(Map.Entry entry) {
entry.setValue((Integer) entry.getValue() + 1);
}
} | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java |
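A minimal usage sketch for the processor above, assuming a running Hazelcast member and that IncrementorEntryProcessor is visible on the classpath; executeOnKey runs the processor atomically on the partition that owns the key:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;

public class IncrementorDemo {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<String, Integer> map = hz.getMap("counters");
        // A missing value is treated as 0 by the processor, so the first call yields 1
        Object newValue = map.executeOnKey("hits", new IncrementorEntryProcessor());
        System.out.println(newValue);
        hz.shutdown();
    }
}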
1,276 | public interface AckedClusterStateUpdateTask extends TimeoutClusterStateUpdateTask {
/**
* Called to determine which nodes the acknowledgement is expected from
* @param discoveryNode a node
* @return true if the node is expected to send ack back, false otherwise
*/
boolean mustAck(DiscoveryNode discoveryNode);
/**
* Called once all the nodes have acknowledged the cluster state update request. Must be
* very lightweight execution, since it gets executed on the cluster service thread.
* @param t optional error that might have been thrown
*/
void onAllNodesAcked(@Nullable Throwable t);
/**
* Called once the acknowledgement timeout defined by
* {@link AckedClusterStateUpdateTask#ackTimeout()} has expired
*/
void onAckTimeout();
/**
* Acknowledgement timeout, maximum time interval to wait for acknowledgements
*/
TimeValue ackTimeout();
} | 0true
| src_main_java_org_elasticsearch_cluster_AckedClusterStateUpdateTask.java |
2,425 | public abstract class SlicedObjectList<T> extends AbstractList<T> implements RandomAccess {
public T[] values;
public int offset;
public int length;
public SlicedObjectList(T[] values) {
this(values, 0, values.length);
}
public SlicedObjectList(T[] values, int offset, int length) {
this.values = values;
this.offset = offset;
this.length = length;
}
@Override
public int size() {
return length;
}
@Override
public boolean isEmpty() {
return size() == 0;
}
@Override
public T get(int index) {
assert index < size();
return values[offset + index];
}
@Override
public T set(int index, T element) {
throw new UnsupportedOperationException("modifying list opertations are not implemented");
}
@Override
public boolean equals(Object object) {
if (object == this) {
return true;
}
if (object instanceof SlicedObjectList) {
SlicedObjectList<?> that = (SlicedObjectList<?>) object;
int size = size();
if (that.size() != size) {
return false;
}
for (int i = 0; i < size; i++) {
if (!values[offset + i].equals(that.values[that.offset + i])) {
return false;
}
}
return true;
}
return super.equals(object);
}
@Override
public int hashCode() {
int result = 1;
for (int i = 0; i < length; i++) {
result = 31 * result + values[offset+i].hashCode();
}
return result;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder(size() * 10);
builder.append('[');
if (length > 0) {
builder.append(values[offset]);
for (int i = 1; i < length; i++) {
builder.append(", ").append(values[offset+i]);
}
}
return builder.append(']').toString();
}
public abstract void grow(int newLength);
} | 0true
| src_main_java_org_elasticsearch_common_util_SlicedObjectList.java |
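SlicedObjectList exposes a window over a backing array without copying; only grow(int) is left abstract. A minimal concrete subclass and usage, under the assumption that offset/length bookkeeping stays with the caller:

import java.util.Arrays;

class SlicedStringList extends SlicedObjectList<String> {
    SlicedStringList(String[] values, int offset, int length) {
        super(values, offset, length);
    }

    @Override
    public void grow(int newLength) {
        values = Arrays.copyOf(values, newLength); // pads the backing array with nulls
    }
}

// new SlicedStringList(new String[]{"a", "b", "c", "d"}, 1, 2) views ["b", "c"]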
969 | public static class Presentation {
public static class Tab {
public static class Name {
public static final String OrderItems = "OrderImpl_Order_Items_Tab";
}
public static class Order {
public static final int OrderItems = 2000;
}
}
public static class Group {
public static class Name {
}
public static class Order {
}
}
public static class FieldOrder {
public static final int PRODUCT = 2000;
public static final int SKU = 3000;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_BundleOrderItemImpl.java |
245 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_CURRENCY")
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region = "blCMSElements")
@AdminPresentationClass(friendlyName = "BroadleafCurrencyImpl_baseCurrency")
public class BroadleafCurrencyImpl implements BroadleafCurrency {
private static final long serialVersionUID = 1L;
@Id
@Column(name = "CURRENCY_CODE")
@AdminPresentation(friendlyName = "BroadleafCurrencyImpl_Currency_Code", order = 1, group = "BroadleafCurrencyImpl_Details", prominent = true)
protected String currencyCode;
@Column(name = "FRIENDLY_NAME")
@AdminPresentation(friendlyName = "BroadleafCurrencyImpl_Name", order = 2, group = "BroadleafCurrencyImpl_Details", prominent = true)
protected String friendlyName;
@Column(name = "DEFAULT_FLAG")
@AdminPresentation(friendlyName = "BroadleafCurrencyImpl_Is_Default", group = "BroadleafCurrencyImpl_Details", excluded = true)
protected Boolean defaultFlag = false;
@Override
public String getCurrencyCode() {
return currencyCode;
}
@Override
public void setCurrencyCode(String code) {
this.currencyCode = code;
}
@Override
public String getFriendlyName() {
return friendlyName;
}
@Override
public void setFriendlyName(String friendlyName) {
this.friendlyName = friendlyName;
}
@Override
public boolean getDefaultFlag() {
if (defaultFlag == null) {
return false;
}
return defaultFlag.booleanValue();
}
@Override
public void setDefaultFlag(boolean defaultFlag) {
this.defaultFlag = Boolean.valueOf(defaultFlag);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
// MATCH THE CAST BELOW; A DIFFERENT BroadleafCurrency IMPLEMENTATION WOULD OTHERWISE THROW ClassCastException
if (!(o instanceof BroadleafCurrencyImpl)) {
return false;
}
BroadleafCurrencyImpl currency = (BroadleafCurrencyImpl) o;
if (currencyCode != null ? !currencyCode.equals(currency.currencyCode) : currency.currencyCode != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = currencyCode != null ? currencyCode.hashCode() : 0;
return result;
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_currency_domain_BroadleafCurrencyImpl.java |
1,078 | public interface MergeCartServiceExtensionHandler extends ExtensionHandler {
ExtensionResultStatusType setNewCartOwnership(Order cart, Customer customer);
ExtensionResultStatusType updateMergedOrder(Order cart, Customer customer);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_MergeCartServiceExtensionHandler.java |
1,137 | public class VerifyFulfillmentGroupItemsActivity extends BaseActivity<CartOperationContext> {
@Resource(name = "blFulfillmentGroupItemStrategy")
protected FulfillmentGroupItemStrategy fgItemStrategy;
@Override
public CartOperationContext execute(CartOperationContext context) throws Exception {
CartOperationRequest request = context.getSeedData();
request = fgItemStrategy.verify(request);
context.setSeedData(request);
return context;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_workflow_VerifyFulfillmentGroupItemsActivity.java |
208 | public class OStorageRemoteAsynchEventListener implements ORemoteServerEventListener {
private OStorageRemote storage;
public OStorageRemoteAsynchEventListener(final OStorageRemote storage) {
this.storage = storage;
}
public void onRequest(final byte iRequestCode, final Object obj) {
if (iRequestCode == OChannelBinaryProtocol.REQUEST_PUSH_RECORD)
// ASYNCHRONOUS PUSH INTO THE LEVEL2 CACHE
storage.getLevel2Cache().updateRecord((ORecordInternal<?>) obj);
else if (iRequestCode == OChannelBinaryProtocol.REQUEST_PUSH_DISTRIB_CONFIG) {
storage.updateClusterConfiguration((byte[]) obj);
if (OLogManager.instance().isDebugEnabled()) {
synchronized (storage.getClusterConfiguration()) {
OLogManager.instance()
.debug(this, "Received new cluster configuration: %s", storage.getClusterConfiguration().toJSON(""));
}
}
}
}
public OStorageRemote getStorage() {
return storage;
}
} | 0true
| client_src_main_java_com_orientechnologies_orient_client_remote_OStorageRemoteAsynchEventListener.java |
3,270 | public static class Docs implements Ordinals.Docs {
private final EmptyOrdinals parent;
public static final LongsRef EMPTY_LONGS_REF = new LongsRef();
public Docs(EmptyOrdinals parent) {
this.parent = parent;
}
@Override
public Ordinals ordinals() {
return parent;
}
@Override
public int getNumDocs() {
return parent.getNumDocs();
}
@Override
public long getNumOrds() {
return 0;
}
@Override
public long getMaxOrd() {
return 1;
}
@Override
public boolean isMultiValued() {
return false;
}
@Override
public long getOrd(int docId) {
return 0;
}
@Override
public LongsRef getOrds(int docId) {
return EMPTY_LONGS_REF;
}
@Override
public long nextOrd() {
throw new ElasticsearchIllegalStateException("Empty ordinals has no nextOrd");
}
@Override
public int setDocument(int docId) {
return 0;
}
@Override
public long currentOrd() {
return 0;
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_ordinals_EmptyOrdinals.java |
728 | public class ShardDeleteResponse extends ActionResponse {
private long version;
private boolean found;
public ShardDeleteResponse() {
}
public ShardDeleteResponse(long version, boolean found) {
this.version = version;
this.found = found;
}
public long getVersion() {
return version;
}
public boolean isFound() {
return found;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
version = in.readLong();
found = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(version);
out.writeBoolean(found);
}
} | 0true
| src_main_java_org_elasticsearch_action_delete_index_ShardDeleteResponse.java |
753 | loadEntriesMajor(key, inclusive, new RangeResultListener<K, V>() {
@Override
public boolean addResult(Map.Entry<K, V> entry) {
result.add(entry.getValue());
if (maxValuesToFetch > -1 && result.size() >= maxValuesToFetch)
return false;
return true;
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_sbtreebonsai_local_OSBTreeBonsai.java |
1,397 | clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", Priority.URGENT, new AckedClusterStateUpdateTask() {
@Override
public boolean mustAck(DiscoveryNode discoveryNode) {
return true;
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
mdLock.release();
listener.onResponse(new ClusterStateUpdateResponse(true));
}
@Override
public void onAckTimeout() {
mdLock.release();
listener.onResponse(new ClusterStateUpdateResponse(false));
}
@Override
public TimeValue ackTimeout() {
return request.ackTimeout();
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void onFailure(String source, Throwable t) {
mdLock.release();
listener.onFailure(t);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
boolean indexCreated = false;
String failureReason = null;
try {
validate(request, currentState);
// we only find a template when its an API call (a new index)
// find templates, highest order are better matching
List<IndexTemplateMetaData> templates = findTemplates(request, currentState);
Map<String, Custom> customs = Maps.newHashMap();
// add the request mapping
Map<String, Map<String, Object>> mappings = Maps.newHashMap();
for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
mappings.put(entry.getKey(), parseMapping(entry.getValue()));
}
for (Map.Entry<String, Custom> entry : request.customs().entrySet()) {
customs.put(entry.getKey(), entry.getValue());
}
// apply templates, merging the mappings into the request mapping if exists
for (IndexTemplateMetaData template : templates) {
for (ObjectObjectCursor<String, CompressedString> cursor : template.mappings()) {
if (mappings.containsKey(cursor.key)) {
XContentHelper.mergeDefaults(mappings.get(cursor.key), parseMapping(cursor.value.string()));
} else {
mappings.put(cursor.key, parseMapping(cursor.value.string()));
}
}
// handle custom
for (ObjectObjectCursor<String, Custom> cursor : template.customs()) {
String type = cursor.key;
IndexMetaData.Custom custom = cursor.value;
IndexMetaData.Custom existing = customs.get(type);
if (existing == null) {
customs.put(type, custom);
} else {
IndexMetaData.Custom merged = IndexMetaData.lookupFactorySafe(type).merge(existing, custom);
customs.put(type, merged);
}
}
}
// now add config level mappings
File mappingsDir = new File(environment.configFile(), "mappings");
if (mappingsDir.exists() && mappingsDir.isDirectory()) {
// first index level
File indexMappingsDir = new File(mappingsDir, request.index());
if (indexMappingsDir.exists() && indexMappingsDir.isDirectory()) {
addMappings(mappings, indexMappingsDir);
}
// second is the _default mapping
File defaultMappingsDir = new File(mappingsDir, "_default");
if (defaultMappingsDir.exists() && defaultMappingsDir.isDirectory()) {
addMappings(mappings, defaultMappingsDir);
}
}
ImmutableSettings.Builder indexSettingsBuilder = settingsBuilder();
// apply templates, here, in reverse order, since first ones are better matching
for (int i = templates.size() - 1; i >= 0; i--) {
indexSettingsBuilder.put(templates.get(i).settings());
}
// now, put the request settings, so they override templates
indexSettingsBuilder.put(request.settings());
if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {
if (request.index().equals(riverIndexName)) {
indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 1));
} else {
indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));
}
}
if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {
if (request.index().equals(riverIndexName)) {
indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
} else {
indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
}
}
if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) {
indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS));
}
if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
indexSettingsBuilder.put(SETTING_VERSION_CREATED, version);
}
indexSettingsBuilder.put(SETTING_UUID, Strings.randomBase64UUID());
Settings actualIndexSettings = indexSettingsBuilder.build();
// Set up everything, now locally create the index to see that things are ok, and apply
// create the index here (on the master) to validate it can be created, as well as adding the mapping
indicesService.createIndex(request.index(), actualIndexSettings, clusterService.localNode().id());
indexCreated = true;
// now add the mappings
IndexService indexService = indicesService.indexServiceSafe(request.index());
MapperService mapperService = indexService.mapperService();
// first, add the default mapping
if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
try {
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), false);
} catch (Exception e) {
failureReason = "failed on parsing default mapping on index creation";
throw new MapperParsingException("mapping [" + MapperService.DEFAULT_MAPPING + "]", e);
}
}
for (Map.Entry<String, Map<String, Object>> entry : mappings.entrySet()) {
if (entry.getKey().equals(MapperService.DEFAULT_MAPPING)) {
continue;
}
try {
// apply the default here, its the first time we parse it
mapperService.merge(entry.getKey(), new CompressedString(XContentFactory.jsonBuilder().map(entry.getValue()).string()), true);
} catch (Exception e) {
failureReason = "failed on parsing mappings on index creation";
throw new MapperParsingException("mapping [" + entry.getKey() + "]", e);
}
}
// now, update the mappings with the actual source
Map<String, MappingMetaData> mappingsMetaData = Maps.newHashMap();
for (DocumentMapper mapper : mapperService) {
MappingMetaData mappingMd = new MappingMetaData(mapper);
mappingsMetaData.put(mapper.type(), mappingMd);
}
final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()).settings(actualIndexSettings);
for (MappingMetaData mappingMd : mappingsMetaData.values()) {
indexMetaDataBuilder.putMapping(mappingMd);
}
for (Map.Entry<String, Custom> customEntry : customs.entrySet()) {
indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue());
}
indexMetaDataBuilder.state(request.state());
final IndexMetaData indexMetaData;
try {
indexMetaData = indexMetaDataBuilder.build();
} catch (Exception e) {
failureReason = "failed to build index metadata";
throw e;
}
MetaData newMetaData = MetaData.builder(currentState.metaData())
.put(indexMetaData, false)
.build();
logger.info("[{}] creating index, cause [{}], shards [{}]/[{}], mappings {}", request.index(), request.cause(), indexMetaData.numberOfShards(), indexMetaData.numberOfReplicas(), mappings.keySet());
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
if (!request.blocks().isEmpty()) {
for (ClusterBlock block : request.blocks()) {
blocks.addIndexBlock(request.index(), block);
}
}
if (request.state() == State.CLOSE) {
blocks.addIndexBlock(request.index(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
}
ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metaData(newMetaData).build();
if (request.state() == State.OPEN) {
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable())
.addAsNew(updatedState.metaData().index(request.index()));
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder).build());
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
}
return updatedState;
} finally {
if (indexCreated) {
// Index was already partially created - need to clean up
indicesService.removeIndex(request.index(), failureReason != null ? failureReason : "failed to create index");
}
}
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
}); | 0true
| src_main_java_org_elasticsearch_cluster_metadata_MetaDataCreateIndexService.java |
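The task above applies template settings in reverse order so that the best-matching (first) template wins, because later puts overwrite earlier ones. A standalone sketch of that precedence rule with hypothetical settings:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TemplatePrecedence {
    public static void main(String[] args) {
        Map<String, String> best = new HashMap<>();
        best.put("number_of_replicas", "2"); // highest-order template, should win
        Map<String, String> fallback = new HashMap<>();
        fallback.put("number_of_replicas", "1");
        fallback.put("number_of_shards", "5");
        List<Map<String, String>> templates = Arrays.asList(best, fallback);

        Map<String, String> settings = new HashMap<>();
        for (int i = templates.size() - 1; i >= 0; i--) {
            settings.putAll(templates.get(i)); // later puts override earlier ones
        }
        System.out.println(settings); // {number_of_replicas=2, number_of_shards=5}
    }
}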
980 | public class LockReplicationOperation extends AbstractOperation
implements IdentifiedDataSerializable {
private final Collection<LockStoreImpl> locks = new LinkedList<LockStoreImpl>();
public LockReplicationOperation() {
}
public LockReplicationOperation(LockStoreContainer container, int partitionId, int replicaIndex) {
this.setPartitionId(partitionId).setReplicaIndex(replicaIndex);
Collection<LockStoreImpl> lockStores = container.getLockStores();
for (LockStoreImpl ls : lockStores) {
if (ls.getTotalBackupCount() < replicaIndex) {
continue;
}
locks.add(ls);
}
}
@Override
public void run() {
LockServiceImpl lockService = getService();
LockStoreContainer container = lockService.getLockContainer(getPartitionId());
for (LockStoreImpl ls : locks) {
container.put(ls);
}
}
@Override
public String getServiceName() {
return LockServiceImpl.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return LockDataSerializerHook.F_ID;
}
@Override
public int getId() {
return LockDataSerializerHook.LOCK_REPLICATION;
}
@Override
protected void writeInternal(final ObjectDataOutput out) throws IOException {
super.writeInternal(out);
int len = locks.size();
out.writeInt(len);
if (len > 0) {
for (LockStoreImpl ls : locks) {
ls.writeData(out);
}
}
}
@Override
protected void readInternal(final ObjectDataInput in) throws IOException {
super.readInternal(in);
int len = in.readInt();
if (len > 0) {
for (int i = 0; i < len; i++) {
LockStoreImpl ls = new LockStoreImpl();
ls.readData(in);
locks.add(ls);
}
}
}
public boolean isEmpty() {
return locks.isEmpty();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_lock_operations_LockReplicationOperation.java |
622 | public class NullBroadleafThemeResolver implements BroadleafThemeResolver {
private final Theme theme = new ThemeDTO();
@Override
public Theme resolveTheme(HttpServletRequest request, Site site) {
return resolveTheme(new ServletWebRequest(request));
}
@Override
public Theme resolveTheme(WebRequest request) {
return theme;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_web_NullBroadleafThemeResolver.java |
431 | public class ClientReplicatedMapProxy<K, V>
extends ClientProxy
implements ReplicatedMap<K, V> {
private volatile ClientNearCache<Object> nearCache;
private final AtomicBoolean nearCacheInitialized = new AtomicBoolean();
public ClientReplicatedMapProxy(String instanceName, String serviceName, String objectName) {
super(instanceName, serviceName, objectName);
}
@Override
protected void onDestroy() {
if (nearCache != null) {
nearCache.destroy();
}
}
@Override
public V put(K key, V value, long ttl, TimeUnit timeUnit) {
return invoke(new ClientReplicatedMapPutTtlRequest(getName(), key, value, timeUnit.toMillis(ttl)));
}
@Override
public int size() {
return (Integer) invoke(new ClientReplicatedMapSizeRequest(getName()));
}
@Override
public boolean isEmpty() {
return (Boolean) invoke(new ClientReplicatedMapIsEmptyRequest(getName()));
}
@Override
public boolean containsKey(Object key) {
return (Boolean) invoke(new ClientReplicatedMapContainsKeyRequest(getName(), key));
}
@Override
public boolean containsValue(Object value) {
return (Boolean) invoke(new ClientReplicatedMapContainsValueRequest(getName(), value));
}
@Override
public V get(Object key) {
initNearCache();
if (nearCache != null) {
Object cached = nearCache.get(key);
if (cached != null) {
if (cached.equals(ClientNearCache.NULL_OBJECT)) {
return null;
}
return (V) cached;
}
}
ReplicatedMapGetResponse response = invoke(new ClientReplicatedMapGetRequest(getName(), key));
V value = (V) response.getValue();
if (nearCache != null) {
nearCache.put(key, value);
}
return value;
}
@Override
public V put(K key, V value) {
return put(key, value, 0, TimeUnit.MILLISECONDS);
}
@Override
public V remove(Object key) {
return invoke(new ClientReplicatedMapRemoveRequest(getName(), key));
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
invoke(new ClientReplicatedMapPutAllRequest(getName(), new ReplicatedMapEntrySet(m.entrySet())));
}
@Override
public void clear() {
ClientReplicatedMapClearRequest request = new ClientReplicatedMapClearRequest(getName());
invoke(request);
}
@Override
public boolean removeEntryListener(String id) {
final ClientReplicatedMapRemoveEntryListenerRequest request = new ClientReplicatedMapRemoveEntryListenerRequest(getName(),
id);
return stopListening(request, id);
}
@Override
public String addEntryListener(EntryListener<K, V> listener) {
ClientReplicatedMapAddEntryListenerRequest request = new ClientReplicatedMapAddEntryListenerRequest(getName(), null,
null);
EventHandler<ReplicatedMapPortableEntryEvent> handler = createHandler(listener);
return listen(request, null, handler);
}
@Override
public String addEntryListener(EntryListener<K, V> listener, K key) {
ClientReplicatedMapAddEntryListenerRequest request = new ClientReplicatedMapAddEntryListenerRequest(getName(), null, key);
EventHandler<ReplicatedMapPortableEntryEvent> handler = createHandler(listener);
return listen(request, null, handler);
}
@Override
public String addEntryListener(EntryListener<K, V> listener, Predicate<K, V> predicate) {
ClientReplicatedMapAddEntryListenerRequest request = new ClientReplicatedMapAddEntryListenerRequest(getName(), predicate,
null);
EventHandler<ReplicatedMapPortableEntryEvent> handler = createHandler(listener);
return listen(request, null, handler);
}
@Override
public String addEntryListener(EntryListener<K, V> listener, Predicate<K, V> predicate, K key) {
ClientRequest request = new ClientReplicatedMapAddEntryListenerRequest(getName(), predicate, key);
EventHandler<ReplicatedMapPortableEntryEvent> handler = createHandler(listener);
return listen(request, null, handler);
}
@Override
public Set<K> keySet() {
return ((ReplicatedMapKeySet) invoke(new ClientReplicatedMapKeySetRequest(getName()))).getKeySet();
}
@Override
public Collection<V> values() {
return ((ReplicatedMapValueCollection) invoke(new ClientReplicatedMapValuesRequest(getName()))).getValues();
}
@Override
public Collection<V> values(Comparator<V> comparator) {
List values = (List) values();
Collections.sort(values, comparator);
return values;
}
@Override
public Set<Entry<K, V>> entrySet() {
return ((ReplicatedMapEntrySet) invoke(new ClientReplicatedMapEntrySetRequest(getName()))).getEntrySet();
}
private EventHandler<ReplicatedMapPortableEntryEvent> createHandler(final EntryListener<K, V> listener) {
return new EventHandler<ReplicatedMapPortableEntryEvent>() {
public void handle(ReplicatedMapPortableEntryEvent event) {
V value = (V) event.getValue();
V oldValue = (V) event.getOldValue();
K key = (K) event.getKey();
Member member = getContext().getClusterService().getMember(event.getUuid());
EntryEvent<K, V> entryEvent = new EntryEvent<K, V>(getName(), member, event.getEventType().getType(), key,
oldValue, value);
switch (event.getEventType()) {
case ADDED:
listener.entryAdded(entryEvent);
break;
case REMOVED:
listener.entryRemoved(entryEvent);
break;
case UPDATED:
listener.entryUpdated(entryEvent);
break;
case EVICTED:
listener.entryEvicted(entryEvent);
break;
default:
throw new IllegalArgumentException("Not a known event type " + event.getEventType());
}
}
@Override
public void onListenerRegister() {
}
};
}
private void initNearCache() {
if (nearCacheInitialized.compareAndSet(false, true)) {
final NearCacheConfig nearCacheConfig = getContext().getClientConfig().getNearCacheConfig(getName());
if (nearCacheConfig == null) {
return;
}
ClientNearCache<Object> nearCache = new ClientNearCache<Object>(getName(), ClientNearCacheType.ReplicatedMap,
getContext(), nearCacheConfig);
this.nearCache = nearCache;
}
}
@Override
public String toString() {
return "ReplicatedMap{" + "name='" + getName() + '\'' + '}';
}
} | 1no label
| hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientReplicatedMapProxy.java |
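get() above is a get-through near cache: a local hit skips the network, a miss is fetched remotely and cached, and a NULL_OBJECT marker distinguishes a cached null from an absent entry. A minimal standalone sketch of the same pattern:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class NearCache<K, V> {
    private static final Object NULL = new Object(); // marker for a cached null
    private final Map<K, Object> cache = new ConcurrentHashMap<>();

    @SuppressWarnings("unchecked")
    V get(K key, Function<K, V> remoteLoader) {
        Object cached = cache.get(key);
        if (cached != null) {
            return cached == NULL ? null : (V) cached; // local hit, no remote call
        }
        V value = remoteLoader.apply(key);              // remote fetch on miss
        cache.put(key, value == null ? NULL : value);   // cache nulls too
        return value;
    }
}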
997 | public class SemaphoreWaitNotifyKey extends AbstractWaitNotifyKey {
private final String type;
public SemaphoreWaitNotifyKey(String name, String type) {
super(SemaphoreService.SERVICE_NAME, name);
this.type = ValidationUtil.isNotNull(type, "type");
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof SemaphoreWaitNotifyKey)) {
return false;
}
if (!super.equals(o)) {
return false;
}
SemaphoreWaitNotifyKey that = (SemaphoreWaitNotifyKey) o;
if (!type.equals(that.type)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + type.hashCode();
return result;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_semaphore_SemaphoreWaitNotifyKey.java |
123 | client.getLifecycleService().addLifecycleListener(new LifecycleListener() {
@Override
public void stateChanged(LifecycleEvent event) {
connectedLatch.countDown();
}
}); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_ClientReconnectTest.java |
224 | public class RuntimeEnvironmentPropertiesManager implements BeanFactoryAware {
private static final Log LOG = LogFactory.getLog(RuntimeEnvironmentPropertiesManager.class);
protected ConfigurableBeanFactory beanFactory;
protected String prefix;
public String getPrefix() {
return prefix;
}
public String setPrefix(String prefix) {
return this.prefix = prefix;
}
public String getProperty(String key, String suffix) {
if (key == null) {
return null;
}
String name = prefix == null ? key + "." + suffix : prefix + "." + key + "." + suffix;
String rv = beanFactory.resolveEmbeddedValue("${" + name + "}");
if (rv == null || rv.equals("${" + name + "}")) {
LOG.warn("property ${" + name + "} not found, reverting to property without suffix " + suffix);
rv = getProperty(key);
}
return rv;
}
public String getProperty(String key) {
if(key==null) {
return null;
}
String name = prefix + "." + key;
if (prefix == null) {
name = key;
}
String rv = beanFactory.resolveEmbeddedValue("${" + name + "}");
if(rv.equals("${" + name + "}")) {
return null;
}
return rv;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = (ConfigurableBeanFactory) beanFactory;
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_config_RuntimeEnvironmentPropertiesManager.java |
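Note: a usage sketch of the two-step lookup above: the suffixed property is tried first and the bare property is the fallback. The bean is normally wired by Spring, which supplies the ConfigurableBeanFactory; the prefix and property names below are hypothetical:
RuntimeEnvironmentPropertiesManager props = new RuntimeEnvironmentPropertiesManager();
props.setBeanFactory(beanFactory); // normally injected via BeanFactoryAware
props.setPrefix("payment");
// resolves ${payment.gateway.url.sandbox} first; if that placeholder is
// undefined, falls back to ${payment.gateway.url}
String url = props.getProperty("gateway.url", "sandbox");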
42 | Arrays.sort(properties, new Comparator<Property>() {
@Override
public int compare(Property o1, Property o2) {
/*
* First, compare properties based on order fields
*/
if (o1.getMetadata().getOrder() != null && o2.getMetadata().getOrder() != null) {
return o1.getMetadata().getOrder().compareTo(o2.getMetadata().getOrder());
} else if (o1.getMetadata().getOrder() != null && o2.getMetadata().getOrder() == null) {
/*
* Always favor fields that have an order identified
*/
return -1;
} else if (o1.getMetadata().getOrder() == null && o2.getMetadata().getOrder() != null) {
/*
* Always favor fields that have an order identified
*/
return 1;
} else if (o1.getMetadata().getFriendlyName() != null && o2.getMetadata().getFriendlyName() != null) {
return o1.getMetadata().getFriendlyName().compareTo(o2.getMetadata().getFriendlyName());
} else {
return o1.getName().compareTo(o2.getName());
}
}
}); | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_DynamicFieldPersistenceHandlerHelper.java |
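Note: the comparator implements order-first, name-fallback sorting. A self-contained restatement of the same rule on a plain value class, so the ordering is easy to verify outside of Broadleaf:
import java.util.Arrays;
import java.util.Comparator;
public class OrderThenNameSort {
    static class Field {
        final Integer order;
        final String name;
        Field(Integer order, String name) { this.order = order; this.name = name; }
    }
    public static void main(String[] args) {
        Field[] fields = {
                new Field(null, "amount"), new Field(2000, "quantity"), new Field(1000, "status")
        };
        Arrays.sort(fields, new Comparator<Field>() {
            @Override
            public int compare(Field a, Field b) {
                if (a.order != null && b.order != null) {
                    return a.order.compareTo(b.order); // both ordered: ascending
                } else if (a.order != null) {
                    return -1; // ordered fields always come first
                } else if (b.order != null) {
                    return 1;
                }
                return a.name.compareTo(b.name); // neither ordered: by name
            }
        });
        for (Field f : fields) {
            System.out.println(f.name); // prints: status, quantity, amount
        }
    }
}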
512 | public class MinuteType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, MinuteType> TYPES = new LinkedHashMap<String, MinuteType>();
public static final MinuteType ZERO = new MinuteType("0", "00");
public static final MinuteType ONE = new MinuteType("1", "01");
public static final MinuteType TWO = new MinuteType("2", "02");
public static final MinuteType THREE = new MinuteType("3", "03");
public static final MinuteType FOUR = new MinuteType("4", "04");
public static final MinuteType FIVE = new MinuteType("5", "05");
public static final MinuteType SIX = new MinuteType("6", "06");
public static final MinuteType SEVEN = new MinuteType("7", "07");
public static final MinuteType EIGHT = new MinuteType("8", "08");
public static final MinuteType NINE = new MinuteType("9", "09");
public static final MinuteType TEN = new MinuteType("10", "10");
public static final MinuteType ELEVEN = new MinuteType("11", "11");
public static final MinuteType TWELVE = new MinuteType("12", "12");
public static final MinuteType THIRTEEN = new MinuteType("13", "13");
public static final MinuteType FOURTEEN = new MinuteType("14", "14");
public static final MinuteType FIFTEEN = new MinuteType("15", "15");
public static final MinuteType SIXTEEN = new MinuteType("16", "16");
public static final MinuteType SEVENTEEN = new MinuteType("17", "17");
public static final MinuteType EIGHTEEN = new MinuteType("18", "18");
public static final MinuteType NINETEEN = new MinuteType("19", "19");
public static final MinuteType TWENTY = new MinuteType("20", "20");
public static final MinuteType TWENTYONE = new MinuteType("21", "21");
public static final MinuteType TWENTYTWO = new MinuteType("22", "22");
public static final MinuteType TWENTYTHREE = new MinuteType("23", "23");
public static final MinuteType TWENTYFOUR = new MinuteType("24", "24");
public static final MinuteType TWENTYFIVE = new MinuteType("25", "25");
public static final MinuteType TWENTYSIX = new MinuteType("26", "26");
public static final MinuteType TWENTYSEVEN = new MinuteType("27", "27");
public static final MinuteType TWENTYEIGHT = new MinuteType("28", "28");
public static final MinuteType TWENTYNINE = new MinuteType("29", "29");
public static final MinuteType THIRTY = new MinuteType("30", "30");
public static final MinuteType THIRTYONE = new MinuteType("31", "31");
public static final MinuteType THIRTYTWO = new MinuteType("32", "32");
public static final MinuteType THIRTYTHREE = new MinuteType("33", "33");
public static final MinuteType THIRTYFOUR = new MinuteType("34", "34");
public static final MinuteType THIRTYFIVE = new MinuteType("35", "35");
public static final MinuteType THIRTYSIX = new MinuteType("36", "36");
public static final MinuteType THIRTYSEVEN = new MinuteType("37", "37");
public static final MinuteType THIRTYEIGHT = new MinuteType("38", "38");
public static final MinuteType THIRTYNINE = new MinuteType("39", "39");
public static final MinuteType FOURTY = new MinuteType("40", "40");
public static final MinuteType FOURTYONE = new MinuteType("41", "41");
public static final MinuteType FOURTYTWO = new MinuteType("42", "42");
public static final MinuteType FOURTYTHREE = new MinuteType("43", "43");
public static final MinuteType FOURTYFOUR = new MinuteType("44", "44");
public static final MinuteType FOURTYFIVE = new MinuteType("45", "45");
public static final MinuteType FOURTYSIX = new MinuteType("46", "46");
public static final MinuteType FOURTYSEVEN = new MinuteType("47", "47");
public static final MinuteType FOURTYEIGHT = new MinuteType("48", "48");
public static final MinuteType FOURTYNINE = new MinuteType("49", "49");
public static final MinuteType FIFTY = new MinuteType("50", "50");
public static final MinuteType FIFTYONE = new MinuteType("51", "51");
public static final MinuteType FIFTYTWO = new MinuteType("52", "52");
public static final MinuteType FIFTYTHREE = new MinuteType("53", "53");
public static final MinuteType FIFTYFOUR = new MinuteType("54", "54");
public static final MinuteType FIFTYFIVE = new MinuteType("55", "55");
public static final MinuteType FIFTYSIX = new MinuteType("56", "56");
public static final MinuteType FIFTYSEVEN = new MinuteType("57", "57");
public static final MinuteType FIFTYEIGHT = new MinuteType("58", "58");
public static final MinuteType FIFTYNINE = new MinuteType("59", "59");
public static MinuteType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public MinuteType() {
//do nothing
}
public MinuteType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
} else {
throw new RuntimeException("Cannot add the type: (" + type + "). It already exists as a type via " + getInstance(type).getClass().getName());
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
MinuteType other = (MinuteType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_time_MinuteType.java |
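Note: because every constant registers itself in TYPES during class initialization, lookups are simple map reads. A small sketch:
MinuteType seven = MinuteType.getInstance("7");
System.out.println(seven.getFriendlyType()); // prints "07", the zero-padded display form
// values outside 0-59 were never registered, so the lookup returns null
assert MinuteType.getInstance("60") == null;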
1,346 | completableFuture.andThen(new ExecutionCallback() {
@Override
public void onResponse(Object response) {
reference1.set(response);
latch2.countDown();
}
@Override
public void onFailure(Throwable t) {
reference1.set(t);
latch2.countDown();
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_executor_ExecutorServiceTest.java |
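Note: the andThen registration above runs the callback once the future completes, on success and failure alike. A minimal sketch of the setup that produces such a future, assuming the Hazelcast 3.x API (the EchoTask callable is illustrative):
IExecutorService executor = hz.getExecutorService("default");
// in Hazelcast 3.x the Future returned by submit() is an ICompletableFuture,
// which adds the andThen() hook used in the snippet above
ICompletableFuture<String> completableFuture =
        (ICompletableFuture<String>) executor.submit(new EchoTask("hi"));
// ... attach the ExecutionCallback shown above, then block the test:
assertTrue(latch2.await(10, TimeUnit.SECONDS));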
1,004 | @Entity
@DiscriminatorColumn(name = "TYPE")
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_FULFILLMENT_GROUP_ITEM")
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationMergeOverrides(
{
@AdminPresentationMergeOverride(name = "", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.READONLY,
booleanOverrideValue = true))
}
)
public class FulfillmentGroupItemImpl implements FulfillmentGroupItem, Cloneable, CurrencyCodeIdentifiable {
private static final Log LOG = LogFactory.getLog(FulfillmentGroupItemImpl.class);
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "FulfillmentGroupItemId")
@GenericGenerator(
name="FulfillmentGroupItemId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="FulfillmentGroupItemImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.order.domain.FulfillmentGroupItemImpl")
}
)
@Column(name = "FULFILLMENT_GROUP_ITEM_ID")
protected Long id;
@ManyToOne(targetEntity = FulfillmentGroupImpl.class, optional=false)
@JoinColumn(name = "FULFILLMENT_GROUP_ID")
@Index(name="FGITEM_FG_INDEX", columnNames={"FULFILLMENT_GROUP_ID"})
protected FulfillmentGroup fulfillmentGroup;
//this needs to stay OrderItem in order to provide backwards compatibility for those implementations that place a BundleOrderItem
@ManyToOne(targetEntity = OrderItemImpl.class, optional=false)
@JoinColumn(name = "ORDER_ITEM_ID")
@AdminPresentation(friendlyName = "FulfillmentGroupItemImpl_Order_Item", prominent = true, order = 1000, gridOrder = 1000)
@AdminPresentationToOneLookup()
protected OrderItem orderItem;
@Column(name = "QUANTITY", nullable=false)
@AdminPresentation(friendlyName = "FulfillmentGroupItemImpl_Quantity", prominent = true, order = 2000, gridOrder = 2000)
protected int quantity;
@Column(name = "STATUS")
@Index(name="FGITEM_STATUS_INDEX", columnNames={"STATUS"})
@AdminPresentation(friendlyName = "FulfillmentGroupItemImpl_Status", prominent = true, order = 3000, gridOrder = 3000)
private String status;
@OneToMany(fetch = FetchType.LAZY, targetEntity = TaxDetailImpl.class, cascade = { CascadeType.ALL }, orphanRemoval = true)
@JoinTable(name = "BLC_FG_ITEM_TAX_XREF", joinColumns = @JoinColumn(name = "FULFILLMENT_GROUP_ITEM_ID"), inverseJoinColumns = @JoinColumn(name = "TAX_DETAIL_ID"))
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
protected List<TaxDetail> taxes = new ArrayList<TaxDetail>();
@Column(name = "TOTAL_ITEM_TAX", precision=19, scale=5)
@AdminPresentation(friendlyName = "FulfillmentGroupItemImpl_Total_Item_Tax", order=4000, fieldType=SupportedFieldType.MONEY)
protected BigDecimal totalTax;
@Column(name = "TOTAL_ITEM_AMOUNT", precision = 19, scale = 5)
@AdminPresentation(friendlyName = "FulfillmentGroupItemImpl_Total_Item_Amount", order = 5000, fieldType = SupportedFieldType.MONEY)
protected BigDecimal totalItemAmount;
@Column(name = "TOTAL_ITEM_TAXABLE_AMOUNT", precision = 19, scale = 5)
@AdminPresentation(friendlyName = "FulfillmentGroupItemImpl_Total_Item_Taxable_Amount", order = 6000, fieldType = SupportedFieldType.MONEY)
protected BigDecimal totalItemTaxableAmount;
@Column(name = "PRORATED_ORDER_ADJ")
@AdminPresentation(friendlyName = "FulfillmentGroupItemImpl_Prorated_Adjustment", order = 7000, fieldType = SupportedFieldType.MONEY)
protected BigDecimal proratedOrderAdjustment;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public FulfillmentGroup getFulfillmentGroup() {
return fulfillmentGroup;
}
@Override
public void setFulfillmentGroup(FulfillmentGroup fulfillmentGroup) {
this.fulfillmentGroup = fulfillmentGroup;
}
@Override
public OrderItem getOrderItem() {
return orderItem;
}
@Override
public void setOrderItem(OrderItem orderItem) {
this.orderItem = orderItem;
}
@Override
public int getQuantity() {
return quantity;
}
@Override
public void setQuantity(int quantity) {
this.quantity = quantity;
}
@Override
public Money getRetailPrice() {
return orderItem.getRetailPrice();
}
@Override
public Money getSalePrice() {
return orderItem.getSalePrice();
}
@Override
public Money getPrice() {
return orderItem.getAveragePrice();
}
protected Money convertToMoney(BigDecimal amount) {
return amount == null ? null : BroadleafCurrencyUtils.getMoney(amount, orderItem.getOrder().getCurrency());
}
@Override
public Money getTotalItemAmount() {
return convertToMoney(totalItemAmount);
}
@Override
public void setTotalItemAmount(Money amount) {
totalItemAmount = amount.getAmount();
}
@Override
public Money getProratedOrderAdjustmentAmount() {
return convertToMoney(proratedOrderAdjustment);
}
@Override
public void setProratedOrderAdjustmentAmount(Money proratedOrderAdjustment) {
this.proratedOrderAdjustment = proratedOrderAdjustment.getAmount();
}
@Override
public Money getTotalItemTaxableAmount() {
return convertToMoney(totalItemTaxableAmount);
}
@Override
public void setTotalItemTaxableAmount(Money taxableAmount) {
totalItemTaxableAmount = taxableAmount.getAmount();
}
@Override
public FulfillmentGroupStatusType getStatus() {
return FulfillmentGroupStatusType.getInstance(this.status);
}
@Override
public void setStatus(FulfillmentGroupStatusType status) {
this.status = status.getType();
}
@Override
public void removeAssociations() {
if (getFulfillmentGroup() != null) {
getFulfillmentGroup().getFulfillmentGroupItems().remove(this);
}
setFulfillmentGroup(null);
setOrderItem(null);
}
@Override
public List<TaxDetail> getTaxes() {
return this.taxes;
}
@Override
public void setTaxes(List<TaxDetail> taxes) {
this.taxes = taxes;
}
@Override
public Money getTotalTax() {
return totalTax == null ? null : BroadleafCurrencyUtils.getMoney(totalTax, getFulfillmentGroup().getOrder().getCurrency());
}
@Override
public void setTotalTax(Money totalTax) {
this.totalTax = Money.toAmount(totalTax);
}
@Override
public String getCurrencyCode() {
return ((CurrencyCodeIdentifiable) fulfillmentGroup).getCurrencyCode();
}
public void checkCloneable(FulfillmentGroupItem fulfillmentGroupItem) throws CloneNotSupportedException, SecurityException, NoSuchMethodException {
Method cloneMethod = fulfillmentGroupItem.getClass().getMethod("clone", new Class[]{});
if (cloneMethod.getDeclaringClass().getName().startsWith("org.broadleafcommerce") && !fulfillmentGroupItem.getClass().getName().startsWith("org.broadleafcommerce")) {
//subclass is not implementing the clone method
throw new CloneNotSupportedException("Custom extensions and implementations should implement clone in order to guarantee split and merge operations are performed accurately");
}
}
@Override
public FulfillmentGroupItem clone() {
//this is likely an extended class - instantiate from the fully qualified name via reflection
FulfillmentGroupItem clonedFulfillmentGroupItem;
try {
clonedFulfillmentGroupItem = (FulfillmentGroupItem) Class.forName(this.getClass().getName()).newInstance();
try {
checkCloneable(clonedFulfillmentGroupItem);
} catch (CloneNotSupportedException e) {
LOG.warn("Clone implementation missing in inheritance hierarchy outside of Broadleaf: " + clonedFulfillmentGroupItem.getClass().getName(), e);
}
clonedFulfillmentGroupItem.setFulfillmentGroup(getFulfillmentGroup());
clonedFulfillmentGroupItem.setOrderItem(getOrderItem());
clonedFulfillmentGroupItem.setQuantity(getQuantity());
clonedFulfillmentGroupItem.setTotalItemAmount(getTotalItemAmount());
clonedFulfillmentGroupItem.setTotalItemTaxableAmount(getTotalItemTaxableAmount());
if (getStatus() != null) {
clonedFulfillmentGroupItem.setStatus(getStatus());
}
} catch (Exception e) {
throw new RuntimeException(e);
}
return clonedFulfillmentGroupItem;
}
@Override
public boolean getHasProratedOrderAdjustments() {
if (proratedOrderAdjustment != null) {
return (proratedOrderAdjustment.compareTo(BigDecimal.ZERO) != 0);
}
return false;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
FulfillmentGroupItemImpl other = (FulfillmentGroupItemImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (orderItem == null) {
if (other.orderItem != null) {
return false;
}
} else if (!orderItem.equals(other.orderItem)) {
return false;
}
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((orderItem == null) ? 0 : orderItem.hashCode());
return result;
}
} | 1no label
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_FulfillmentGroupItemImpl.java |
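Note: checkCloneable exists to catch custom subclasses that forget to override clone(). A minimal sketch of an extension that satisfies the check; the class and field names are hypothetical:
public class MyFulfillmentGroupItemImpl extends FulfillmentGroupItemImpl {
    protected String giftMessage; // illustrative extension field
    @Override
    public FulfillmentGroupItem clone() {
        // super.clone() instantiates via Class.forName(this.getClass().getName()),
        // so the returned object is already a MyFulfillmentGroupItemImpl
        MyFulfillmentGroupItemImpl cloned = (MyFulfillmentGroupItemImpl) super.clone();
        cloned.giftMessage = this.giftMessage;
        return cloned;
    }
}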
765 | public abstract class ExtensionManager<T extends ExtensionHandler> implements InvocationHandler {
protected boolean handlersSorted = false;
protected static final String LOCK_OBJECT = new String("EM_LOCK"); // deliberately a new instance: a private monitor, not the interned literal
protected T extensionHandler;
protected List<T> handlers = new ArrayList<T>();
/**
* Should take in a className that matches the ExtensionHandler interface being managed.
* @param className
*/
@SuppressWarnings("unchecked")
public ExtensionManager(Class<T> _clazz) {
extensionHandler = (T) Proxy.newProxyInstance(_clazz.getClassLoader(),
new Class[] { _clazz },
this);
}
public T getProxy() {
return extensionHandler;
}
@SuppressWarnings({ "unchecked", "rawtypes" })
public List<T> getHandlers() {
if (!handlersSorted) {
synchronized (LOCK_OBJECT) {
if (!handlersSorted) {
Comparator fieldCompare = new BeanComparator("priority");
Collections.sort(handlers, fieldCompare);
handlersSorted = true;
}
}
}
return handlers;
}
public void setHandlers(List<T> handlers) {
this.handlers = handlers;
}
/**
* Utility method that is useful for determining whether or not an ExtensionManager implementation
* should continue after processing a ExtensionHandler call.
*
* By default, returns true for CONTINUE
*
* @return
*/
public boolean shouldContinue(ExtensionResultStatusType result, ExtensionHandler handler,
Method method, Object[] args) {
if (result != null) {
if (ExtensionResultStatusType.HANDLED_STOP.equals(result)) {
return false;
}
if (ExtensionResultStatusType.HANDLED.equals(result) && ! continueOnHandled()) {
return false;
}
}
return true;
}
/**
* Returns whether or not this extension manager continues on {@link ExtensionResultStatusType}.HANDLED.
*
* @return
*/
public boolean continueOnHandled() {
return false;
}
/**
* {@link ExtensionManager}s don't really need a priority but they pick up this property due to the
* fact that we want them to implement the same interface <T> as the handlers they are managing.
*
* @return
*/
public int getPriority() {
throw new UnsupportedOperationException();
}
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
boolean notHandled = true;
for (ExtensionHandler handler : getHandlers()) {
try {
ExtensionResultStatusType result = (ExtensionResultStatusType) method.invoke(handler, args);
if (!ExtensionResultStatusType.NOT_HANDLED.equals(result)) {
notHandled = false;
}
if (!shouldContinue(result, handler, method, args)) {
break;
}
} catch (InvocationTargetException e) {
throw e.getCause();
}
}
if (notHandled) {
return ExtensionResultStatusType.NOT_HANDLED;
} else {
return ExtensionResultStatusType.HANDLED;
}
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_extension_ExtensionManager.java |
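Note: a minimal sketch of a concrete manager/handler pair, with hypothetical names. Calls made through getProxy() fan out to the registered handlers in ascending priority order and stop on HANDLED_STOP (or on HANDLED, since continueOnHandled() returns false here):
public interface PricingExtensionHandler extends ExtensionHandler {
    ExtensionResultStatusType adjustPrice(Money price);
}
public class PricingExtensionManager extends ExtensionManager<PricingExtensionHandler> {
    public PricingExtensionManager() {
        super(PricingExtensionHandler.class);
    }
}
// usage, assuming a handler implementation that exposes getPriority():
PricingExtensionManager manager = new PricingExtensionManager();
manager.getHandlers().add(new DefaultPricingExtensionHandler()); // hypothetical handler
ExtensionResultStatusType status = manager.getProxy().adjustPrice(price);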
1,644 | @Component("blMapFieldMetadataProvider")
@Scope("prototype")
public class MapFieldMetadataProvider extends AdvancedCollectionFieldMetadataProvider {
private static final Log LOG = LogFactory.getLog(MapFieldMetadataProvider.class);
protected boolean canHandleFieldForConfiguredMetadata(AddMetadataRequest addMetadataRequest, Map<String, FieldMetadata> metadata) {
AdminPresentationMap annot = addMetadataRequest.getRequestedField().getAnnotation(AdminPresentationMap.class);
return annot != null;
}
protected boolean canHandleAnnotationOverride(OverrideViaAnnotationRequest overrideViaAnnotationRequest, Map<String, FieldMetadata> metadata) {
AdminPresentationOverrides myOverrides = overrideViaAnnotationRequest.getRequestedEntity().getAnnotation(AdminPresentationOverrides.class);
AdminPresentationMergeOverrides myMergeOverrides = overrideViaAnnotationRequest.getRequestedEntity().getAnnotation(AdminPresentationMergeOverrides.class);
return (myOverrides != null && !ArrayUtils.isEmpty(myOverrides.maps())) || myMergeOverrides != null;
}
@Override
public FieldProviderResponse addMetadata(AddMetadataRequest addMetadataRequest, Map<String, FieldMetadata> metadata) {
if (!canHandleFieldForConfiguredMetadata(addMetadataRequest, metadata)) {
return FieldProviderResponse.NOT_HANDLED;
}
AdminPresentationMap annot = addMetadataRequest.getRequestedField().getAnnotation(AdminPresentationMap.class);
FieldInfo info = buildFieldInfo(addMetadataRequest.getRequestedField());
FieldMetadataOverride override = constructMapMetadataOverride(annot);
buildMapMetadata(addMetadataRequest.getParentClass(), addMetadataRequest.getTargetClass(),
metadata, info, override, addMetadataRequest.getDynamicEntityDao(), addMetadataRequest.getPrefix());
setClassOwnership(addMetadataRequest.getParentClass(), addMetadataRequest.getTargetClass(), metadata, info);
return FieldProviderResponse.HANDLED;
}
@Override
public FieldProviderResponse overrideViaAnnotation(OverrideViaAnnotationRequest overrideViaAnnotationRequest, Map<String, FieldMetadata> metadata) {
if (!canHandleAnnotationOverride(overrideViaAnnotationRequest, metadata)) {
return FieldProviderResponse.NOT_HANDLED;
}
Map<String, AdminPresentationMapOverride> presentationMapOverrides = new HashMap<String, AdminPresentationMapOverride>();
AdminPresentationOverrides myOverrides = overrideViaAnnotationRequest.getRequestedEntity().getAnnotation(AdminPresentationOverrides.class);
if (myOverrides != null) {
for (AdminPresentationMapOverride myOverride : myOverrides.maps()) {
presentationMapOverrides.put(myOverride.name(), myOverride);
}
}
for (String propertyName : presentationMapOverrides.keySet()) {
for (String key : metadata.keySet()) {
if (key.startsWith(propertyName)) {
buildAdminPresentationMapOverride(overrideViaAnnotationRequest.getPrefix(), overrideViaAnnotationRequest.getParentExcluded(), metadata, presentationMapOverrides,
propertyName, key, overrideViaAnnotationRequest.getDynamicEntityDao());
}
}
}
AdminPresentationMergeOverrides myMergeOverrides = overrideViaAnnotationRequest.getRequestedEntity().getAnnotation(AdminPresentationMergeOverrides.class);
if (myMergeOverrides != null) {
for (AdminPresentationMergeOverride override : myMergeOverrides.value()) {
String propertyName = override.name();
Map<String, FieldMetadata> loopMap = new HashMap<String, FieldMetadata>();
loopMap.putAll(metadata);
for (Map.Entry<String, FieldMetadata> entry : loopMap.entrySet()) {
if (entry.getKey().startsWith(propertyName) || StringUtils.isEmpty(propertyName)) {
FieldMetadata targetMetadata = entry.getValue();
if (targetMetadata instanceof MapMetadata) {
MapMetadata serverMetadata = (MapMetadata) targetMetadata;
if (serverMetadata.getTargetClass() != null) {
try {
Class<?> targetClass = Class.forName(serverMetadata.getTargetClass());
Class<?> parentClass = null;
if (serverMetadata.getOwningClass() != null) {
parentClass = Class.forName(serverMetadata.getOwningClass());
}
String fieldName = serverMetadata.getFieldName();
Field field = overrideViaAnnotationRequest.getDynamicEntityDao().getFieldManager()
.getField(targetClass, fieldName);
Map<String, FieldMetadata> temp = new HashMap<String, FieldMetadata>(1);
temp.put(field.getName(), serverMetadata);
FieldInfo info = buildFieldInfo(field);
FieldMetadataOverride fieldMetadataOverride = overrideMapMergeMetadata(override);
if (serverMetadata.getExcluded() != null && serverMetadata.getExcluded() &&
(fieldMetadataOverride.getExcluded() == null || fieldMetadataOverride.getExcluded())) {
continue;
}
buildMapMetadata(parentClass, targetClass, temp, info, fieldMetadataOverride,
overrideViaAnnotationRequest.getDynamicEntityDao(), serverMetadata.getPrefix());
serverMetadata = (MapMetadata) temp.get(field.getName());
metadata.put(entry.getKey(), serverMetadata);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
}
}
}
return FieldProviderResponse.HANDLED;
}
@Override
public FieldProviderResponse overrideViaXml(OverrideViaXmlRequest overrideViaXmlRequest, Map<String, FieldMetadata> metadata) {
Map<String, FieldMetadataOverride> overrides = getTargetedOverride(overrideViaXmlRequest.getDynamicEntityDao(), overrideViaXmlRequest.getRequestedConfigKey(), overrideViaXmlRequest.getRequestedCeilingEntity());
if (overrides != null) {
for (String propertyName : overrides.keySet()) {
final FieldMetadataOverride localMetadata = overrides.get(propertyName);
for (String key : metadata.keySet()) {
if (key.equals(propertyName)) {
try {
if (metadata.get(key) instanceof MapMetadata) {
MapMetadata serverMetadata = (MapMetadata) metadata.get(key);
if (serverMetadata.getTargetClass() != null) {
Class<?> targetClass = Class.forName(serverMetadata.getTargetClass());
Class<?> parentClass = null;
if (serverMetadata.getOwningClass() != null) {
parentClass = Class.forName(serverMetadata.getOwningClass());
}
String fieldName = serverMetadata.getFieldName();
Field field = overrideViaXmlRequest.getDynamicEntityDao().getFieldManager().getField(targetClass, fieldName);
Map<String, FieldMetadata> temp = new HashMap<String, FieldMetadata>(1);
temp.put(field.getName(), serverMetadata);
FieldInfo info = buildFieldInfo(field);
buildMapMetadata(parentClass, targetClass, temp, info, localMetadata, overrideViaXmlRequest.getDynamicEntityDao(), serverMetadata.getPrefix());
serverMetadata = (MapMetadata) temp.get(field.getName());
metadata.put(key, serverMetadata);
if (overrideViaXmlRequest.getParentExcluded()) {
if (LOG.isDebugEnabled()) {
LOG.debug("applyMapMetadataOverrides:Excluding " + key + "because parent is marked as excluded.");
}
serverMetadata.setExcluded(true);
}
}
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
}
return FieldProviderResponse.HANDLED;
}
@Override
public FieldProviderResponse addMetadataFromFieldType(AddMetadataFromFieldTypeRequest addMetadataFromFieldTypeRequest, Map<String, FieldMetadata> metadata) {
if (!canHandleFieldForTypeMetadata(addMetadataFromFieldTypeRequest, metadata)) {
return FieldProviderResponse.NOT_HANDLED;
}
//do nothing but add the property without manipulation
metadata.put(addMetadataFromFieldTypeRequest.getRequestedPropertyName(), addMetadataFromFieldTypeRequest.getPresentationAttribute());
return FieldProviderResponse.HANDLED;
}
protected void buildAdminPresentationMapOverride(String prefix, Boolean isParentExcluded, Map<String, FieldMetadata> mergedProperties,
Map<String, AdminPresentationMapOverride> presentationMapOverrides, String propertyName, String key, DynamicEntityDao dynamicEntityDao) {
AdminPresentationMapOverride override = presentationMapOverrides.get(propertyName);
if (override != null) {
AdminPresentationMap annot = override.value();
if (annot != null) {
String testKey = prefix + key;
if ((testKey.startsWith(propertyName + ".") || testKey.equals(propertyName)) && annot.excluded()) {
FieldMetadata metadata = mergedProperties.get(key);
if (LOG.isDebugEnabled()) {
LOG.debug("buildAdminPresentationMapOverride:Excluding " + key + "because an override annotation declared " + testKey + "to be excluded");
}
metadata.setExcluded(true);
return;
}
if ((testKey.startsWith(propertyName + ".") || testKey.equals(propertyName)) && !annot.excluded()) {
FieldMetadata metadata = mergedProperties.get(key);
if (!isParentExcluded) {
if (LOG.isDebugEnabled()) {
LOG.debug("buildAdminPresentationMapOverride:Showing " + key + "because an override annotation declared " + testKey + " to not be excluded");
}
metadata.setExcluded(false);
}
}
if (!(mergedProperties.get(key) instanceof MapMetadata)) {
return;
}
MapMetadata serverMetadata = (MapMetadata) mergedProperties.get(key);
if (serverMetadata.getTargetClass() != null) {
try {
Class<?> targetClass = Class.forName(serverMetadata.getTargetClass());
Class<?> parentClass = null;
if (serverMetadata.getOwningClass() != null) {
parentClass = Class.forName(serverMetadata.getOwningClass());
}
String fieldName = serverMetadata.getFieldName();
Field field = dynamicEntityDao.getFieldManager().getField(targetClass, fieldName);
FieldMetadataOverride localMetadata = constructMapMetadataOverride(annot);
//do not include the previous metadata - we want to construct a fresh metadata from the override annotation
Map<String, FieldMetadata> temp = new HashMap<String, FieldMetadata>(1);
FieldInfo info = buildFieldInfo(field);
buildMapMetadata(parentClass, targetClass, temp, info, localMetadata, dynamicEntityDao, serverMetadata.getPrefix());
MapMetadata result = (MapMetadata) temp.get(field.getName());
result.setInheritedFromType(serverMetadata.getInheritedFromType());
result.setAvailableToTypes(serverMetadata.getAvailableToTypes());
mergedProperties.put(key, result);
if (isParentExcluded) {
if (LOG.isDebugEnabled()) {
LOG.debug("buildAdminPresentationMapOverride:Excluding " + key + "because the parent was excluded");
}
serverMetadata.setExcluded(true);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
}
}
protected FieldMetadataOverride overrideMapMergeMetadata(AdminPresentationMergeOverride merge) {
FieldMetadataOverride fieldMetadataOverride = new FieldMetadataOverride();
Map<String, AdminPresentationMergeEntry> overrideValues = getAdminPresentationEntries(merge.mergeEntries());
for (Map.Entry<String, AdminPresentationMergeEntry> entry : overrideValues.entrySet()) {
String stringValue = entry.getValue().overrideValue();
if (entry.getKey().equals(PropertyType.AdminPresentationMap.CURRENCYCODEFIELD)) {
fieldMetadataOverride.setCurrencyCodeField(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.CUSTOMCRITERIA)) {
fieldMetadataOverride.setCustomCriteria(entry.getValue().stringArrayOverrideValue());
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.DELETEENTITYUPONREMOVE)) {
fieldMetadataOverride.setDeleteEntityUponRemove(StringUtils.isEmpty(stringValue) ? entry.getValue()
.booleanOverrideValue() : Boolean.parseBoolean(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.EXCLUDED)) {
fieldMetadataOverride.setExcluded(StringUtils.isEmpty(stringValue) ? entry.getValue()
.booleanOverrideValue() : Boolean.parseBoolean(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.FORCEFREEFORMKEYS)) {
fieldMetadataOverride.setForceFreeFormKeys(StringUtils.isEmpty(stringValue) ? entry.getValue()
.booleanOverrideValue() : Boolean.parseBoolean(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.FRIENDLYNAME)) {
fieldMetadataOverride.setFriendlyName(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.ISSIMPLEVALUE)) {
fieldMetadataOverride.setSimpleValue(UnspecifiedBooleanType.valueOf(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.KEYCLASS)) {
fieldMetadataOverride.setKeyClass(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.KEYPROPERTYFRIENDLYNAME)) {
fieldMetadataOverride.setKeyPropertyFriendlyName(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.MAPKEYVALUEPROPERTY)) {
fieldMetadataOverride.setMapKeyValueProperty(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.KEYS)) {
if (!ArrayUtils.isEmpty(entry.getValue().keys())) {
String[][] keys = new String[entry.getValue().keys().length][2];
for (int j = 0; j < keys.length; j++) {
keys[j][0] = entry.getValue().keys()[j].keyName();
keys[j][1] = entry.getValue().keys()[j].friendlyKeyName();
}
fieldMetadataOverride.setKeys(keys);
}
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.MANYTOFIELD)) {
fieldMetadataOverride.setManyToField(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.MAPKEYOPTIONENTITYCLASS)) {
fieldMetadataOverride.setMapKeyOptionEntityClass(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.MAPKEYOPTIONENTITYDISPLAYFIELD)) {
fieldMetadataOverride.setMapKeyOptionEntityDisplayField(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.MAPKEYOPTIONENTITYVALUEFIELD)) {
fieldMetadataOverride.setMapKeyOptionEntityValueField(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.MEDIAFIELD)) {
fieldMetadataOverride.setMediaField(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.OPERATIONTYPES)) {
AdminPresentationOperationTypes operationType = entry.getValue().operationTypes();
fieldMetadataOverride.setAddType(operationType.addType());
fieldMetadataOverride.setRemoveType(operationType.removeType());
fieldMetadataOverride.setUpdateType(operationType.updateType());
fieldMetadataOverride.setFetchType(operationType.fetchType());
fieldMetadataOverride.setInspectType(operationType.inspectType());
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.ORDER)) {
fieldMetadataOverride.setOrder(StringUtils.isEmpty(stringValue) ? entry.getValue().intOverrideValue() :
Integer.parseInt(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.READONLY)) {
fieldMetadataOverride.setReadOnly(StringUtils.isEmpty(stringValue) ? entry.getValue()
.booleanOverrideValue() : Boolean.parseBoolean(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.SECURITYLEVEL)) {
fieldMetadataOverride.setSecurityLevel(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.SHOWIFPROPERTY)) {
fieldMetadataOverride.setShowIfProperty(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.TAB)) {
fieldMetadataOverride.setTab(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.TABORDER)) {
fieldMetadataOverride.setTabOrder(StringUtils.isEmpty(stringValue) ? entry.getValue()
.intOverrideValue() : Integer.parseInt(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.USESERVERSIDEINSPECTIONCACHE)) {
fieldMetadataOverride.setUseServerSideInspectionCache(StringUtils.isEmpty(stringValue) ? entry
.getValue().booleanOverrideValue() : Boolean.parseBoolean(stringValue));
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.VALUECLASS)) {
fieldMetadataOverride.setValueClass(stringValue);
} else if (entry.getKey().equals(PropertyType.AdminPresentationMap.VALUEPROPERTYFRIENDLYNAME)) {
fieldMetadataOverride.setValuePropertyFriendlyName(stringValue);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Unrecognized type: " + entry.getKey() + ". Not setting on map field.");
}
}
}
return fieldMetadataOverride;
}
protected FieldMetadataOverride constructMapMetadataOverride(AdminPresentationMap map) {
if (map != null) {
FieldMetadataOverride override = new FieldMetadataOverride();
override.setDeleteEntityUponRemove(map.deleteEntityUponRemove());
override.setKeyClass(map.keyClass().getName());
override.setMapKeyValueProperty(map.mapKeyValueProperty());
override.setKeyPropertyFriendlyName(map.keyPropertyFriendlyName());
if (!ArrayUtils.isEmpty(map.keys())) {
String[][] keys = new String[map.keys().length][2];
for (int j = 0; j < keys.length; j++) {
keys[j][0] = map.keys()[j].keyName();
keys[j][1] = map.keys()[j].friendlyKeyName();
}
override.setKeys(keys);
}
override.setMapKeyOptionEntityClass(map.mapKeyOptionEntityClass().getName());
override.setMapKeyOptionEntityDisplayField(map.mapKeyOptionEntityDisplayField());
override.setMapKeyOptionEntityValueField(map.mapKeyOptionEntityValueField());
override.setMediaField(map.mediaField());
override.setSimpleValue(map.isSimpleValue());
override.setValueClass(map.valueClass().getName());
override.setValuePropertyFriendlyName(map.valuePropertyFriendlyName());
override.setCustomCriteria(map.customCriteria());
override.setUseServerSideInspectionCache(map.useServerSideInspectionCache());
override.setExcluded(map.excluded());
override.setFriendlyName(map.friendlyName());
override.setReadOnly(map.readOnly());
override.setOrder(map.order());
override.setTab(map.tab());
override.setTabOrder(map.tabOrder());
override.setSecurityLevel(map.securityLevel());
override.setAddType(map.operationTypes().addType());
override.setFetchType(map.operationTypes().fetchType());
override.setRemoveType(map.operationTypes().removeType());
override.setUpdateType(map.operationTypes().updateType());
override.setInspectType(map.operationTypes().inspectType());
override.setShowIfProperty(map.showIfProperty());
override.setCurrencyCodeField(map.currencyCodeField());
override.setForceFreeFormKeys(map.forceFreeFormKeys());
override.setManyToField(map.manyToField());
return override;
}
throw new IllegalArgumentException("AdminPresentationMap annotation not found on field");
}
protected void buildMapMetadata(Class<?> parentClass, Class<?> targetClass, Map<String, FieldMetadata> attributes,
FieldInfo field, FieldMetadataOverride map, DynamicEntityDao dynamicEntityDao, String prefix) {
MapMetadata serverMetadata = (MapMetadata) attributes.get(field.getName());
Class<?> resolvedClass = parentClass==null?targetClass:parentClass;
MapMetadata metadata;
if (serverMetadata != null) {
metadata = serverMetadata;
} else {
metadata = new MapMetadata();
}
if (map.getReadOnly() != null) {
metadata.setMutable(!map.getReadOnly());
}
if (map.getShowIfProperty()!=null) {
metadata.setShowIfProperty(map.getShowIfProperty());
}
metadata.setPrefix(prefix);
metadata.setTargetClass(targetClass.getName());
metadata.setFieldName(field.getName());
org.broadleafcommerce.openadmin.dto.OperationTypes dtoOperationTypes = new org.broadleafcommerce.openadmin.dto.OperationTypes(OperationType.MAP, OperationType.MAP, OperationType.MAP, OperationType.MAP, OperationType.MAP);
if (map.getAddType() != null) {
dtoOperationTypes.setAddType(map.getAddType());
}
if (map.getRemoveType() != null) {
dtoOperationTypes.setRemoveType(map.getRemoveType());
}
if (map.getFetchType() != null) {
dtoOperationTypes.setFetchType(map.getFetchType());
}
if (map.getInspectType() != null) {
dtoOperationTypes.setInspectType(map.getInspectType());
}
if (map.getUpdateType() != null) {
dtoOperationTypes.setUpdateType(map.getUpdateType());
}
//don't allow additional non-persistent properties or additional foreign keys for an advanced collection datasource - they don't make sense in this context
PersistencePerspective persistencePerspective;
if (serverMetadata != null) {
persistencePerspective = metadata.getPersistencePerspective();
persistencePerspective.setOperationTypes(dtoOperationTypes);
} else {
persistencePerspective = new PersistencePerspective(dtoOperationTypes, new String[]{}, new ForeignKey[]{});
metadata.setPersistencePerspective(persistencePerspective);
}
String parentObjectClass = resolvedClass.getName();
Map idMetadata;
if(parentClass!=null) {
idMetadata=dynamicEntityDao.getIdMetadata(parentClass);
} else {
idMetadata=dynamicEntityDao.getIdMetadata(targetClass);
}
String parentObjectIdField = (String) idMetadata.get("name");
String keyClassName = null;
if (serverMetadata != null) {
keyClassName = ((MapStructure) metadata.getPersistencePerspective().getPersistencePerspectiveItems().get
(PersistencePerspectiveItemType.MAPSTRUCTURE)).getKeyClassName();
}
if (map.getKeyClass() != null && !void.class.getName().equals(map.getKeyClass())) {
keyClassName = map.getKeyClass();
}
if (keyClassName == null) {
java.lang.reflect.Type type = field.getGenericType();
if (type instanceof ParameterizedType) {
ParameterizedType pType = (ParameterizedType) type;
Class<?> clazz = (Class<?>) pType.getActualTypeArguments()[0];
if (!ArrayUtils.isEmpty(dynamicEntityDao.getAllPolymorphicEntitiesFromCeiling(clazz))) {
throw new IllegalArgumentException("Key class for AdminPresentationMap was determined to be a JPA managed type. Only primitive types for the key type are currently supported.");
}
keyClassName = clazz.getName();
}
}
if (keyClassName == null) {
keyClassName = String.class.getName();
}
String keyPropertyName = "key";
String mapKeyValueProperty = "";
if (StringUtils.isNotBlank(field.getMapKey())) {
mapKeyValueProperty = field.getMapKey();
}
if (StringUtils.isNotBlank(map.getMapKeyValueProperty())) {
mapKeyValueProperty = map.getMapKeyValueProperty();
}
String keyPropertyFriendlyName = null;
if (serverMetadata != null) {
keyPropertyFriendlyName = ((MapStructure) serverMetadata.getPersistencePerspective().getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.MAPSTRUCTURE)).getKeyPropertyFriendlyName();
}
if (map.getKeyPropertyFriendlyName() != null) {
keyPropertyFriendlyName = map.getKeyPropertyFriendlyName();
}
Boolean deleteEntityUponRemove = null;
if (serverMetadata != null) {
deleteEntityUponRemove = ((MapStructure) serverMetadata.getPersistencePerspective().getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.MAPSTRUCTURE)).getDeleteValueEntity();
}
if (map.isDeleteEntityUponRemove() != null) {
deleteEntityUponRemove = map.isDeleteEntityUponRemove();
}
String valuePropertyName = "value";
String valuePropertyFriendlyName = null;
if (serverMetadata != null) {
MapStructure structure = (MapStructure) serverMetadata.getPersistencePerspective().getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.MAPSTRUCTURE);
if (structure instanceof SimpleValueMapStructure) {
valuePropertyFriendlyName = ((SimpleValueMapStructure) structure).getValuePropertyFriendlyName();
} else {
valuePropertyFriendlyName = "";
}
}
if (map.getValuePropertyFriendlyName()!=null) {
valuePropertyFriendlyName = map.getValuePropertyFriendlyName();
}
if (map.getMediaField() != null) {
metadata.setMediaField(map.getMediaField());
}
if (map.getValueClass() != null && !void.class.getName().equals(map.getValueClass())) {
metadata.setValueClassName(map.getValueClass());
}
if (metadata.getValueClassName() == null) {
java.lang.reflect.Type type = field.getGenericType();
if (type instanceof ParameterizedType) {
ParameterizedType pType = (ParameterizedType) type;
Class<?> clazz = (Class<?>) pType.getActualTypeArguments()[1];
Class<?>[] entities = dynamicEntityDao.getAllPolymorphicEntitiesFromCeiling(clazz);
if (!ArrayUtils.isEmpty(entities)) {
metadata.setValueClassName(entities[entities.length-1].getName());
}
}
}
if (metadata.getValueClassName() == null) {
if (!StringUtils.isEmpty(field.getManyToManyTargetEntity())) {
metadata.setValueClassName(field.getManyToManyTargetEntity());
}
}
if (metadata.getValueClassName() == null) {
metadata.setValueClassName(String.class.getName());
}
Boolean simpleValue = null;
if (map.getSimpleValue()!= null && map.getSimpleValue()!= UnspecifiedBooleanType.UNSPECIFIED) {
simpleValue = map.getSimpleValue()==UnspecifiedBooleanType.TRUE;
}
if (simpleValue==null) {
java.lang.reflect.Type type = field.getGenericType();
if (type instanceof ParameterizedType) {
ParameterizedType pType = (ParameterizedType) type;
Class<?> clazz = (Class<?>) pType.getActualTypeArguments()[1];
Class<?>[] entities = dynamicEntityDao.getAllPolymorphicEntitiesFromCeiling(clazz);
simpleValue = ArrayUtils.isEmpty(entities);
}
}
if (simpleValue==null) {
//ManyToMany manyToMany = field.getAnnotation(ManyToMany.class);
if (!StringUtils.isEmpty(field.getManyToManyTargetEntity())) {
simpleValue = false;
}
}
if (simpleValue == null) {
throw new IllegalArgumentException("Unable to infer if the value for the map is of a complex or simple type based on any parameterized type or ManyToMany annotation. Please explicitly set the isSimpleValue property.");
}
metadata.setSimpleValue(simpleValue);
if (map.getKeys() != null) {
metadata.setKeys(map.getKeys());
}
if (map.getMapKeyValueProperty() != null) {
metadata.setMapKeyValueProperty(map.getMapKeyValueProperty());
}
if (map.getMapKeyOptionEntityClass()!=null) {
if (!void.class.getName().equals(map.getMapKeyOptionEntityClass())) {
metadata.setMapKeyOptionEntityClass(map.getMapKeyOptionEntityClass());
} else {
metadata.setMapKeyOptionEntityClass("");
}
}
if (map.getMapKeyOptionEntityDisplayField() != null) {
metadata.setMapKeyOptionEntityDisplayField(map.getMapKeyOptionEntityDisplayField());
}
if (map.getMapKeyOptionEntityValueField()!=null) {
metadata.setMapKeyOptionEntityValueField(map.getMapKeyOptionEntityValueField());
}
if (map.getForceFreeFormKeys() != null) {
if (!map.getForceFreeFormKeys() && ArrayUtils.isEmpty(metadata.getKeys()) && (StringUtils.isEmpty(metadata.getMapKeyOptionEntityClass()) || StringUtils.isEmpty(metadata.getMapKeyOptionEntityValueField()) || StringUtils.isEmpty(metadata.getMapKeyOptionEntityDisplayField()))) {
throw new IllegalArgumentException("Could not ascertain method for generating key options for the annotated map ("+field.getName()+"). Must specify either an array of AdminPresentationMapKey values for the keys property, or utilize the mapOptionKeyClass, mapOptionKeyDisplayField and mapOptionKeyValueField properties. If you wish to allow free form entry for key values, then set forceFreeFormKeys on AdminPresentationMap.");
}
}
MapStructure mapStructure;
if (serverMetadata != null) {
ForeignKey foreignKey = (ForeignKey) persistencePerspective.getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.FOREIGNKEY);
foreignKey.setManyToField(parentObjectIdField);
foreignKey.setForeignKeyClass(parentObjectClass);
if (metadata.isSimpleValue()) {
mapStructure = (SimpleValueMapStructure) persistencePerspective.getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.MAPSTRUCTURE);
mapStructure.setKeyClassName(keyClassName);
mapStructure.setKeyPropertyName(keyPropertyName);
mapStructure.setKeyPropertyFriendlyName(keyPropertyFriendlyName);
mapStructure.setValueClassName(metadata.getValueClassName());
((SimpleValueMapStructure) mapStructure).setValuePropertyName(valuePropertyName);
((SimpleValueMapStructure) mapStructure).setValuePropertyFriendlyName(valuePropertyFriendlyName);
mapStructure.setMapProperty(prefix + field.getName());
mapStructure.setMutable(metadata.isMutable());
} else {
mapStructure = (MapStructure) persistencePerspective.getPersistencePerspectiveItems().get(PersistencePerspectiveItemType.MAPSTRUCTURE);
mapStructure.setKeyClassName(keyClassName);
mapStructure.setKeyPropertyName(keyPropertyName);
mapStructure.setKeyPropertyFriendlyName(keyPropertyFriendlyName);
mapStructure.setValueClassName(metadata.getValueClassName());
mapStructure.setMapProperty(prefix + field.getName());
mapStructure.setDeleteValueEntity(deleteEntityUponRemove);
mapStructure.setMutable(metadata.isMutable());
}
} else {
ForeignKey foreignKey = new ForeignKey(parentObjectIdField, parentObjectClass);
persistencePerspective.addPersistencePerspectiveItem(PersistencePerspectiveItemType.FOREIGNKEY, foreignKey);
if (metadata.isSimpleValue()) {
mapStructure = new SimpleValueMapStructure(keyClassName, keyPropertyName, keyPropertyFriendlyName, metadata.getValueClassName(), valuePropertyName, valuePropertyFriendlyName, prefix + field.getName(), mapKeyValueProperty);
mapStructure.setMutable(metadata.isMutable());
} else {
mapStructure = new MapStructure(keyClassName, keyPropertyName, keyPropertyFriendlyName, metadata.getValueClassName(), prefix + field.getName(), deleteEntityUponRemove, mapKeyValueProperty);
mapStructure.setMutable(metadata.isMutable());
}
persistencePerspective.addPersistencePerspectiveItem(PersistencePerspectiveItemType.MAPSTRUCTURE, mapStructure);
}
if (!StringUtils.isEmpty(map.getManyToField())) {
mapStructure.setManyToField(map.getManyToField());
}
if (mapStructure.getManyToField() == null) {
//try to infer the value
if (field.getManyToManyMappedBy() != null) {
mapStructure.setManyToField(field.getManyToManyMappedBy());
}
}
if (mapStructure.getManyToField() == null) {
//try to infer the value
if (field.getOneToManyMappedBy() != null) {
mapStructure.setManyToField(field.getOneToManyMappedBy());
}
}
if (map.getExcluded() != null) {
if (LOG.isDebugEnabled()) {
if (map.getExcluded()) {
LOG.debug("buildMapMetadata:Excluding " + field.getName() + " because it was explicitly declared in config");
} else {
LOG.debug("buildMapMetadata:Showing " + field.getName() + " because it was explicitly declared in config");
}
}
metadata.setExcluded(map.getExcluded());
}
if (map.getFriendlyName() != null) {
metadata.setFriendlyName(map.getFriendlyName());
}
if (map.getSecurityLevel() != null) {
metadata.setSecurityLevel(map.getSecurityLevel());
}
if (map.getOrder() != null) {
metadata.setOrder(map.getOrder());
}
if (map.getTab() != null) {
metadata.setTab(map.getTab());
}
if (map.getTabOrder() != null) {
metadata.setTabOrder(map.getTabOrder());
}
if (map.getCustomCriteria() != null) {
metadata.setCustomCriteria(map.getCustomCriteria());
}
if (map.getUseServerSideInspectionCache() != null) {
persistencePerspective.setUseServerSideInspectionCache(map.getUseServerSideInspectionCache());
}
if (map.getCurrencyCodeField()!=null) {
metadata.setCurrencyCodeField(map.getCurrencyCodeField());
}
if (map.getForceFreeFormKeys()!=null) {
metadata.setForceFreeFormKeys(map.getForceFreeFormKeys());
}
attributes.put(field.getName(), metadata);
}
@Override
public int getOrder() {
return FieldMetadataProvider.MAP;
}
} | 0true
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_dao_provider_metadata_MapFieldMetadataProvider.java |
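Note: for context, a sketch of the annotation this provider consumes. buildMapMetadata infers the key class (String) and value class (Media) from the parameterized field type; the names and values below are illustrative, not taken from the source:
@AdminPresentationMap(friendlyName = "Product Media",
        keyPropertyFriendlyName = "Key",
        deleteEntityUponRemove = true,
        mediaField = "url",
        forceFreeFormKeys = true)
protected Map<String, Media> productMedia = new HashMap<String, Media>();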
945 | public abstract class MasterNodeOperationRequestBuilder<Request extends MasterNodeOperationRequest<Request>, Response extends ActionResponse, RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder>>
extends ActionRequestBuilder<Request, Response, RequestBuilder> {
protected MasterNodeOperationRequestBuilder(InternalGenericClient client, Request request) {
super(client, request);
}
/**
* Sets the master node timeout in case the master has not yet been discovered.
*/
@SuppressWarnings("unchecked")
public final RequestBuilder setMasterNodeTimeout(TimeValue timeout) {
request.masterNodeTimeout(timeout);
return (RequestBuilder) this;
}
/**
* Sets the master node timeout in case the master has not yet been discovered.
*/
@SuppressWarnings("unchecked")
public final RequestBuilder setMasterNodeTimeout(String timeout) {
request.masterNodeTimeout(timeout);
return (RequestBuilder) this;
}
} | 0true
| src_main_java_org_elasticsearch_action_support_master_MasterNodeOperationRequestBuilder.java |
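Note: both overloads configure the same timeout; the string form is parsed into a TimeValue. A usage sketch against a master-node request builder, assuming the 1.x cluster-health API:
client.admin().cluster().prepareHealth()
        .setMasterNodeTimeout(TimeValue.timeValueSeconds(30))
        .execute().actionGet();
// equivalent, using the string overload:
client.admin().cluster().prepareHealth()
        .setMasterNodeTimeout("30s")
        .execute().actionGet();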
1,696 | public class BytesArray implements BytesReference {
public static final BytesArray EMPTY = new BytesArray(BytesRef.EMPTY_BYTES, 0, 0);
private byte[] bytes;
private int offset;
private int length;
public BytesArray(String bytes) {
BytesRef bytesRef = new BytesRef();
UnicodeUtil.UTF16toUTF8(bytes, 0, bytes.length(), bytesRef);
this.bytes = bytesRef.bytes;
this.offset = bytesRef.offset;
this.length = bytesRef.length;
}
public BytesArray(BytesRef bytesRef) {
this(bytesRef, false);
}
public BytesArray(BytesRef bytesRef, boolean deepCopy) {
if (deepCopy) {
BytesRef copy = BytesRef.deepCopyOf(bytesRef);
bytes = copy.bytes;
offset = copy.offset;
length = copy.length;
} else {
bytes = bytesRef.bytes;
offset = bytesRef.offset;
length = bytesRef.length;
}
}
public BytesArray(byte[] bytes) {
this.bytes = bytes;
this.offset = 0;
this.length = bytes.length;
}
public BytesArray(byte[] bytes, int offset, int length) {
this.bytes = bytes;
this.offset = offset;
this.length = length;
}
@Override
public byte get(int index) {
return bytes[offset + index];
}
@Override
public int length() {
return length;
}
@Override
public BytesReference slice(int from, int length) {
if (from < 0 || (from + length) > this.length) {
throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + this.length + "], with slice parameters from [" + from + "], length [" + length + "]");
}
return new BytesArray(bytes, offset + from, length);
}
@Override
public StreamInput streamInput() {
return new BytesStreamInput(bytes, offset, length, false);
}
@Override
public void writeTo(OutputStream os) throws IOException {
os.write(bytes, offset, length);
}
@Override
public byte[] toBytes() {
if (offset == 0 && bytes.length == length) {
return bytes;
}
return Arrays.copyOfRange(bytes, offset, offset + length);
}
@Override
public BytesArray toBytesArray() {
return this;
}
@Override
public BytesArray copyBytesArray() {
return new BytesArray(Arrays.copyOfRange(bytes, offset, offset + length));
}
@Override
public ChannelBuffer toChannelBuffer() {
return ChannelBuffers.wrappedBuffer(bytes, offset, length);
}
@Override
public boolean hasArray() {
return true;
}
@Override
public byte[] array() {
return bytes;
}
@Override
public int arrayOffset() {
return offset;
}
@Override
public String toUtf8() {
if (length == 0) {
return "";
}
return new String(bytes, offset, length, Charsets.UTF_8);
}
@Override
public BytesRef toBytesRef() {
return new BytesRef(bytes, offset, length);
}
@Override
public BytesRef copyBytesRef() {
return new BytesRef(Arrays.copyOfRange(bytes, offset, offset + length));
}
@Override
public int hashCode() {
return Helper.bytesHashCode(this);
}
@Override
public boolean equals(Object obj) {
return obj instanceof BytesReference && Helper.bytesEqual(this, (BytesReference) obj);
}
} | 1no label
| src_main_java_org_elasticsearch_common_bytes_BytesArray.java |
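Note: a small sketch of the slicing semantics: slice() returns a view over the same backing array (note the offset arithmetic in get() and slice()), while copyBytesArray() detaches:
BytesArray bytes = new BytesArray("hello world");
BytesReference slice = bytes.slice(6, 5);
System.out.println(slice.toUtf8()); // prints "world"; still backed by the original array
BytesArray detached = slice.toBytesArray().copyBytesArray();
assert detached.array() != bytes.array(); // independent copy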
1,503 | public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCase {
private final ESLogger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class);
@Test
public void testAlways() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
// assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("now, start 1 more node, check that rebalancing will happen (for test1) because we set it to always");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(routingNodes.node("node3").size(), equalTo(1));
assertThat(routingNodes.node("node3").get(0).shardId().index().name(), equalTo("test1"));
}
@Test
public void testClusterPrimariesActive1() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test2, replicas will start initializing");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to primaries_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(routingNodes.node("node3").size(), equalTo(1));
assertThat(routingNodes.node("node3").get(0).shardId().index().name(), equalTo("test1"));
}
@Test
public void testClusterPrimariesActive2() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to primaries_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
}
@Test
public void testClusterAllActive1() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test2, replicas will start initializing");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("start the test2 replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(routingNodes.node("node3").size(), equalTo(1));
assertThat(routingNodes.node("node3").get(0).shardId().index().name(), anyOf(equalTo("test1"), equalTo("test2")));
}
@Test
public void testClusterAllActive2() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
}
@Test
public void testClusterAllActive3() {
AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.allow_rebalance", ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test1"))
.addAsNew(metaData.index("test2"))
.build();
ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test1, replicas will start initializing");
RoutingNodes routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start the test1 replica shards");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards for test2, replicas will start initializing");
routingNodes = clusterState.routingNodes();
prevRoutingTable = routingTable;
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
}
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.routingNodes();
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
}
} | 0true
| src_test_java_org_elasticsearch_cluster_routing_allocation_ClusterRebalanceRoutingTests.java |
1,860 | boolean b = h1.executeTransaction(opts, new TransactionalTask<Boolean>() {
public Boolean execute(TransactionalTaskContext context) throws TransactionException {
final TransactionalMap<String, String> txMap = context.getMap("default");
txMap.getForUpdate("var");
fail();
return true;
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_MapTransactionTest.java |
647 | public abstract class CollectionContainer implements DataSerializable {
protected String name;
protected NodeEngine nodeEngine;
protected ILogger logger;
protected Map<Long, CollectionItem> itemMap;
protected final Map<Long, TxCollectionItem> txMap = new HashMap<Long, TxCollectionItem>();
private long idGenerator;
protected CollectionContainer() {
}
protected CollectionContainer(String name, NodeEngine nodeEngine) {
this.name = name;
this.nodeEngine = nodeEngine;
this.logger = nodeEngine.getLogger(getClass());
}
public void init(NodeEngine nodeEngine) {
this.nodeEngine = nodeEngine;
this.logger = nodeEngine.getLogger(getClass());
}
protected abstract CollectionConfig getConfig();
protected abstract Collection<CollectionItem> getCollection();
protected abstract Map<Long, CollectionItem> getMap();
protected long add(Data value) {
final CollectionItem item = new CollectionItem(nextId(), value);
if (getCollection().add(item)) {
return item.getItemId();
}
return -1;
}
protected void addBackup(long itemId, Data value) {
final CollectionItem item = new CollectionItem(itemId, value);
getMap().put(itemId, item);
}
protected CollectionItem remove(Data value) {
final Iterator<CollectionItem> iterator = getCollection().iterator();
while (iterator.hasNext()) {
final CollectionItem item = iterator.next();
if (value.equals(item.getValue())) {
iterator.remove();
return item;
}
}
return null;
}
protected void removeBackup(long itemId) {
getMap().remove(itemId);
}
protected int size() {
return getCollection().size();
}
protected Map<Long, Data> clear() {
final Collection<CollectionItem> coll = getCollection();
Map<Long, Data> itemIdMap = new HashMap<Long, Data>(coll.size());
for (CollectionItem item : coll) {
itemIdMap.put(item.getItemId(), (Data) item.getValue());
}
coll.clear();
return itemIdMap;
}
protected void clearBackup(Set<Long> itemIdSet) {
for (Long itemId : itemIdSet) {
removeBackup(itemId);
}
}
protected boolean contains(Set<Data> valueSet) {
for (Data value : valueSet) {
boolean contains = false;
for (CollectionItem item : getCollection()) {
if (value.equals(item.getValue())) {
contains = true;
break;
}
}
if (!contains) {
return false;
}
}
return true;
}
protected Map<Long, Data> addAll(List<Data> valueList) {
final int size = valueList.size();
final Map<Long, Data> map = new HashMap<Long, Data>(size);
List<CollectionItem> list = new ArrayList<CollectionItem>(size);
for (Data value : valueList) {
final long itemId = nextId();
list.add(new CollectionItem(itemId, value));
map.put(itemId, value);
}
getCollection().addAll(list);
return map;
}
protected void addAllBackup(Map<Long, Data> valueMap) {
Map<Long, CollectionItem> map = new HashMap<Long, CollectionItem>(valueMap.size());
for (Map.Entry<Long, Data> entry : valueMap.entrySet()) {
final long itemId = entry.getKey();
map.put(itemId, new CollectionItem(itemId, entry.getValue()));
}
getMap().putAll(map);
}
protected Map<Long, Data> compareAndRemove(boolean retain, Set<Data> valueSet) {
Map<Long, Data> itemIdMap = new HashMap<Long, Data>();
final Iterator<CollectionItem> iterator = getCollection().iterator();
while (iterator.hasNext()) {
final CollectionItem item = iterator.next();
final boolean contains = valueSet.contains(item.getValue());
if ((contains && !retain) || (!contains && retain)) {
itemIdMap.put(item.getItemId(), (Data) item.getValue());
iterator.remove();
}
}
return itemIdMap;
}
protected Collection<Data> getAll() {
final ArrayList<Data> sub = new ArrayList<Data>(getCollection().size());
for (CollectionItem item : getCollection()) {
sub.add((Data) item.getValue());
}
return sub;
}
protected boolean hasEnoughCapacity(int delta) {
return getCollection().size() + delta <= getConfig().getMaxSize();
}
/*
 * TX methods
 */
public Long reserveAdd(String transactionId, Data value) {
if (value != null && getCollection().contains(new CollectionItem(-1, value))) {
return null;
}
final long itemId = nextId();
txMap.put(itemId, new TxCollectionItem(itemId, null, transactionId, false));
return itemId;
}
public void reserveAddBackup(long itemId, String transactionId) {
TxCollectionItem item = new TxCollectionItem(itemId, null, transactionId, false);
Object o = txMap.put(itemId, item);
if (o != null) {
logger.severe("txnOfferBackupReserve operation-> Item exists already at txMap for itemId: " + itemId);
}
}
public CollectionItem reserveRemove(long reservedItemId, Data value, String transactionId) {
final Iterator<CollectionItem> iterator = getCollection().iterator();
while (iterator.hasNext()) {
final CollectionItem item = iterator.next();
if (value.equals(item.getValue())) {
iterator.remove();
txMap.put(item.getItemId(), new TxCollectionItem(item).setTransactionId(transactionId).setRemoveOperation(true));
return item;
}
}
if (reservedItemId != -1) {
return txMap.remove(reservedItemId);
}
return null;
}
public void reserveRemoveBackup(long itemId, String transactionId) {
final CollectionItem item = getMap().remove(itemId);
if (item == null) {
throw new TransactionException("Backup reserve failed: " + itemId);
}
txMap.put(itemId, new TxCollectionItem(item).setTransactionId(transactionId).setRemoveOperation(true));
}
public void ensureReserve(long itemId) {
if (txMap.get(itemId) == null) {
throw new TransactionException("No reserve for itemId: " + itemId);
}
}
public void rollbackAdd(long itemId) {
if (txMap.remove(itemId) == null) {
logger.warning("rollbackAdd operation-> No txn item for itemId: " + itemId);
}
}
public void rollbackAddBackup(long itemId) {
if (txMap.remove(itemId) == null) {
logger.warning("rollbackAddBackup operation-> No txn item for itemId: " + itemId);
}
}
public void rollbackRemove(long itemId) {
final CollectionItem item = txMap.remove(itemId);
if (item == null) {
logger.warning("rollbackRemove operation-> No txn item for itemId: " + itemId);
return;
}
getCollection().add(item);
}
public void rollbackRemoveBackup(long itemId) {
if (txMap.remove(itemId) == null) {
logger.warning("rollbackRemoveBackup operation-> No txn item for itemId: " + itemId);
}
}
public void commitAdd(long itemId, Data value) {
final CollectionItem item = txMap.remove(itemId);
if (item == null) {
throw new TransactionException("No reserve :" + itemId);
}
item.setValue(value);
getCollection().add(item);
}
public void commitAddBackup(long itemId, Data value) {
CollectionItem item = txMap.remove(itemId);
if (item == null) {
item = new CollectionItem(itemId, value);
}
getMap().put(itemId, item);
}
public CollectionItem commitRemove(long itemId) {
final CollectionItem item = txMap.remove(itemId);
if (item == null) {
logger.warning("commitRemove operation-> No txn item for itemId: " + itemId);
}
return item;
}
public void commitRemoveBackup(long itemId) {
if (txMap.remove(itemId) == null) {
logger.warning("commitRemoveBackup operation-> No txn item for itemId: " + itemId);
}
}
public void rollbackTransaction(String transactionId) {
final Iterator<TxCollectionItem> iterator = txMap.values().iterator();
while (iterator.hasNext()) {
final TxCollectionItem item = iterator.next();
if (transactionId.equals(item.getTransactionId())) {
iterator.remove();
if (item.isRemoveOperation()) {
getCollection().add(item);
}
}
}
}
public long nextId() {
return idGenerator++;
}
void setId(long itemId) {
idGenerator = Math.max(itemId + 1, idGenerator);
}
public void destroy() {
onDestroy();
if (itemMap != null) {
itemMap.clear();
}
txMap.clear();
}
protected abstract void onDestroy();
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(name);
final Collection<CollectionItem> collection = getCollection();
out.writeInt(collection.size());
for (CollectionItem item : collection) {
item.writeData(out);
}
out.writeInt(txMap.size());
for (TxCollectionItem txCollectionItem : txMap.values()) {
txCollectionItem.writeData(out);
}
}
public void readData(ObjectDataInput in) throws IOException {
name = in.readUTF();
final int collectionSize = in.readInt();
final Collection<CollectionItem> collection = getCollection();
for (int i = 0; i < collectionSize; i++) {
final CollectionItem item = new CollectionItem();
item.readData(in);
collection.add(item);
setId(item.getItemId());
}
final int txMapSize = in.readInt();
for (int i = 0; i < txMapSize; i++) {
final TxCollectionItem txCollectionItem = new TxCollectionItem();
txCollectionItem.readData(in);
txMap.put(txCollectionItem.getItemId(), txCollectionItem);
setId(txCollectionItem.itemId);
}
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_collection_CollectionContainer.java |
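The reserve*/commit*/rollback* methods in the CollectionContainer record above form a two-phase protocol: an item id is first reserved into txMap, and the real collection is only mutated on commit (or restored on rollback). A minimal driver sketch, assuming Hazelcast 3.x packages; the helper class and calling convention are illustrative, not part of the source above:

import com.hazelcast.nio.serialization.Data;
import com.hazelcast.transaction.TransactionException;

// Hypothetical driver for the two-phase add above; container, transactionId and value
// are supplied by the surrounding transaction machinery in real code.
final class TxAddSketch {
    static void txAdd(CollectionContainer container, String transactionId, Data value) {
        Long reservedId = container.reserveAdd(transactionId, value); // phase 1: reserve an item id
        if (reservedId == null) {
            return; // the value is already present, nothing was reserved
        }
        try {
            container.commitAdd(reservedId, value); // phase 2: move the item from txMap into the collection
        } catch (TransactionException e) {
            container.rollbackAdd(reservedId); // drop the reservation so the id is not leaked
        }
    }
}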
2,896 | public static class InPredicate extends AbstractPredicate {
private Comparable[] values;
private volatile Set<Comparable> convertedInValues;
public InPredicate() {
}
public InPredicate(String attribute, Comparable... values) {
super(attribute);
this.values = values;
}
@Override
public boolean apply(Map.Entry entry) {
Comparable entryValue = readAttribute(entry);
Set<Comparable> set = convertedInValues;
if (set == null) {
set = new HashSet<Comparable>(values.length);
for (Comparable value : values) {
set.add(convert(entry, entryValue, value));
}
convertedInValues = set;
}
return entryValue != null && set.contains(entryValue);
}
@Override
public Set<QueryableEntry> filter(QueryContext queryContext) {
Index index = getIndex(queryContext);
if (index != null) {
return index.getRecords(values);
} else {
return null;
}
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
super.writeData(out);
out.writeInt(values.length);
for (Object value : values) {
out.writeObject(value);
}
}
@Override
public void readData(ObjectDataInput in) throws IOException {
super.readData(in);
int len = in.readInt();
values = new Comparable[len];
for (int i = 0; i < len; i++) {
values[i] = in.readObject();
}
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append(attribute);
sb.append(" IN (");
for (int i = 0; i < values.length; i++) {
if (i > 0) {
sb.append(",");
}
sb.append(values[i]);
}
sb.append(")");
return sb.toString();
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_query_Predicates.java |
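In application code the InPredicate above is usually built through the Predicates factory and handed to a map query rather than constructed directly. A minimal sketch, assuming Hazelcast 3.x; the map name and values are illustrative:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.query.Predicate;
import com.hazelcast.query.Predicates;

import java.util.Collection;

// Sketch: selecting map values with an IN predicate; "this" addresses the entry value itself.
public final class InPredicateSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<String, Integer> ages = hz.getMap("ages");
        ages.put("alice", 30);
        ages.put("bob", 25);
        Predicate predicate = Predicates.in("this", 25, 40); // matches values 25 or 40
        Collection<Integer> matches = ages.values(predicate);
        System.out.println(matches); // expected: [25]
        hz.shutdown();
    }
}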
1,075 | public class OSQLScriptEngine implements ScriptEngine {
public static final String NAME = "sql";
private ScriptEngineFactory factory;
public OSQLScriptEngine(ScriptEngineFactory factory) {
this.factory = factory;
}
@Override
public Object eval(String script, ScriptContext context) throws ScriptException {
return eval(script, (Bindings) null);
}
@Override
public Object eval(Reader reader, ScriptContext context) throws ScriptException {
return eval(reader, (Bindings) null);
}
@Override
public Object eval(String script) throws ScriptException {
return eval(script, (Bindings) null);
}
@Override
public Object eval(Reader reader) throws ScriptException {
return eval(reader, (Bindings) null);
}
@Override
public Object eval(String script, Bindings n) throws ScriptException {
return new OCommandSQL(script).execute(n);
}
@Override
public Object eval(Reader reader, Bindings n) throws ScriptException {
final StringBuilder buffer = new StringBuilder();
try {
int c;
// Reader.ready() only signals non-blocking availability, so read through to end-of-stream instead
while ((c = reader.read()) != -1)
buffer.append((char) c);
} catch (IOException e) {
throw new ScriptException(e);
}
return new OCommandSQL(buffer.toString()).execute(n);
}
@Override
public void put(String key, Object value) {
}
@Override
public Object get(String key) {
return null;
}
@Override
public Bindings getBindings(int scope) {
return new SimpleBindings();
}
@Override
public void setBindings(Bindings bindings, int scope) {
}
@Override
public Bindings createBindings() {
return new SimpleBindings();
}
@Override
public ScriptContext getContext() {
return null;
}
@Override
public void setContext(ScriptContext context) {
}
@Override
public ScriptEngineFactory getFactory() {
return factory;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_sql_OSQLScriptEngine.java |
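A quick way to exercise the engine above is through the standard JSR-223 entry point. This sketch assumes the accompanying OSQLScriptEngineFactory is registered under the name "sql" and that an OrientDB database is bound to the current thread; both are assumptions here:

import javax.script.ScriptEngine;
import javax.script.ScriptEngineManager;
import javax.script.ScriptException;

// Sketch only: looks the engine up by name and runs one SQL statement through it.
public final class SqlEngineSketch {
    public static void main(String[] args) throws ScriptException {
        ScriptEngine sql = new ScriptEngineManager().getEngineByName("sql"); // assumes factory registration
        Object result = sql.eval("select from OUser"); // executed via OCommandSQL
        System.out.println(result);
    }
}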
3,408 | public static class FileInfo {
private final String name;
private final String physicalName;
private final long length;
private final String checksum;
public FileInfo(String name, String physicalName, long length, String checksum) {
this.name = name;
this.physicalName = physicalName;
this.length = length;
this.checksum = checksum;
}
public String name() {
return name;
}
public String physicalName() {
return this.physicalName;
}
public long length() {
return length;
}
@Nullable
public String checksum() {
return checksum;
}
public boolean isSame(StoreFileMetaData md) {
if (checksum == null || md.checksum() == null) {
return false;
}
return length == md.length() && checksum.equals(md.checksum());
}
} | 0true
| src_main_java_org_elasticsearch_index_gateway_CommitPoint.java |
1,419 | public class OAsynchChannelServiceThread extends OSoftThread {
private OChannelBinaryAsynchClient network;
private int sessionId;
private ORemoteServerEventListener remoteServerEventListener;
public OAsynchChannelServiceThread(final ORemoteServerEventListener iRemoteServerEventListener,
final OChannelBinaryAsynchClient iChannel) {
super(Orient.instance().getThreadGroup(), "OrientDB <- Asynch Client (" + iChannel.socket.getRemoteSocketAddress() + ")");
sessionId = Integer.MIN_VALUE;
remoteServerEventListener = iRemoteServerEventListener;
network = iChannel;
start();
}
@Override
protected void execute() throws Exception {
try {
network.beginResponse(sessionId, 0);
final byte request = network.readByte();
Object obj = null;
switch (request) {
case OChannelBinaryProtocol.REQUEST_PUSH_RECORD:
obj = (ORecordInternal<?>) OChannelBinaryProtocol.readIdentifiable(network);
break;
case OChannelBinaryProtocol.REQUEST_PUSH_DISTRIB_CONFIG:
obj = network.readBytes();
break;
}
if (remoteServerEventListener != null)
remoteServerEventListener.onRequest(request, obj);
} catch (IOException ioe) {
// EXCEPTION RECEIVED (THE SOCKET HAS BEEN CLOSED?) ENSURE THE READ IS UNLOCKED AND EXIT THIS THREAD
sendShutdown();
if (network != null) {
final OChannelBinaryAsynchClient n = network;
network = null;
n.close();
}
} finally {
if (network != null)
network.endResponse();
}
}
} | 0true
| enterprise_src_main_java_com_orientechnologies_orient_enterprise_channel_binary_OAsynchChannelServiceThread.java |
368 | public interface OLazyObjectListInterface<TYPE> extends List<TYPE> {
public void setConvertToRecord(boolean convertToRecord);
public boolean isConverted();
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_object_OLazyObjectListInterface.java |
1,505 | @SuppressWarnings("unchecked")
@PrivateApi
public final class HazelcastInstanceImpl
implements HazelcastInstance {
public final Node node;
final ILogger logger;
final String name;
final ManagementService managementService;
final LifecycleServiceImpl lifecycleService;
final ManagedContext managedContext;
final ThreadMonitoringService threadMonitoringService;
final ThreadGroup threadGroup;
final ConcurrentMap<String, Object> userContext = new ConcurrentHashMap<String, Object>();
HazelcastInstanceImpl(String name, Config config, NodeContext nodeContext)
throws Exception {
this.name = name;
this.threadGroup = new ThreadGroup(name);
threadMonitoringService = new ThreadMonitoringService(threadGroup);
lifecycleService = new LifecycleServiceImpl(this);
ManagedContext configuredManagedContext = config.getManagedContext();
managedContext = new HazelcastManagedContext(this, configuredManagedContext);
//we are going to copy the user-context map of the Config so that each HazelcastInstance will get its own
//user-context map instance instead of having a shared map instance. So changes made to the user-context map
//in one HazelcastInstance will not be reflected in the user-context of other HazelcastInstances.
userContext.putAll(config.getUserContext());
node = new Node(this, config, nodeContext);
logger = node.getLogger(getClass().getName());
lifecycleService.fireLifecycleEvent(STARTING);
node.start();
if (!node.isActive()) {
node.connectionManager.shutdown();
throw new IllegalStateException("Node failed to start!");
}
managementService = new ManagementService(this);
if (configuredManagedContext != null) {
if (configuredManagedContext instanceof HazelcastInstanceAware) {
((HazelcastInstanceAware) configuredManagedContext).setHazelcastInstance(this);
}
}
initHealthMonitor();
}
private void initHealthMonitor() {
String healthMonitorLevelString = node.getGroupProperties().HEALTH_MONITORING_LEVEL.getString();
HealthMonitorLevel healthLevel = HealthMonitorLevel.valueOf(healthMonitorLevelString);
if (healthLevel != HealthMonitorLevel.OFF) {
logger.finest("Starting health monitor");
int delaySeconds = node.getGroupProperties().HEALTH_MONITORING_DELAY_SECONDS.getInteger();
new HealthMonitor(this, healthLevel, delaySeconds).start();
}
}
public ManagementService getManagementService() {
return managementService;
}
@Override
public String getName() {
return name;
}
@Override
public <K, V> IMap<K, V> getMap(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a map instance with a null name is not allowed!");
}
return getDistributedObject(MapService.SERVICE_NAME, name);
}
@Override
public <E> IQueue<E> getQueue(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a queue instance with a null name is not allowed!");
}
return getDistributedObject(QueueService.SERVICE_NAME, name);
}
@Override
public <E> ITopic<E> getTopic(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a topic instance with a null name is not allowed!");
}
return getDistributedObject(TopicService.SERVICE_NAME, name);
}
@Override
public <E> ISet<E> getSet(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a set instance with a null name is not allowed!");
}
return getDistributedObject(SetService.SERVICE_NAME, name);
}
@Override
public <E> IList<E> getList(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a list instance with a null name is not allowed!");
}
return getDistributedObject(ListService.SERVICE_NAME, name);
}
@Override
public <K, V> MultiMap<K, V> getMultiMap(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a multi-map instance with a null name is not allowed!");
}
return getDistributedObject(MultiMapService.SERVICE_NAME, name);
}
@Override
public JobTracker getJobTracker(String name) {
if (name == null) {
throw new NullPointerException("Retrieving a job tracker instance with a null name is not allowed!");
}
return getDistributedObject(MapReduceService.SERVICE_NAME, name);
}
@Deprecated
public ILock getLock(Object key) {
//this method will be deleted in the near future.
if (key == null) {
throw new NullPointerException("Retrieving a lock instance with a null key is not allowed!");
}
String name = LockProxy.convertToStringKey(key, node.getSerializationService());
return getLock(name);
}
@Override
public ILock getLock(String key) {
if (key == null) {
throw new NullPointerException("Retrieving a lock instance with a null key is not allowed!");
}
return getDistributedObject(LockService.SERVICE_NAME, key);
}
@Override
public <T> T executeTransaction(TransactionalTask<T> task)
throws TransactionException {
return executeTransaction(TransactionOptions.getDefault(), task);
}
@Override
public <T> T executeTransaction(TransactionOptions options, TransactionalTask<T> task)
throws TransactionException {
TransactionManagerService transactionManagerService = node.nodeEngine.getTransactionManagerService();
return transactionManagerService.executeTransaction(options, task);
}
@Override
public TransactionContext newTransactionContext() {
return newTransactionContext(TransactionOptions.getDefault());
}
@Override
public TransactionContext newTransactionContext(TransactionOptions options) {
TransactionManagerService transactionManagerService = node.nodeEngine.getTransactionManagerService();
return transactionManagerService.newTransactionContext(options);
}
@Override
public IExecutorService getExecutorService(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving an executor instance with a null name is not allowed!");
}
return getDistributedObject(DistributedExecutorService.SERVICE_NAME, name);
}
@Override
public IdGenerator getIdGenerator(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving an id-generator instance with a null name is not allowed!");
}
return getDistributedObject(IdGeneratorService.SERVICE_NAME, name);
}
@Override
public IAtomicLong getAtomicLong(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving an atomic-long instance with a null name is not allowed!");
}
return getDistributedObject(AtomicLongService.SERVICE_NAME, name);
}
@Override
public <E> IAtomicReference<E> getAtomicReference(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving an atomic-reference instance with a null name is not allowed!");
}
return getDistributedObject(AtomicReferenceService.SERVICE_NAME, name);
}
@Override
public ICountDownLatch getCountDownLatch(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving a countdown-latch instance with a null name is not allowed!");
}
return getDistributedObject(CountDownLatchService.SERVICE_NAME, name);
}
@Override
public ISemaphore getSemaphore(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving a semaphore instance with a null name is not allowed!");
}
return getDistributedObject(SemaphoreService.SERVICE_NAME, name);
}
@Override
public <K, V> ReplicatedMap<K, V> getReplicatedMap(final String name) {
if (name == null) {
throw new NullPointerException("Retrieving a replicated map instance with a null name is not allowed!");
}
return getDistributedObject(ReplicatedMapService.SERVICE_NAME, name);
}
@Override
public Cluster getCluster() {
return node.clusterService.getClusterProxy();
}
@Override
public Member getLocalEndpoint() {
return node.clusterService.getLocalMember();
}
@Override
public Collection<DistributedObject> getDistributedObjects() {
ProxyService proxyService = node.nodeEngine.getProxyService();
return proxyService.getAllDistributedObjects();
}
@Override
public Config getConfig() {
return node.getConfig();
}
@Override
public ConcurrentMap<String, Object> getUserContext() {
return userContext;
}
@Override
public PartitionService getPartitionService() {
return node.partitionService.getPartitionServiceProxy();
}
@Override
public ClientService getClientService() {
return node.clientEngine.getClientService();
}
@Override
public LoggingService getLoggingService() {
return node.loggingService;
}
@Override
public LifecycleServiceImpl getLifecycleService() {
return lifecycleService;
}
@Override
public void shutdown() {
getLifecycleService().shutdown();
}
@Override
@Deprecated
public <T extends DistributedObject> T getDistributedObject(String serviceName, Object id) {
if (id instanceof String) {
return (T) node.nodeEngine.getProxyService().getDistributedObject(serviceName, (String) id);
}
throw new IllegalArgumentException("'id' must be type of String!");
}
@Override
public <T extends DistributedObject> T getDistributedObject(String serviceName, String name) {
ProxyService proxyService = node.nodeEngine.getProxyService();
return (T) proxyService.getDistributedObject(serviceName, name);
}
@Override
public String addDistributedObjectListener(DistributedObjectListener distributedObjectListener) {
final ProxyService proxyService = node.nodeEngine.getProxyService();
return proxyService.addProxyListener(distributedObjectListener);
}
@Override
public boolean removeDistributedObjectListener(String registrationId) {
final ProxyService proxyService = node.nodeEngine.getProxyService();
return proxyService.removeProxyListener(registrationId);
}
public ThreadGroup getThreadGroup() {
return threadGroup;
}
public SerializationService getSerializationService() {
return node.getSerializationService();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof HazelcastInstance)) {
return false;
}
HazelcastInstance that = (HazelcastInstance) o;
return name != null ? name.equals(that.getName()) : that.getName() == null;
}
@Override
public int hashCode() {
return name != null ? name.hashCode() : 0;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("HazelcastInstance");
sb.append("{name='").append(name).append('\'');
sb.append(", node=").append(node.getThisAddress());
sb.append('}');
return sb.toString();
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_instance_HazelcastInstanceImpl.java |
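The executeTransaction(...) entry points in the record above delegate to the node's TransactionManagerService; callers typically drive them as below. A minimal sketch, assuming Hazelcast 3.x imports; the map name and values are illustrative:

import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.TransactionalMap;
import com.hazelcast.transaction.TransactionException;
import com.hazelcast.transaction.TransactionOptions;
import com.hazelcast.transaction.TransactionalTask;
import com.hazelcast.transaction.TransactionalTaskContext;

// Sketch: one unit of work through executeTransaction; the work is committed
// only if execute() returns normally and rolled back if it throws.
public final class TxSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        String v = hz.executeTransaction(TransactionOptions.getDefault(),
                new TransactionalTask<String>() {
                    public String execute(TransactionalTaskContext ctx) throws TransactionException {
                        TransactionalMap<String, String> map = ctx.getMap("default");
                        map.put("key", "value");
                        return map.get("key");
                    }
                });
        System.out.println(v); // prints "value"
        hz.shutdown();
    }
}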
1,632 | public class OServerSideScriptInterpreter extends OServerPluginAbstract {
private boolean enabled = false;
@Override
public void config(final OServer iServer, OServerParameterConfiguration[] iParams) {
for (OServerParameterConfiguration param : iParams) {
if (param.name.equalsIgnoreCase("enabled")) {
if (Boolean.parseBoolean(param.value))
// ENABLE IT
enabled = true;
}
}
}
@Override
public String getName() {
return "script-interpreter";
}
@Override
public void startup() {
if (!enabled)
return;
OLogManager.instance().info(this,
"Installing Script interpreter. WARN: authenticated clients can execute any kind of code into the server.");
// REGISTER THE SECURE COMMAND SCRIPT
OCommandManager.instance().registerExecutor(OCommandScript.class, OCommandExecutorScript.class);
}
@Override
public void shutdown() {
if (!enabled)
return;
OCommandManager.instance().unregisterExecutor(OCommandScript.class);
}
} | 0true
| server_src_main_java_com_orientechnologies_orient_server_handler_OServerSideScriptInterpreter.java |
992 | return new DataSerializableFactory() {
@Override
public IdentifiedDataSerializable create(int typeId) {
switch (typeId) {
case ACQUIRE_BACKUP_OPERATION:
return new AcquireBackupOperation();
case ACQUIRE_OPERATION:
return new AcquireOperation();
case AVAILABLE_OPERATION:
return new AvailableOperation();
case DEAD_MEMBER_BACKUP_OPERATION:
return new DeadMemberBackupOperation();
case DRAIN_BACKUP_OPERATION:
return new DrainBackupOperation();
case DRAIN_OPERATION:
return new DrainOperation();
case INIT_BACKUP_OPERATION:
return new InitBackupOperation();
case INIT_OPERATION:
return new InitOperation();
case REDUCE_BACKUP_OPERATION:
return new ReduceBackupOperation();
case REDUCE_OPERATION:
return new ReduceOperation();
case RELEASE_BACKUP_OPERATION:
return new ReleaseBackupOperation();
case RELEASE_OPERATION:
return new ReleaseOperation();
case SEMAPHORE_DEAD_MEMBER_OPERATION:
return new SemaphoreDeadMemberOperation();
case SEMAPHORE_REPLICATION_OPERATION:
return new SemaphoreReplicationOperation();
default:
return null;
}
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_semaphore_SemaphoreDataSerializerHook.java |
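The anonymous factory above maps constant type ids to fresh no-arg instances so the serialization layer can deserialize without reflection. A self-contained illustration of the same pattern, assuming Hazelcast 3.x interfaces; the factory id, type id and Foo class are hypothetical:

import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.DataSerializableFactory;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;

import java.io.IOException;

// Illustration of the id -> instance pattern used by the hook above.
public final class MyFactory implements DataSerializableFactory {
    static final int FACTORY_ID = 1000; // hypothetical
    static final int FOO = 0;           // hypothetical

    @Override
    public IdentifiedDataSerializable create(int typeId) {
        switch (typeId) {
            case FOO: return new Foo(); // one no-arg instance per known type id
            default:  return null;      // unknown id -> the caller reports the error
        }
    }

    static final class Foo implements IdentifiedDataSerializable {
        int payload;
        @Override public int getFactoryId() { return FACTORY_ID; }
        @Override public int getId() { return FOO; }
        @Override public void writeData(ObjectDataOutput out) throws IOException { out.writeInt(payload); }
        @Override public void readData(ObjectDataInput in) throws IOException { payload = in.readInt(); }
    }
}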
246 | public class AstyanaxLockStoreTest extends LockKeyColumnValueStoreTest {
@BeforeClass
public static void startCassandra() {
CassandraStorageSetup.startCleanEmbedded();
}
@Override
public KeyColumnValueStoreManager openStorageManager(int idx) throws BackendException {
return new AstyanaxStoreManager(CassandraStorageSetup.getAstyanaxConfiguration(getClass().getSimpleName()));
}
} | 0true
| titan-cassandra_src_test_java_com_thinkaurelius_titan_diskstorage_cassandra_astyanax_AstyanaxLockStoreTest.java |
809 | public class GetAndSetRequest extends AtomicLongRequest {
public GetAndSetRequest() {
}
public GetAndSetRequest(String name, long value) {
super(name, value);
}
@Override
protected Operation prepareOperation() {
return new GetAndSetOperation(name, delta);
}
@Override
public int getClassId() {
return AtomicLongPortableHook.GET_AND_SET;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_client_GetAndSetRequest.java |
130 | public enum SchemaAction {
/**
* Registers the index with all instances in the graph cluster. After an index is installed, it must be registered
* with all graph instances.
*/
REGISTER_INDEX,
/**
* Re-builds the index from the graph
*/
REINDEX,
/**
* Enables the index so that it can be used by the query processing engine. An index must be registered before it
* can be enabled.
*/
ENABLE_INDEX,
/**
* Disables the index in the graph so that it is no longer used.
*/
DISABLE_INDEX,
/**
* Removes the index from the graph (optional operation)
*/
REMOVE_INDEX;
public Set<SchemaStatus> getApplicableStatus() {
switch(this) {
case REGISTER_INDEX: return ImmutableSet.of(SchemaStatus.INSTALLED);
case REINDEX: return ImmutableSet.of(SchemaStatus.REGISTERED,SchemaStatus.ENABLED);
case ENABLE_INDEX: return ImmutableSet.of(SchemaStatus.REGISTERED);
case DISABLE_INDEX: return ImmutableSet.of(SchemaStatus.REGISTERED,SchemaStatus.INSTALLED,SchemaStatus.ENABLED);
case REMOVE_INDEX: return ImmutableSet.of(SchemaStatus.DISABLED);
default: throw new IllegalArgumentException("Action is invalid: " + this);
}
}
} | 0true
| titan-core_src_main_java_com_thinkaurelius_titan_core_schema_SchemaAction.java |
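The javadoc above encodes a lifecycle: an index moves through INSTALLED, REGISTERED, ENABLED and DISABLED, and getApplicableStatus() names the statuses from which each action may legally be taken. A minimal sketch of gating an action on an index's current status; the helper class is hypothetical (not Titan API) and the import paths are assumed from the file path above:

import com.thinkaurelius.titan.core.schema.SchemaAction;
import com.thinkaurelius.titan.core.schema.SchemaStatus;

final class SchemaActionGuard {
    private SchemaActionGuard() {}

    // Throws if the action is not legal from the index's current status,
    // e.g. ENABLE_INDEX is only applicable to a REGISTERED index.
    static void checkApplicable(SchemaAction action, SchemaStatus currentStatus) {
        if (!action.getApplicableStatus().contains(currentStatus)) {
            throw new IllegalStateException("Cannot apply " + action
                    + " to an index in status " + currentStatus);
        }
    }
}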
1,416 | public class DummyEntity {
private long id;
private int version;
private String name;
private double value;
private Date date;
private Set<DummyProperty> properties;
public DummyEntity() {
super();
}
public DummyEntity(long id, String name, double value, Date date) {
super();
this.id = id;
this.name = name;
this.value = value;
this.date = date;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public double getValue() {
return value;
}
public void setValue(double value) {
this.value = value;
}
public Date getDate() {
return date;
}
public void setDate(Date date) {
this.date = date;
}
public void setProperties(Set<DummyProperty> properties) {
this.properties = properties;
}
public Set<DummyProperty> getProperties() {
return properties;
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate4_src_test_java_com_hazelcast_hibernate_entity_DummyEntity.java |
785 | public class MultiPercolateAction extends Action<MultiPercolateRequest, MultiPercolateResponse, MultiPercolateRequestBuilder> {
public static final MultiPercolateAction INSTANCE = new MultiPercolateAction();
public static final String NAME = "mpercolate";
private MultiPercolateAction() {
super(NAME);
}
@Override
public MultiPercolateResponse newResponse() {
return new MultiPercolateResponse();
}
@Override
public MultiPercolateRequestBuilder newRequestBuilder(Client client) {
return new MultiPercolateRequestBuilder(client);
}
} | 0true
| src_main_java_org_elasticsearch_action_percolate_MultiPercolateAction.java |
1,243 | public class CeylonProjectModulesContainer implements IClasspathContainer {
public static final String CONTAINER_ID = PLUGIN_ID + ".cpcontainer.CEYLON_CONTAINER";
private IClasspathEntry[] classpathEntries;
private IPath path;
//private String jdtVersion;
private IJavaProject javaProject;
private Set<String> modulesWithSourcesAlreadySearched = synchronizedSet(new HashSet<String>());
public IJavaProject getJavaProject() {
return javaProject;
}
public IClasspathAttribute[] getAttributes() {
return attributes;
}
/**
* attributes attached to the container but not Ceylon related (Webtools or AspectJ, for instance)
*/
private IClasspathAttribute[] attributes = new IClasspathAttribute[0];
public CeylonProjectModulesContainer(IJavaProject javaProject, IPath path,
IClasspathEntry[] classpathEntries, IClasspathAttribute[] attributes) {
this.path = path;
this.attributes = attributes;
this.classpathEntries = classpathEntries;
this.javaProject = javaProject;
}
public CeylonProjectModulesContainer(IProject project) {
javaProject = JavaCore.create(project);
path = new Path(CeylonProjectModulesContainer.CONTAINER_ID + "/default");
classpathEntries = new IClasspathEntry[0];
attributes = new IClasspathAttribute[0];
}
public CeylonProjectModulesContainer(CeylonProjectModulesContainer cp) {
path = cp.path;
javaProject = cp.javaProject;
classpathEntries = cp.classpathEntries;
attributes = cp.attributes;
modulesWithSourcesAlreadySearched = cp.modulesWithSourcesAlreadySearched;
}
public String getDescription() {
return "Ceylon Project Modules";
}
public int getKind() {
return K_APPLICATION;
}
public IPath getPath() {
return path;
}
public IClasspathEntry[] getClasspathEntries() {
return classpathEntries;
}
public IClasspathEntry addNewClasspathEntryIfNecessary(IPath modulePath) {
synchronized (classpathEntries) {
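// NOTE: classpathEntries is reassigned below (and also in resolveClasspath
// without this lock), so synchronizing on the field itself guards this
// read-copy-update but not every writer of the array reference.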
for (IClasspathEntry cpEntry : classpathEntries) {
if (cpEntry.getPath().equals(modulePath)) {
return null;
}
}
IClasspathEntry newEntry = newLibraryEntry(modulePath, null, null);
IClasspathEntry[] newClasspathEntries = new IClasspathEntry[classpathEntries.length + 1];
if (classpathEntries.length > 0) {
System.arraycopy(classpathEntries, 0, newClasspathEntries, 0, classpathEntries.length);
}
newClasspathEntries[classpathEntries.length] = newEntry;
classpathEntries = newClasspathEntries;
return newEntry;
}
}
/*private static final ISchedulingRule RESOLVE_EVENT_RULE = new ISchedulingRule() {
public boolean contains(ISchedulingRule rule) {
return rule == this;
}
public boolean isConflicting(ISchedulingRule rule) {
return rule == this;
}
};*/
public void runReconfigure() {
modulesWithSourcesAlreadySearched.clear();
Job job = new Job("Resolving dependencies for project " +
getJavaProject().getElementName()) {
@Override
protected IStatus run(IProgressMonitor monitor) {
final IProject project = javaProject.getProject();
try {
final IClasspathEntry[] classpath = constructModifiedClasspath(javaProject);
javaProject.setRawClasspath(classpath, monitor);
boolean changed = resolveClasspath(monitor, false);
if(changed) {
refreshClasspathContainer(monitor);
}
// Rebuild the project :
// - without referenced projects
// - with referencing projects
// - and force the rebuild even if the model is already typechecked
Job job = new BuildProjectAfterClasspathChangeJob("Rebuild of project " +
project.getName(), project, false, true, true);
job.setRule(project.getWorkspace().getRoot());
job.schedule(3000);
job.setPriority(Job.BUILD);
return Status.OK_STATUS;
}
catch (CoreException e) {
e.printStackTrace();
return new Status(IStatus.ERROR, PLUGIN_ID,
"could not resolve dependencies", e);
}
}
};
job.setUser(false);
job.setPriority(Job.BUILD);
job.setRule(getWorkspace().getRoot());
job.schedule();
}
private IClasspathEntry[] constructModifiedClasspath(IJavaProject javaProject)
throws JavaModelException {
IClasspathEntry newEntry = JavaCore.newContainerEntry(path, null,
new IClasspathAttribute[0], false);
IClasspathEntry[] entries = javaProject.getRawClasspath();
List<IClasspathEntry> newEntries = new ArrayList<IClasspathEntry>(asList(entries));
int index = 0;
boolean mustReplace = false;
for (IClasspathEntry entry: newEntries) {
if (entry.getPath().equals(newEntry.getPath()) ) {
mustReplace = true;
break;
}
index++;
}
if (mustReplace) {
newEntries.set(index, newEntry);
}
else {
newEntries.add(newEntry);
}
return (IClasspathEntry[]) newEntries.toArray(new IClasspathEntry[newEntries.size()]);
}
void notifyUpdateClasspathEntries() {
// Changes to resolved classpath are not announced by JDT Core
// and so PackageExplorer does not properly refresh when we update
// the classpath container.
// See https://bugs.eclipse.org/bugs/show_bug.cgi?id=154071
DeltaProcessingState s = JavaModelManager.getJavaModelManager().deltaState;
synchronized (s) {
IElementChangedListener[] listeners = s.elementChangedListeners;
for (int i = 0; i < listeners.length; i++) {
if (listeners[i] instanceof PackageExplorerContentProvider) {
JavaElementDelta delta = new JavaElementDelta(javaProject);
delta.changed(IJavaElementDelta.F_RESOLVED_CLASSPATH_CHANGED);
listeners[i].elementChanged(new ElementChangedEvent(delta,
ElementChangedEvent.POST_CHANGE));
}
}
}
//I've disabled this because I don't really like having it, but
//it does seem to help with the issue of archives appearing
//empty in the package manager
/*try {
javaProject.getProject().refreshLocal(IResource.DEPTH_ONE, null);
}
catch (CoreException e) {
e.printStackTrace();
}*/
}
/**
* Resolves the classpath entries for this container.
* @param monitor
* @param reparse
* @return true if the classpath was changed, false otherwise.
*/
public boolean resolveClasspath(IProgressMonitor monitor, boolean reparse) {
IJavaProject javaProject = getJavaProject();
IProject project = javaProject.getProject();
try {
TypeChecker typeChecker = null;
if (!reparse) {
typeChecker = getProjectTypeChecker(project);
}
IClasspathEntry[] oldEntries = classpathEntries;
if (typeChecker==null) {
IClasspathEntry explodeFolderEntry = null;
if (oldEntries != null) {
for (IClasspathEntry entry : oldEntries) {
if (entry.getPath() != null && entry.getPath().equals(getCeylonClassesOutputFolder(project).getFullPath())) {
explodeFolderEntry = entry;
break;
}
}
}
IClasspathEntry[] resetEntries = explodeFolderEntry == null ?
new IClasspathEntry[] {} :
new IClasspathEntry[] {explodeFolderEntry};
JavaCore.setClasspathContainer(getPath(),
new IJavaProject[]{javaProject},
new IClasspathContainer[]{ new CeylonProjectModulesContainer(javaProject, getPath(), resetEntries, attributes)} , monitor);
typeChecker = parseCeylonModel(project, monitor);
}
IFolder explodedModulesFolder = getCeylonClassesOutputFolder(project);
if (isExplodeModulesEnabled(project)) {
if (!explodedModulesFolder.exists()) {
CoreUtility.createDerivedFolder(explodedModulesFolder, true, true, monitor);
} else {
if (!explodedModulesFolder.isDerived()) {
explodedModulesFolder.setDerived(true, monitor);
}
}
}
else {
if (explodedModulesFolder.exists()) {
explodedModulesFolder.delete(true, monitor);
}
}
final Collection<IClasspathEntry> paths = findModuleArchivePaths(
javaProject, project, typeChecker);
CeylonProjectModulesContainer currentContainer = (CeylonProjectModulesContainer) JavaCore.getClasspathContainer(path, javaProject);
if (oldEntries == null ||
oldEntries != currentContainer.classpathEntries ||
!paths.equals(asList(oldEntries))) {
this.classpathEntries = paths.toArray(new IClasspathEntry[paths.size()]);
return true;
}
}
catch (CoreException e) {
e.printStackTrace();
}
return false;
}
public void refreshClasspathContainer(IProgressMonitor monitor) throws JavaModelException {
IJavaProject javaProject = getJavaProject();
setClasspathContainer(path, new IJavaProject[] { javaProject },
new IClasspathContainer[] {new CeylonProjectModulesContainer(this)}, new SubProgressMonitor(monitor, 1));
JDTModelLoader modelLoader = CeylonBuilder.getProjectModelLoader(javaProject.getProject());
if (modelLoader != null) {
modelLoader.refreshNameEnvironment();
}
//update the package manager UI
new Job("update package manager") {
@Override
protected IStatus run(IProgressMonitor monitor) {
notifyUpdateClasspathEntries();
return Status.OK_STATUS;
}
}.schedule();
}
private Collection<IClasspathEntry> findModuleArchivePaths(
IJavaProject javaProject, IProject project, TypeChecker typeChecker)
throws JavaModelException, CoreException {
final Map<String, IClasspathEntry> paths = new TreeMap<String, IClasspathEntry>();
Context context = typeChecker.getContext();
RepositoryManager provider = context.getRepositoryManager();
Set<Module> modulesToAdd = context.getModules().getListOfModules();
//modulesToAdd.add(projectModules.getLanguageModule());
for (Module module: modulesToAdd) {
JDTModule jdtModule = (JDTModule) module;
String name = module.getNameAsString();
if (name.equals(Module.DEFAULT_MODULE_NAME) ||
JDKUtils.isJDKModule(name) ||
JDKUtils.isOracleJDKModule(name) ||
module.equals(module.getLanguageModule()) ||
isProjectModule(javaProject, module) ||
! module.isAvailable()) {
continue;
}
IPath modulePath = getModuleArchive(provider, jdtModule);
if (modulePath!=null) {
IPath srcPath = null;
for (IProject p: project.getReferencedProjects()) {
if (p.isAccessible()
&& p.getLocation().isPrefixOf(modulePath)) {
//the module belongs to a referenced
//project, so use the project source
srcPath = p.getLocation();
break;
}
}
if (srcPath==null) {
for (IClasspathEntry entry : classpathEntries) {
if (entry.getPath().equals(modulePath)) {
srcPath = entry.getSourceAttachmentPath();
break;
}
}
}
if (srcPath==null &&
!modulesWithSourcesAlreadySearched.contains(module.toString())) {
//otherwise, use the src archive
srcPath = getSourceArchive(provider, jdtModule);
}
modulesWithSourcesAlreadySearched.add(module.toString());
IClasspathEntry newEntry = newLibraryEntry(modulePath, srcPath, null);
paths.put(newEntry.toString(), newEntry);
}
else {
// FIXME: ideally we should find the module.java file and put the marker there, but
// I've no idea how to find it and which import is the cause of the import problem
// as it could be transitive
IMarker marker = project.createMarker(IJavaModelMarker.BUILDPATH_PROBLEM_MARKER);
marker.setAttribute(IMarker.MESSAGE, "no module archive found for classpath container: " +
module.getNameAsString() + "/" + module.getVersion());
marker.setAttribute(IMarker.PRIORITY, IMarker.PRIORITY_HIGH);
marker.setAttribute(IMarker.SEVERITY, IMarker.SEVERITY_ERROR);
}
}
if (isExplodeModulesEnabled(project)) {
IClasspathEntry newEntry = newLibraryEntry(getCeylonClassesOutputFolder(project).getFullPath(),
project.getFullPath(), null, false);
paths.put(newEntry.toString(), newEntry);
}
return asList(paths.values().toArray(new IClasspathEntry[paths.size()]));
}
public static File getSourceArtifact(RepositoryManager provider,
JDTModule module) {
String sourceArchivePath = module.getSourceArchivePath();
if (sourceArchivePath == null) {
return null;
}
File sourceArchive = new File(sourceArchivePath);
if (sourceArchive.exists()) {
return sourceArchive;
}
// BEWARE: the request to the provider is deliberately done in 2 steps, because
// a single-step request might make the Aether repo return the .jar archive as a
// default result when the .src extension is not found, in which case the second
// extension (-sources.jar) would never be tried.
String suffix = module.getArtifactType().equals(ArtifactResultType.MAVEN) ?
ArtifactContext.MAVEN_SRC : ArtifactContext.SRC;
ArtifactContext ctx = new ArtifactContext(module.getNameAsString(),
module.getVersion(), suffix);
File srcArtifact = provider.getArtifact(ctx);
if (srcArtifact!=null) {
if (srcArtifact.getPath().endsWith(suffix)) {
return srcArtifact;
}
}
return null;
}
public static IPath getSourceArchive(RepositoryManager provider,
JDTModule module) {
File srcArtifact = getSourceArtifact(provider, module);
if (srcArtifact!=null) {
return new Path(srcArtifact.getPath());
}
return null;
}
public static File getModuleArtifact(RepositoryManager provider,
JDTModule module) {
File moduleFile = module.getArtifact();
if (moduleFile == null) {
return null;
}
if (moduleFile.exists()) {
return moduleFile;
}
// Shouldn't need to execute this anymore!
// We already retrieved this information in the ModuleVisitor,
// so skipping the lookup here should be a performance gain.
ArtifactContext ctx = new ArtifactContext(module.getNameAsString(),
module.getVersion(), ArtifactContext.CAR);
// try first with .car
File moduleArtifact = provider.getArtifact(ctx);
if (moduleArtifact==null){
// try with .jar
ctx = new ArtifactContext(module.getNameAsString(),
module.getVersion(), ArtifactContext.JAR);
moduleArtifact = provider.getArtifact(ctx);
}
return moduleArtifact;
}
public static IPath getModuleArchive(RepositoryManager provider,
JDTModule module) {
File moduleArtifact = getModuleArtifact(provider, module);
if (moduleArtifact!=null) {
return new Path(moduleArtifact.getPath());
}
return null;
}
public static boolean isProjectModule(IJavaProject javaProject, Module module)
throws JavaModelException {
boolean isSource=false;
for (IPackageFragmentRoot s: javaProject.getPackageFragmentRoots()) {
if (s.exists()
&& javaProject.isOnClasspath(s)
&& s.getKind()==IPackageFragmentRoot.K_SOURCE
&& s.getPackageFragment(module.getNameAsString()).exists()) {
isSource=true;
break;
}
}
return isSource;
}
} | 1no label
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_classpath_CeylonProjectModulesContainer.java |
1,794 | public static class CompositeKey implements Serializable
{
static boolean hashCodeCalled = false;
static boolean equalsCalled = false;
@Override
public int hashCode() {
hashCodeCalled = true;
return super.hashCode();
}
@Override
public boolean equals(Object o) {
equalsCalled = true;
return super.equals(o);
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_map_IssuesTest.java |
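The static flags above let a test observe whether the map implementation ever touched the key's hashCode()/equals(), as opposed to working purely on the serialized form. A hedged sketch of such an assertion (not the original test body; "hz" is an assumed HazelcastInstance field and assertFalse is org.junit.Assert's):

@Test
public void keysAreComparedInSerializedForm() {
    IMap<CompositeKey, String> map = hz.getMap("issues"); // "hz" is assumed setup
    CompositeKey.hashCodeCalled = false;
    CompositeKey.equalsCalled = false;
    map.put(new CompositeKey(), "value");
    // With keys handled as serialized Data, neither Object method fires.
    assertFalse(CompositeKey.hashCodeCalled);
    assertFalse(CompositeKey.equalsCalled);
}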
1,056 | public class ListConfigReadOnly extends ListConfig {
public ListConfigReadOnly(ListConfig config) {
super(config);
}
public List<ItemListenerConfig> getItemListenerConfigs() {
final List<ItemListenerConfig> itemListenerConfigs = super.getItemListenerConfigs();
final List<ItemListenerConfig> readOnlyItemListenerConfigs = new ArrayList<ItemListenerConfig>(itemListenerConfigs.size());
for (ItemListenerConfig itemListenerConfig : itemListenerConfigs) {
readOnlyItemListenerConfigs.add(itemListenerConfig.getAsReadOnly());
}
return Collections.unmodifiableList(readOnlyItemListenerConfigs);
}
public ListConfig setName(String name) {
throw new UnsupportedOperationException("This config is a read-only list: " + getName());
}
public ListConfig setItemListenerConfigs(List<ItemListenerConfig> listenerConfigs) {
throw new UnsupportedOperationException("This config is a read-only list: " + getName());
}
public ListConfig setBackupCount(int backupCount) {
throw new UnsupportedOperationException("This config is a read-only list: " + getName());
}
public ListConfig setAsyncBackupCount(int asyncBackupCount) {
throw new UnsupportedOperationException("This config is a read-only list: " + getName());
}
public ListConfig setMaxSize(int maxSize) {
throw new UnsupportedOperationException("This config is a read-only list: " + getName());
}
public ListConfig setStatisticsEnabled(boolean statisticsEnabled) {
throw new UnsupportedOperationException("This config is a read-only list: " + getName());
}
public void addItemListenerConfig(ItemListenerConfig itemListenerConfig) {
throw new UnsupportedOperationException("This config is a read-only list: " + getName());
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_config_ListConfigReadOnly.java |
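Illustrative usage of the wrapper above: reads delegate (with listener configs exposed as an unmodifiable copy) while every mutator throws. The config name is made up, and the no-arg ListConfig constructor is assumed from how other Hazelcast *Config classes are shaped:

ListConfig config = new ListConfig();
config.setName("orders");
ListConfig readOnly = new ListConfigReadOnly(config);
readOnly.getItemListenerConfigs();   // OK: returns an unmodifiable snapshot
readOnly.setMaxSize(100);            // throws UnsupportedOperationException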
2,098 | public final class RemoveBackupOperation extends KeyBasedMapOperation implements BackupOperation, IdentifiedDataSerializable {
private boolean unlockKey = false;
public RemoveBackupOperation(String name, Data dataKey) {
super(name, dataKey);
}
public RemoveBackupOperation(String name, Data dataKey, boolean unlockKey) {
super(name, dataKey);
this.unlockKey = unlockKey;
}
public RemoveBackupOperation() {
}
public void run() {
MapService mapService = getService();
int partitionId = getPartitionId();
RecordStore recordStore = mapService.getRecordStore(partitionId, name);
Record record = recordStore.getRecord(dataKey);
if (record != null) {
recordStore.removeBackup(dataKey);
}
if (unlockKey) {
recordStore.forceUnlock(dataKey);
}
}
@Override
public Object getResponse() {
return Boolean.TRUE;
}
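// Serialization contract: readInternal must mirror writeInternal exactly
// (super's fields first, then unlockKey) or the stream will desynchronize.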
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeBoolean(unlockKey);
}
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
unlockKey = in.readBoolean();
}
public int getFactoryId() {
return MapDataSerializerHook.F_ID;
}
public int getId() {
return MapDataSerializerHook.REMOVE_BACKUP;
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_map_operation_RemoveBackupOperation.java |
4 | public class CeylonAndroidPlugin extends AbstractUIPlugin {
public static final String PLUGIN_ID = "com.redhat.ceylon.eclipse.android.plugin";
private static CeylonAndroidPlugin plugin;
@Override
public void start(BundleContext context) throws Exception {
super.start(context);
plugin = this;
}
@Override
public void stop(BundleContext context) throws Exception {
plugin = null;
super.stop(context);
}
public static CeylonAndroidPlugin getDefault() {
return plugin;
}
public static void logInfo(String msg) {
plugin.getLog().log(new Status(IStatus.INFO, PLUGIN_ID, msg));
}
public static void logInfo(String msg, IOException e) {
plugin.getLog().log(new Status(IStatus.INFO, PLUGIN_ID, msg, e));
}
public static void logError(String msg, Exception e) {
plugin.getLog().log(new Status(IStatus.ERROR, PLUGIN_ID, msg, e));
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.android.plugin_src_com_redhat_ceylon_eclipse_android_plugin_CeylonAndroidPlugin.java |
1,711 | @Service("blAdminEntityService")
public class AdminEntityServiceImpl implements AdminEntityService {
@Resource(name = "blDynamicEntityRemoteService")
protected DynamicEntityService service;
@Resource(name = "blPersistencePackageFactory")
protected PersistencePackageFactory persistencePackageFactory;
@PersistenceContext(unitName = "blPU")
protected EntityManager em;
@Resource(name = "blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
protected DynamicDaoHelper dynamicDaoHelper = new DynamicDaoHelperImpl();
@Override
public ClassMetadata getClassMetadata(PersistencePackageRequest request)
throws ServiceException {
ClassMetadata cmd = inspect(request).getClassMetaData();
cmd.setCeilingType(request.getCeilingEntityClassname());
return cmd;
}
@Override
public DynamicResultSet getRecords(PersistencePackageRequest request) throws ServiceException {
return fetch(request);
}
@Override
public Entity getRecord(PersistencePackageRequest request, String id, ClassMetadata cmd, boolean isCollectionRequest)
throws ServiceException {
String idProperty = getIdProperty(cmd);
FilterAndSortCriteria fasc = new FilterAndSortCriteria(idProperty);
fasc.setFilterValue(id);
request.addFilterAndSortCriteria(fasc);
Entity[] entities = fetch(request).getRecords();
Assert.isTrue(entities != null && entities.length == 1, "Entity not found");
Entity entity = entities[0];
return entity;
}
@Override
public Entity addEntity(EntityForm entityForm, String[] customCriteria)
throws ServiceException {
PersistencePackageRequest ppr = getRequestForEntityForm(entityForm, customCriteria);
// If the entity form has dynamic forms inside of it, we need to persist those as well.
// They are typically done in their own custom persistence handlers, which will get triggered
// based on the criteria specified in the PersistencePackage.
for (Entry<String, EntityForm> entry : entityForm.getDynamicForms().entrySet()) {
DynamicEntityFormInfo info = entityForm.getDynamicFormInfo(entry.getKey());
customCriteria = new String[] {info.getCriteriaName()};
PersistencePackageRequest subRequest = getRequestForEntityForm(entry.getValue(), customCriteria);
ppr.addSubRequest(info.getPropertyName(), subRequest);
}
return add(ppr);
}
@Override
public Entity updateEntity(EntityForm entityForm, String[] customCriteria)
throws ServiceException {
PersistencePackageRequest ppr = getRequestForEntityForm(entityForm, customCriteria);
// If the entity form has dynamic forms inside of it, we need to persist those as well.
// They are typically done in their own custom persistence handlers, which will get triggered
// based on the criteria specified in the PersistencePackage.
for (Entry<String, EntityForm> entry : entityForm.getDynamicForms().entrySet()) {
DynamicEntityFormInfo info = entityForm.getDynamicFormInfo(entry.getKey());
String propertyName = info.getPropertyName();
String propertyValue = entityForm.getFields().get(propertyName).getValue();
customCriteria = new String[] { info.getCriteriaName(), entityForm.getId(), propertyName, propertyValue };
PersistencePackageRequest subRequest = getRequestForEntityForm(entry.getValue(), customCriteria);
ppr.addSubRequest(info.getPropertyName(), subRequest);
}
return update(ppr);
}
@Override
public void removeEntity(EntityForm entityForm, String[] customCriteria)
throws ServiceException {
PersistencePackageRequest ppr = getRequestForEntityForm(entityForm, customCriteria);
remove(ppr);
}
protected List<Property> getPropertiesFromEntityForm(EntityForm entityForm) {
List<Property> properties = new ArrayList<Property>(entityForm.getFields().size());
for (Entry<String, Field> entry : entityForm.getFields().entrySet()) {
Property p = new Property();
p.setName(entry.getKey());
p.setValue(entry.getValue().getValue());
properties.add(p);
}
return properties;
}
protected PersistencePackageRequest getRequestForEntityForm(EntityForm entityForm, String[] customCriteria) {
// Ensure the ID property is on the form
Field idField = entityForm.getFields().get(entityForm.getIdProperty());
if (idField == null) {
idField = new Field();
idField.setName(entityForm.getIdProperty());
idField.setValue(entityForm.getId());
entityForm.getFields().put(entityForm.getIdProperty(), idField);
} else {
idField.setValue(entityForm.getId());
}
List<Property> propList = getPropertiesFromEntityForm(entityForm);
Property[] properties = new Property[propList.size()];
properties = propList.toArray(properties);
Entity entity = new Entity();
entity.setProperties(properties);
String entityType = entityForm.getEntityType();
if (StringUtils.isEmpty(entityType)) {
entityType = entityForm.getCeilingEntityClassname();
}
entity.setType(new String[] { entityType });
PersistencePackageRequest ppr = PersistencePackageRequest.standard()
.withEntity(entity)
.withCustomCriteria(customCriteria)
.withCeilingEntityClassname(entityForm.getCeilingEntityClassname());
return ppr;
}
@Override
public Entity getAdvancedCollectionRecord(ClassMetadata containingClassMetadata, Entity containingEntity,
Property collectionProperty, String collectionItemId)
throws ServiceException {
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(collectionProperty.getMetadata());
FieldMetadata md = collectionProperty.getMetadata();
String containingEntityId = getContextSpecificRelationshipId(containingClassMetadata, containingEntity,
collectionProperty.getName());
Entity entity = null;
if (md instanceof AdornedTargetCollectionMetadata) {
FilterAndSortCriteria fasc = new FilterAndSortCriteria(ppr.getAdornedList().getCollectionFieldName());
fasc.setFilterValue(containingEntityId);
ppr.addFilterAndSortCriteria(fasc);
fasc = new FilterAndSortCriteria(ppr.getAdornedList().getCollectionFieldName() + "Target");
fasc.setFilterValue(collectionItemId);
ppr.addFilterAndSortCriteria(fasc);
Entity[] entities = fetch(ppr).getRecords();
Assert.isTrue(entities != null && entities.length == 1, "Entity not found");
entity = entities[0];
} else if (md instanceof MapMetadata) {
MapMetadata mmd = (MapMetadata) md;
FilterAndSortCriteria fasc = new FilterAndSortCriteria(ppr.getForeignKey().getManyToField());
fasc.setFilterValue(containingEntityId);
ppr.addFilterAndSortCriteria(fasc);
Entity[] entities = fetch(ppr).getRecords();
for (Entity e : entities) {
String idProperty = getIdProperty(containingClassMetadata);
if (mmd.isSimpleValue()) {
idProperty = "key";
}
Property p = e.getPMap().get(idProperty);
if (p.getValue().equals(collectionItemId)) {
entity = e;
break;
}
}
} else {
throw new IllegalArgumentException(String.format("The specified field [%s] for class [%s] was not an " +
"advanced collection field.", collectionProperty.getName(), containingClassMetadata.getCeilingType()));
}
if (entity == null) {
throw new NoResultException(String.format("Could not find record for class [%s], field [%s], main entity id " +
"[%s], collection entity id [%s]", containingClassMetadata.getCeilingType(),
collectionProperty.getName(), containingEntityId, collectionItemId));
}
return entity;
}
@Override
public DynamicResultSet getRecordsForCollection(ClassMetadata containingClassMetadata, Entity containingEntity,
Property collectionProperty, FilterAndSortCriteria[] fascs, Integer startIndex, Integer maxIndex)
throws ServiceException {
return getRecordsForCollection(containingClassMetadata, containingEntity, collectionProperty, fascs, startIndex,
maxIndex, null);
}
@Override
public DynamicResultSet getRecordsForCollection(ClassMetadata containingClassMetadata, Entity containingEntity,
Property collectionProperty, FilterAndSortCriteria[] fascs, Integer startIndex, Integer maxIndex,
String idValueOverride) throws ServiceException {
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(collectionProperty.getMetadata())
.withFilterAndSortCriteria(fascs)
.withStartIndex(startIndex)
.withMaxIndex(maxIndex);
FilterAndSortCriteria fasc;
FieldMetadata md = collectionProperty.getMetadata();
if (md instanceof BasicCollectionMetadata) {
fasc = new FilterAndSortCriteria(ppr.getForeignKey().getManyToField());
} else if (md instanceof AdornedTargetCollectionMetadata) {
fasc = new FilterAndSortCriteria(ppr.getAdornedList().getCollectionFieldName());
} else if (md instanceof MapMetadata) {
fasc = new FilterAndSortCriteria(ppr.getForeignKey().getManyToField());
} else {
throw new IllegalArgumentException(String.format("The specified field [%s] for class [%s] was not a " +
"collection field.", collectionProperty.getName(), containingClassMetadata.getCeilingType()));
}
String id;
if (idValueOverride == null) {
id = getContextSpecificRelationshipId(containingClassMetadata, containingEntity, collectionProperty.getName());
} else {
id = idValueOverride;
}
fasc.setFilterValue(id);
ppr.addFilterAndSortCriteria(fasc);
return fetch(ppr);
}
@Override
public Map<String, DynamicResultSet> getRecordsForAllSubCollections(PersistencePackageRequest ppr, Entity containingEntity)
throws ServiceException {
Map<String, DynamicResultSet> map = new HashMap<String, DynamicResultSet>();
ClassMetadata cmd = getClassMetadata(ppr);
for (Property p : cmd.getProperties()) {
if (p.getMetadata() instanceof CollectionMetadata) {
DynamicResultSet drs = getRecordsForCollection(cmd, containingEntity, p, null, null, null);
map.put(p.getName(), drs);
}
}
return map;
}
@Override
public Entity addSubCollectionEntity(EntityForm entityForm, ClassMetadata mainMetadata, Property field,
Entity parentEntity)
throws ServiceException, ClassNotFoundException {
// Assemble the properties from the entity form
List<Property> properties = new ArrayList<Property>();
for (Entry<String, Field> entry : entityForm.getFields().entrySet()) {
Property p = new Property();
p.setName(entry.getKey());
p.setValue(entry.getValue().getValue());
properties.add(p);
}
FieldMetadata md = field.getMetadata();
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(md)
.withEntity(new Entity());
if (md instanceof BasicCollectionMetadata) {
BasicCollectionMetadata fmd = (BasicCollectionMetadata) md;
ppr.getEntity().setType(new String[] { entityForm.getEntityType() });
// If we're looking up an entity instead of trying to create one on the fly, let's make sure
// that we're not changing the target entity at all and only creating the association to the id
if (fmd.getAddMethodType().equals(AddMethodType.LOOKUP)) {
List<String> fieldsToRemove = new ArrayList<String>();
String idProp = getIdProperty(mainMetadata);
for (String key : entityForm.getFields().keySet()) {
if (!idProp.equals(key)) {
fieldsToRemove.add(key);
}
}
for (String key : fieldsToRemove) {
ListIterator<Property> li = properties.listIterator();
while (li.hasNext()) {
if (li.next().getName().equals(key)) {
li.remove();
}
}
}
ppr.setValidateUnsubmittedProperties(false);
}
Property fp = new Property();
fp.setName(ppr.getForeignKey().getManyToField());
fp.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(fp);
} else if (md instanceof AdornedTargetCollectionMetadata) {
ppr.getEntity().setType(new String[] { ppr.getAdornedList().getAdornedTargetEntityClassname() });
String[] maintainedFields = ((AdornedTargetCollectionMetadata) md).getMaintainedAdornedTargetFields();
if (maintainedFields == null || maintainedFields.length == 0) {
ppr.setValidateUnsubmittedProperties(false);
}
} else if (md instanceof MapMetadata) {
ppr.getEntity().setType(new String[] { entityForm.getEntityType() });
Property p = new Property();
p.setName("symbolicId");
p.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(p);
} else {
throw new IllegalArgumentException(String.format("The specified field [%s] for class [%s] was" +
" not a collection field.", field.getName(), mainMetadata.getCeilingType()));
}
ppr.setCeilingEntityClassname(ppr.getEntity().getType()[0]);
Property[] propArr = new Property[properties.size()];
properties.toArray(propArr);
ppr.getEntity().setProperties(propArr);
return add(ppr);
}
@Override
public Entity updateSubCollectionEntity(EntityForm entityForm, ClassMetadata mainMetadata, Property field,
Entity parentEntity, String collectionItemId)
throws ServiceException, ClassNotFoundException {
List<Property> properties = getPropertiesFromEntityForm(entityForm);
FieldMetadata md = field.getMetadata();
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(md)
.withEntity(new Entity());
if (md instanceof BasicCollectionMetadata) {
BasicCollectionMetadata fmd = (BasicCollectionMetadata) md;
ppr.getEntity().setType(new String[] { fmd.getCollectionCeilingEntity() });
Property fp = new Property();
fp.setName(ppr.getForeignKey().getManyToField());
fp.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(fp);
} else if (md instanceof AdornedTargetCollectionMetadata) {
ppr.getEntity().setType(new String[] { ppr.getAdornedList().getAdornedTargetEntityClassname() });
} else if (md instanceof MapMetadata) {
ppr.getEntity().setType(new String[] { entityForm.getEntityType() });
Property p = new Property();
p.setName("symbolicId");
p.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(p);
} else {
throw new IllegalArgumentException(String.format("The specified field [%s] for class [%s] was" +
" not a collection field.", field.getName(), mainMetadata.getCeilingType()));
}
ppr.setCeilingEntityClassname(ppr.getEntity().getType()[0]);
Property p = new Property();
p.setName(entityForm.getIdProperty());
p.setValue(collectionItemId);
properties.add(p);
Property[] propArr = new Property[properties.size()];
properties.toArray(propArr);
ppr.getEntity().setProperties(propArr);
return update(ppr);
}
@Override
public void removeSubCollectionEntity(ClassMetadata mainMetadata, Property field, Entity parentEntity, String itemId,
String priorKey)
throws ServiceException {
List<Property> properties = new ArrayList<Property>();
Property p;
String parentId = getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName());
Entity entity = new Entity();
PersistencePackageRequest ppr = PersistencePackageRequest.fromMetadata(field.getMetadata())
.withEntity(entity);
if (field.getMetadata() instanceof BasicCollectionMetadata) {
BasicCollectionMetadata fmd = (BasicCollectionMetadata) field.getMetadata();
p = new Property();
p.setName("id");
p.setValue(itemId);
properties.add(p);
p = new Property();
p.setName(ppr.getForeignKey().getManyToField());
p.setValue(parentId);
properties.add(p);
entity.setType(new String[] { fmd.getCollectionCeilingEntity() });
} else if (field.getMetadata() instanceof AdornedTargetCollectionMetadata) {
AdornedTargetList adornedList = ppr.getAdornedList();
p = new Property();
p.setName(adornedList.getLinkedObjectPath() + "." + adornedList.getLinkedIdProperty());
p.setValue(parentId);
properties.add(p);
p = new Property();
p.setName(adornedList.getTargetObjectPath() + "." + adornedList.getTargetIdProperty());
p.setValue(itemId);
properties.add(p);
entity.setType(new String[] { adornedList.getAdornedTargetEntityClassname() });
} else if (field.getMetadata() instanceof MapMetadata) {
MapMetadata fmd = (MapMetadata) field.getMetadata();
p = new Property();
p.setName("symbolicId");
p.setValue(getContextSpecificRelationshipId(mainMetadata, parentEntity, field.getName()));
properties.add(p);
p = new Property();
p.setName("priorKey");
p.setValue(priorKey);
properties.add(p);
MapStructure mapStructure = ppr.getMapStructure();
p = new Property();
p.setName(mapStructure.getKeyPropertyName());
p.setValue(itemId);
properties.add(p);
entity.setType(new String[] { fmd.getTargetClass() });
}
Property[] propArr = new Property[properties.size()];
properties.toArray(propArr);
ppr.getEntity().setProperties(propArr);
remove(ppr);
}
@Override
public String getContextSpecificRelationshipId(ClassMetadata cmd, Entity entity, String propertyName) {
String prefix;
if (propertyName.contains(".")) {
prefix = propertyName.substring(0, propertyName.lastIndexOf("."));
} else {
prefix = "";
}
if (prefix.equals("")) {
return entity.findProperty("id").getValue();
} else {
//we need to check all the parts of the prefix. For example, the prefix could include an @Embedded class like
//defaultSku.dimension. In this case, we want the id from the defaultSku property, since the @Embedded does
//not have an id property - nor should it.
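//e.g. for prefix "defaultSku.dimension" the loop tries "defaultSku.dimension"
//first, then "defaultSku", returning the first ID-typed property it finds.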
String[] prefixParts = prefix.split("\\.");
for (int j = 0; j < prefixParts.length; j++) {
StringBuilder sb = new StringBuilder();
for (int x = 0; x < prefixParts.length - j; x++) {
sb.append(prefixParts[x]);
if (x < prefixParts.length - j - 1) {
sb.append(".");
}
}
String tempPrefix = sb.toString();
for (Property property : entity.getProperties()) {
if (property.getName().startsWith(tempPrefix)) {
BasicFieldMetadata md = (BasicFieldMetadata) cmd.getPMap().get(property.getName()).getMetadata();
if (md.getFieldType().equals(SupportedFieldType.ID)) {
return property.getValue();
}
}
}
}
}
if (!prefix.contains(".")) {
//this may be an embedded class directly on the root entity (e.g. embeddablePriceList.restrictedPriceLists on OfferImpl)
return entity.findProperty("id").getValue();
}
throw new RuntimeException("Unable to establish a relationship id");
}
@Override
public String getIdProperty(ClassMetadata cmd) throws ServiceException {
for (Property p : cmd.getProperties()) {
if (p.getMetadata() instanceof BasicFieldMetadata) {
BasicFieldMetadata fmd = (BasicFieldMetadata) p.getMetadata();
//check for ID type and also make sure the field we're looking at is not a "ToOne" association
if (SupportedFieldType.ID.equals(fmd.getFieldType()) && !p.getName().contains(".")) {
return p.getName();
}
}
}
throw new ServiceException("Could not determine ID field for " + cmd.getCeilingType());
}
protected Entity add(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
try {
return service.add(pkg);
} catch (ValidationException e) {
return e.getEntity();
}
}
protected Entity update(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
try {
return service.update(pkg);
} catch (ValidationException e) {
return e.getEntity();
}
}
protected DynamicResultSet inspect(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
return service.inspect(pkg);
}
protected void remove(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
service.remove(pkg);
}
protected DynamicResultSet fetch(PersistencePackageRequest request)
throws ServiceException {
PersistencePackage pkg = persistencePackageFactory.create(request);
CriteriaTransferObject cto = getDefaultCto();
if (request.getFilterAndSortCriteria() != null) {
cto.addAll(Arrays.asList(request.getFilterAndSortCriteria()));
}
if (request.getStartIndex() == null) {
cto.setFirstResult(0);
} else {
cto.setFirstResult(request.getStartIndex());
}
if (request.getMaxIndex() != null) {
// Convert the inclusive [startIndex, maxIndex] range into a result count;
// e.g. startIndex=10, maxIndex=19 asks for 10 records. Guard against a null
// startIndex, which would otherwise throw a NullPointerException on unboxing
// (it was only defaulted into the CTO above, not back into the request).
int firstResult = request.getStartIndex() == null ? 0 : request.getStartIndex();
int requestedMaxResults = request.getMaxIndex() - firstResult + 1;
if (requestedMaxResults >= 0 && requestedMaxResults < cto.getMaxResults()) {
cto.setMaxResults(requestedMaxResults);
}
}
return service.fetch(pkg, cto);
}
protected CriteriaTransferObject getDefaultCto() {
CriteriaTransferObject cto = new CriteriaTransferObject();
cto.setMaxResults(50);
return cto;
}
} | 1no label
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_AdminEntityServiceImpl.java |
986 | public static class Tab {
public static class Name {
public static final String OrderItems = "OrderImpl_Order_Items_Tab";
}
public static class Order {
public static final int OrderItems = 2000;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_DiscreteOrderItemImpl.java |
424 | @Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.FIELD})
public @interface OptionFilterParam {
/**
* <p>The field name in the target entity class that should be used to refine the query (i.e. sql where clause). The
* param can be "." delimited in standard bean property fashion. For example, the preferred way of referring to
* DataDrivenEnumerationValueImpl instances belonging to a particular instance of DataDrivenEnumerationImpl is by
* specifying the param value as follows:</p>
*
* <p>param="type.key"</p>
*
* @see org.broadleafcommerce.common.enumeration.domain.DataDrivenEnumerationValueImpl
* @return the field name with which to refine the query
*/
String param();
/**
* <p>The field value that should match for any items returned from the query</p>
*
* @return the field match value
*/
String value();
/**
* <p>This is the type for the value stored in this OptionFilterParam annotation. The system will use this type
* to properly convert the String value to the correct type when executing the query.</p>
*
* @return the final type for the param value
*/
OptionFilterParamType paramType();
} | 0true
| common_src_main_java_org_broadleafcommerce_common_presentation_OptionFilterParam.java |
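A hedged sketch of applying the annotation per the javadoc's "type.key" example. The host annotation and enum constant (@AdminPresentationDataDrivenEnumeration with an optionFilterParams attribute, and OptionFilterParamType.STRING) reflect typical Broadleaf usage but are assumptions here, not taken from this file:

@AdminPresentationDataDrivenEnumeration(
        optionType = DataDrivenEnumerationValueImpl.class,
        optionFilterParams = { @OptionFilterParam(
                param = "type.key",
                value = "ORDER_STATUS",
                paramType = OptionFilterParamType.STRING) })
protected String orderStatus;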
205 | IPropertyChangeListener fontChangeListener = new IPropertyChangeListener() {
@Override
public void propertyChange(PropertyChangeEvent event) {
if (event.getProperty().equals(EDITOR_FONT_PREFERENCE)) {
updateFontAndCaret();
}
}
}; | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_CeylonEditor.java |
240 | @SuppressWarnings("deprecation")
public static abstract class Configuration extends LogBackedXaDataSource.Configuration
{
public static final Setting<Boolean> read_only = GraphDatabaseSettings.read_only;
public static final Setting<File> store_dir = InternalAbstractGraphDatabase.Configuration.store_dir;
public static final Setting<File> neo_store = InternalAbstractGraphDatabase.Configuration.neo_store;
public static final Setting<File> logical_log = InternalAbstractGraphDatabase.Configuration.logical_log;
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java |
1,610 | threadPool.generic().execute(new Runnable() {
@Override
public void run() {
timeoutUpdateTask.onFailure(task.source, new ProcessClusterEventTimeoutException(timeoutUpdateTask.timeout(), task.source));
}
}); | 0true
| src_main_java_org_elasticsearch_cluster_service_InternalClusterService.java |
81 | public final class ClientEndpoint implements Client {
private final ClientEngineImpl clientEngine;
private final Connection conn;
private final ConcurrentMap<String, TransactionContext> transactionContextMap
= new ConcurrentHashMap<String, TransactionContext>();
private final List<Runnable> removeListenerActions = Collections.synchronizedList(new LinkedList<Runnable>());
private final SocketAddress socketAddress;
private String uuid;
private LoginContext loginContext;
private ClientPrincipal principal;
private boolean firstConnection;
private volatile boolean authenticated;
ClientEndpoint(ClientEngineImpl clientEngine, Connection conn, String uuid) {
this.clientEngine = clientEngine;
this.conn = conn;
if (conn instanceof TcpIpConnection) {
TcpIpConnection tcpIpConnection = (TcpIpConnection) conn;
socketAddress = tcpIpConnection.getSocketChannelWrapper().socket().getRemoteSocketAddress();
} else {
socketAddress = null;
}
this.uuid = uuid;
}
Connection getConnection() {
return conn;
}
@Override
public String getUuid() {
return uuid;
}
public boolean live() {
return conn.live();
}
void setLoginContext(LoginContext loginContext) {
this.loginContext = loginContext;
}
public Subject getSubject() {
return loginContext != null ? loginContext.getSubject() : null;
}
public boolean isFirstConnection() {
return firstConnection;
}
void authenticated(ClientPrincipal principal, boolean firstConnection) {
this.principal = principal;
this.uuid = principal.getUuid();
this.firstConnection = firstConnection;
this.authenticated = true;
}
public boolean isAuthenticated() {
return authenticated;
}
public ClientPrincipal getPrincipal() {
return principal;
}
@Override
public InetSocketAddress getSocketAddress() {
return (InetSocketAddress) socketAddress;
}
@Override
public ClientType getClientType() {
switch (conn.getType()) {
case JAVA_CLIENT:
return ClientType.JAVA;
case CSHARP_CLIENT:
return ClientType.CSHARP;
case CPP_CLIENT:
return ClientType.CPP;
case PYTHON_CLIENT:
return ClientType.PYTHON;
case RUBY_CLIENT:
return ClientType.RUBY;
case BINARY_CLIENT:
return ClientType.OTHER;
default:
throw new IllegalArgumentException("Invalid connection type: " + conn.getType());
}
}
public TransactionContext getTransactionContext(String txnId) {
final TransactionContext transactionContext = transactionContextMap.get(txnId);
if (transactionContext == null) {
throw new TransactionException("No transaction context found for txnId:" + txnId);
}
return transactionContext;
}
public void setTransactionContext(TransactionContext transactionContext) {
transactionContextMap.put(transactionContext.getTxnId(), transactionContext);
}
public void removeTransactionContext(String txnId) {
transactionContextMap.remove(txnId);
}
public void setListenerRegistration(final String service, final String topic, final String id) {
removeListenerActions.add(new Runnable() {
@Override
public void run() {
EventService eventService = clientEngine.getEventService();
eventService.deregisterListener(service, topic, id);
}
});
}
public void setDistributedObjectListener(final String id) {
removeListenerActions.add(new Runnable() {
@Override
public void run() {
clientEngine.getProxyService().removeProxyListener(id);
}
});
}
public void clearAllListeners() {
for (Runnable removeAction : removeListenerActions) {
try {
removeAction.run();
} catch (Exception e) {
getLogger().warning("Exception during destroy action", e);
}
}
removeListenerActions.clear();
}
void destroy() throws LoginException {
for (Runnable removeAction : removeListenerActions) {
try {
removeAction.run();
} catch (Exception e) {
getLogger().warning("Exception during destroy action", e);
}
}
LoginContext lc = loginContext;
if (lc != null) {
lc.logout();
}
for (TransactionContext context : transactionContextMap.values()) {
Transaction transaction = TransactionAccessor.getTransaction(context);
if (context.isXAManaged() && transaction.getState() == PREPARED) {
TransactionManagerServiceImpl transactionManager =
(TransactionManagerServiceImpl) clientEngine.getTransactionManagerService();
transactionManager.addTxBackupLogForClientRecovery(transaction);
} else {
try {
context.rollbackTransaction();
} catch (HazelcastInstanceNotActiveException e) {
getLogger().finest(e);
} catch (Exception e) {
getLogger().warning(e);
}
}
}
authenticated = false;
}
private ILogger getLogger() {
return clientEngine.getLogger(getClass());
}
public void sendResponse(Object response, int callId) {
boolean isError = false;
Object clientResponseObject;
if (response == null) {
clientResponseObject = ClientEngineImpl.NULL;
} else if (response instanceof Throwable) {
isError = true;
ClientExceptionConverter converter = ClientExceptionConverters.get(getClientType());
clientResponseObject = converter.convert((Throwable) response);
} else {
clientResponseObject = response;
}
ClientResponse clientResponse = new ClientResponse(clientEngine.toData(clientResponseObject), isError, callId);
clientEngine.sendResponse(this, clientResponse);
}
public void sendEvent(Object event, int callId) {
Data data = clientEngine.toData(event);
clientEngine.sendResponse(this, new ClientResponse(data, callId, true));
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("ClientEndpoint{");
sb.append("conn=").append(conn);
sb.append(", uuid='").append(uuid).append('\'');
sb.append(", firstConnection=").append(firstConnection);
sb.append(", authenticated=").append(authenticated);
sb.append('}');
return sb.toString();
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_client_ClientEndpoint.java |
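setListenerRegistration() and setDistributedObjectListener() above queue undo Runnables that clearAllListeners() and destroy() later drain, isolating failures so one bad action cannot abort the rest. A generic, self-contained sketch of that pattern (names are illustrative, not Hazelcast API):

import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;

final class CleanupActions {
    private final List<Runnable> actions =
            Collections.synchronizedList(new LinkedList<Runnable>());

    void register(Runnable undo) {
        actions.add(undo);
    }

    void runAll() {
        List<Runnable> snapshot;
        synchronized (actions) {
            // synchronizedList requires manual locking while iterating,
            // so take a snapshot and clear under the lock.
            snapshot = new ArrayList<Runnable>(actions);
            actions.clear();
        }
        for (Runnable undo : snapshot) {
            try {
                undo.run();
            } catch (Exception e) {
                // Swallow and continue: remaining cleanup must still run.
                System.err.println("Exception during cleanup action: " + e);
            }
        }
    }
}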
552 | public final class OClusterPositionLong extends OClusterPosition {
private final long value;
public OClusterPositionLong(long value) {
this.value = value;
}
public long getValue() {
return value;
}
@Override
public OClusterPosition inc() {
return new OClusterPositionLong(value + 1);
}
@Override
public OClusterPosition dec() {
return new OClusterPositionLong(value - 1);
}
@Override
public boolean isValid() {
return value != -1;
}
@Override
public boolean isPersistent() {
return value > -1;
}
@Override
public boolean isNew() {
return value < 0;
}
@Override
public boolean isTemporary() {
return value < -1;
}
@Override
public byte[] toStream() {
final byte[] content = new byte[OLongSerializer.LONG_SIZE];
OLongSerializer.INSTANCE.serialize(value, content, 0);
return content;
}
@Override
public int compareTo(OClusterPosition otherPosition) {
final OClusterPositionLong otherLongPosition = (OClusterPositionLong) otherPosition;
if (value > otherLongPosition.value)
return 1;
else if (value < otherLongPosition.value)
return -1;
else
return 0;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
OClusterPositionLong that = (OClusterPositionLong) o;
if (value != that.value)
return false;
return true;
}
@Override
public int hashCode() {
return (int) (value ^ (value >>> 32));
}
@Override
public String toString() {
return Long.toString(value);
}
@Override
public int intValue() {
return (int) value;
}
@Override
public long longValue() {
return value;
}
/**
* This method returns the same value as longValue() because the high and low
* long parts are the same for this implementation.
* @return the same value as longValue().
*/
@Override
public long longValueHigh() {
return value;
}
@Override
public float floatValue() {
return value;
}
@Override
public double doubleValue() {
return value;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_id_OClusterPositionLong.java |
1,402 | public abstract class OMVRBTreeProviderAbstract<K, V> implements OMVRBTreeProvider<K, V>, OSerializableStream {
private static final long serialVersionUID = 1L;
protected final String clusterName;
protected final int clusterId;
protected final ORecordInternal<?> record;
protected final OStorage storage;
protected int size;
protected int pageSize;
protected ORecordId root;
protected int keySize = 1;
public OMVRBTreeProviderAbstract(final ORecordInternal<?> iRecord, final OStorage iStorage, final String iClusterName) {
storage = iStorage;
clusterName = iClusterName;
if (storage != null) {
if (clusterName != null)
clusterId = storage.getClusterIdByName(iClusterName);
else
clusterId = storage.getClusterIdByName(OMetadataDefault.CLUSTER_INDEX_NAME);
} else {
// CLUSTER ID NOT USED FOR DATABASE INDEX
clusterId = -1;
}
record = iRecord;
record.setIdentity(new ORecordId());
updateConfig();
}
public int getKeySize() {
return keySize;
}
public void setKeySize(int keySize) {
this.keySize = keySize;
}
public int getSize() {
return size;
}
public int getDefaultPageSize() {
return pageSize;
}
public int getClusterId() {
return clusterId;
}
public ORID getRoot() {
return root;
}
public boolean setSize(final int iSize) {
if (iSize != size) {
size = iSize;
return setDirty();
}
return false;
}
public boolean setRoot(final ORID iRid) {
if (root == null)
root = new ORecordId();
if (iRid == null)
root.reset();
else if (!iRid.equals(root))
root.copyFrom(iRid);
return setDirty();
}
public boolean isDirty() {
return record.isDirty();
}
/**
* Set the tree as dirty. This happens on change of root.
*
* @return true if the record was newly marked as dirty, false if it was already dirty
*/
public boolean setDirty() {
if (record.isDirty())
return false;
record.setDirty();
return true;
}
public boolean updateConfig() {
boolean isChanged = false;
int newSize = OGlobalConfiguration.MVRBTREE_NODE_PAGE_SIZE.getValueAsInteger();
if (newSize != pageSize) {
pageSize = newSize;
isChanged = true;
}
return isChanged ? setDirty() : false;
}
public void load() {
if (storage == null)
load(getDatabase());
else
load(storage);
}
protected void load(final ODatabaseRecord iDb) {
if (!record.getIdentity().isValid())
return;
record.reload();
fromStream(record.toStream());
}
protected void load(final OStorage iSt) {
if (!record.getIdentity().isValid())
// NOTHING TO LOAD
return;
ORawBuffer raw = iSt.readRecord((ORecordId) record.getIdentity(), null, false, null, false).getResult();
if (raw == null)
throw new OConfigurationException("Cannot load map with id " + record.getIdentity());
record.getRecordVersion().copyFrom(raw.version);
fromStream(raw.buffer);
}
protected void save(final ODatabaseRecord iDb) {
for (int i = 0; i < 3; ++i)
try {
record.fromStream(toStream());
record.setDirty();
record.save(clusterName);
break;
} catch (OConcurrentModificationException e) {
record.reload();
}
}
public void save() {
if (storage == null)
save(getDatabase());
else
save(storage);
}
protected void save(final OStorage iSt) {
record.fromStream(toStream());
if (record.getIdentity().isValid())
// UPDATE IT WITHOUT VERSION CHECK SINCE IT'S ALL LOCKED
record.getRecordVersion().copyFrom(
iSt.updateRecord((ORecordId) record.getIdentity(), record.toStream(),
OVersionFactory.instance().createUntrackedVersion(), record.getRecordType(), (byte) 0, null).getResult());
else {
// CREATE IT
if (record.getIdentity().getClusterId() == ORID.CLUSTER_ID_INVALID)
((ORecordId) record.getIdentity()).clusterId = clusterId;
final OPhysicalPosition ppos = iSt.createRecord(0, (ORecordId) record.getIdentity(), record.toStream(),
OVersionFactory.instance().createVersion(), record.getRecordType(), (byte) 0, null).getResult();
record.getRecordVersion().copyFrom(ppos.recordVersion);
}
record.unsetDirty();
}
public void delete() {
if (storage == null)
delete(getDatabase());
else
delete(storage);
root = null;
}
protected void delete(final ODatabaseRecord iDb) {
for (int i = 0; i < 3; ++i)
try {
iDb.delete(record);
break;
} catch (OConcurrentModificationException e) {
record.reload();
}
}
protected void delete(final OStorage iSt) {
iSt.deleteRecord((ORecordId) record.getIdentity(), record.getRecordVersion(), (byte) 0, null);
}
@Override
public String toString() {
return "index " + record.getIdentity();
}
@Override
public int hashCode() {
final ORID rid = record.getIdentity();
return rid == null ? 0 : rid.hashCode();
}
public ORecord<?> getRecord() {
return record;
}
protected static ODatabaseRecord getDatabase() {
return ODatabaseRecordThreadLocal.INSTANCE.get();
}
public String getClusterName() {
return clusterName;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_type_tree_provider_OMVRBTreeProviderAbstract.java |
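save(ODatabaseRecord) and delete(ODatabaseRecord) above retry up to three times, reloading the record after each OConcurrentModificationException; note the original loops simply give up silently once the attempts are exhausted. A generic sketch of that optimistic-retry shape (the types are illustrative stand-ins for ORecord and OConcurrentModificationException; unlike the original, it rethrows after the last conflict):

interface VersionedOp {
    void attempt() throws ConflictException;  // may hit a version conflict
    void reload();                            // refresh state before retrying
}

class ConflictException extends Exception {}

final class OptimisticRetry {
    static void run(VersionedOp op, int maxAttempts) throws ConflictException {
        for (int attempt = 1; ; attempt++) {
            try {
                op.attempt();
                return;                       // success
            } catch (ConflictException e) {
                if (attempt >= maxAttempts) {
                    throw e;                  // exhausted: surface the conflict
                }
                op.reload();                  // stale state: refresh and retry
            }
        }
    }
}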
1,282 | @Test
public class LocalPaginatedClusterTest {
private static final int RECORD_SYSTEM_INFORMATION = 2 * OByteSerializer.BYTE_SIZE + OIntegerSerializer.INT_SIZE
+ OLongSerializer.LONG_SIZE;
public OPaginatedCluster paginatedCluster = new OPaginatedCluster();
protected String buildDirectory;
protected ODiskCache diskCache;
@BeforeClass
public void beforeClass() throws IOException {
System.out.println("Start LocalPaginatedClusterTest");
buildDirectory = System.getProperty("buildDirectory");
if (buildDirectory == null || buildDirectory.isEmpty())
buildDirectory = ".";
buildDirectory += "/localPaginatedClusterTest";
OLocalPaginatedStorage storage = mock(OLocalPaginatedStorage.class);
OStorageConfiguration storageConfiguration = mock(OStorageConfiguration.class);
storageConfiguration.clusters = new ArrayList<OStorageClusterConfiguration>();
storageConfiguration.fileTemplate = new OStorageSegmentConfiguration();
when(storageConfiguration.getDirectory()).thenReturn(buildDirectory);
diskCache = new OReadWriteDiskCache(400L * 1024 * 1024 * 1024, 2648L * 1024 * 1024,
OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * 1024, 1000000, 100, storage, null, false, false);
OStorageVariableParser variableParser = new OStorageVariableParser(buildDirectory);
when(storage.getDiskCache()).thenReturn(diskCache);
when(storage.getVariableParser()).thenReturn(variableParser);
when(storage.getConfiguration()).thenReturn(storageConfiguration);
when(storage.getMode()).thenReturn("rw");
when(storage.getStoragePath()).thenReturn(buildDirectory);
when(storageConfiguration.getDirectory()).thenReturn(buildDirectory);
paginatedCluster.configure(storage, 5, "paginatedClusterTest", buildDirectory, -1);
paginatedCluster.create(-1);
}
@AfterClass
public void afterClass() throws IOException {
paginatedCluster.delete();
diskCache.delete();
File file = new File(buildDirectory);
Assert.assertTrue(file.delete());
System.out.println("End LocalPaginatedClusterTest");
}
@BeforeMethod
public void beforeMethod() throws IOException {
paginatedCluster.truncate();
}
public void testDeleteRecordAndAddNewOnItsPlace() throws IOException {
byte[] smallRecord = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
OPhysicalPosition physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 1);
Assert.assertEquals(physicalPosition.clusterPosition, OClusterPositionFactory.INSTANCE.valueOf(0));
paginatedCluster.deleteRecord(physicalPosition.clusterPosition);
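// The freed position must not be reused: the asserts below expect the new record at position 1,
// with one extra version increment applied by the cluster itself.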
physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 1);
Assert.assertEquals(physicalPosition.clusterPosition, OClusterPositionFactory.INSTANCE.valueOf(1));
recordVersion.increment();
Assert.assertEquals(physicalPosition.recordVersion, recordVersion);
}
public void testAddOneSmallRecord() throws IOException {
byte[] smallRecord = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
OPhysicalPosition physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 1);
Assert.assertEquals(physicalPosition.clusterPosition, OClusterPositionFactory.INSTANCE.valueOf(0));
ORawBuffer rawBuffer = paginatedCluster.readRecord(physicalPosition.clusterPosition);
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, smallRecord);
Assert.assertEquals(rawBuffer.recordType, 1);
}
public void testAddOneBigRecord() throws IOException {
byte[] bigRecord = new byte[2 * 65536 + 100];
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast();
mersenneTwisterFast.nextBytes(bigRecord);
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
OPhysicalPosition physicalPosition = paginatedCluster.createRecord(bigRecord, recordVersion, (byte) 1);
Assert.assertEquals(physicalPosition.clusterPosition, OClusterPositionFactory.INSTANCE.valueOf(0));
ORawBuffer rawBuffer = paginatedCluster.readRecord(physicalPosition.clusterPosition);
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, bigRecord);
Assert.assertEquals(rawBuffer.recordType, 1);
}
public void testAddManySmallRecords() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testAddManySmallRecords seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(OClusterPage.MAX_RECORD_SIZE - 1) + 1;
byte[] smallRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(smallRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, smallRecord);
}
for (Map.Entry<OClusterPosition, byte[]> entry : positionRecordMap.entrySet()) {
ORawBuffer rawBuffer = paginatedCluster.readRecord(entry.getKey());
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, entry.getValue());
Assert.assertEquals(rawBuffer.recordType, 2);
}
}
public void testAddManyBigRecords() throws IOException {
final int records = 5000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testAddManyBigRecords seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + OClusterPage.MAX_RECORD_SIZE + 1;
byte[] bigRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(bigRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(bigRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, bigRecord);
}
for (Map.Entry<OClusterPosition, byte[]> entry : positionRecordMap.entrySet()) {
ORawBuffer rawBuffer = paginatedCluster.readRecord(entry.getKey());
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, entry.getValue());
Assert.assertEquals(rawBuffer.recordType, 2);
}
}
public void testAddManyRecords() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testAddManyRecords seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + 1;
byte[] smallRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(smallRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, smallRecord);
}
for (Map.Entry<OClusterPosition, byte[]> entry : positionRecordMap.entrySet()) {
ORawBuffer rawBuffer = paginatedCluster.readRecord(entry.getKey());
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, entry.getValue());
Assert.assertEquals(rawBuffer.recordType, 2);
}
}
public void testRemoveHalfSmallRecords() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testRemoveHalfSmallRecords seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(OClusterPage.MAX_RECORD_SIZE - 1) + 1;
byte[] smallRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(smallRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, smallRecord);
}
int deletedRecords = 0;
Assert.assertEquals(records, paginatedCluster.getEntries());
Set<OClusterPosition> deletedPositions = new HashSet<OClusterPosition>();
Iterator<OClusterPosition> positionIterator = positionRecordMap.keySet().iterator();
while (positionIterator.hasNext()) {
OClusterPosition clusterPosition = positionIterator.next();
if (mersenneTwisterFast.nextBoolean()) {
deletedPositions.add(clusterPosition);
Assert.assertTrue(paginatedCluster.deleteRecord(clusterPosition));
deletedRecords++;
Assert.assertEquals(records - deletedRecords, paginatedCluster.getEntries());
positionIterator.remove();
}
}
Assert.assertEquals(paginatedCluster.getEntries(), records - deletedRecords);
for (OClusterPosition deletedPosition : deletedPositions) {
Assert.assertNull(paginatedCluster.readRecord(deletedPosition));
Assert.assertFalse(paginatedCluster.deleteRecord(deletedPosition));
}
for (Map.Entry<OClusterPosition, byte[]> entry : positionRecordMap.entrySet()) {
ORawBuffer rawBuffer = paginatedCluster.readRecord(entry.getKey());
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, entry.getValue());
Assert.assertEquals(rawBuffer.recordType, 2);
}
}
public void testRemoveHalfBigRecords() throws IOException {
final int records = 5000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testRemoveHalfBigRecords seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + OClusterPage.MAX_RECORD_SIZE + 1;
byte[] bigRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(bigRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(bigRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, bigRecord);
}
int deletedRecords = 0;
Assert.assertEquals(records, paginatedCluster.getEntries());
Set<OClusterPosition> deletedPositions = new HashSet<OClusterPosition>();
Iterator<OClusterPosition> positionIterator = positionRecordMap.keySet().iterator();
while (positionIterator.hasNext()) {
OClusterPosition clusterPosition = positionIterator.next();
if (mersenneTwisterFast.nextBoolean()) {
deletedPositions.add(clusterPosition);
Assert.assertTrue(paginatedCluster.deleteRecord(clusterPosition));
deletedRecords++;
Assert.assertEquals(records - deletedRecords, paginatedCluster.getEntries());
positionIterator.remove();
}
}
Assert.assertEquals(paginatedCluster.getEntries(), records - deletedRecords);
for (OClusterPosition deletedPosition : deletedPositions) {
Assert.assertNull(paginatedCluster.readRecord(deletedPosition));
Assert.assertFalse(paginatedCluster.deleteRecord(deletedPosition));
}
for (Map.Entry<OClusterPosition, byte[]> entry : positionRecordMap.entrySet()) {
ORawBuffer rawBuffer = paginatedCluster.readRecord(entry.getKey());
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, entry.getValue());
Assert.assertEquals(rawBuffer.recordType, 2);
}
}
public void testRemoveHalfRecords() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testRemoveHalfRecords seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(3 * OClusterPage.MAX_RECORD_SIZE) + 1;
byte[] bigRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(bigRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(bigRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, bigRecord);
}
int deletedRecords = 0;
Assert.assertEquals(records, paginatedCluster.getEntries());
Set<OClusterPosition> deletedPositions = new HashSet<OClusterPosition>();
Iterator<OClusterPosition> positionIterator = positionRecordMap.keySet().iterator();
while (positionIterator.hasNext()) {
OClusterPosition clusterPosition = positionIterator.next();
if (mersenneTwisterFast.nextBoolean()) {
deletedPositions.add(clusterPosition);
Assert.assertTrue(paginatedCluster.deleteRecord(clusterPosition));
deletedRecords++;
Assert.assertEquals(records - deletedRecords, paginatedCluster.getEntries());
positionIterator.remove();
}
}
Assert.assertEquals(paginatedCluster.getEntries(), records - deletedRecords);
for (OClusterPosition deletedPosition : deletedPositions) {
Assert.assertNull(paginatedCluster.readRecord(deletedPosition));
Assert.assertFalse(paginatedCluster.deleteRecord(deletedPosition));
}
for (Map.Entry<OClusterPosition, byte[]> entry : positionRecordMap.entrySet()) {
ORawBuffer rawBuffer = paginatedCluster.readRecord(entry.getKey());
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, entry.getValue());
Assert.assertEquals(rawBuffer.recordType, 2);
}
}
public void testRemoveHalfRecordsAndAddAnotherHalfAgain() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testRemoveHalfRecordsAndAddAnotherHalfAgain seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(3 * OClusterPage.MAX_RECORD_SIZE) + 1;
byte[] bigRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(bigRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(bigRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, bigRecord);
}
int deletedRecords = 0;
Assert.assertEquals(records, paginatedCluster.getEntries());
Iterator<OClusterPosition> positionIterator = positionRecordMap.keySet().iterator();
while (positionIterator.hasNext()) {
OClusterPosition clusterPosition = positionIterator.next();
if (mersenneTwisterFast.nextBoolean()) {
Assert.assertTrue(paginatedCluster.deleteRecord(clusterPosition));
deletedRecords++;
Assert.assertEquals(paginatedCluster.getEntries(), records - deletedRecords);
positionIterator.remove();
}
}
Assert.assertEquals(paginatedCluster.getEntries(), records - deletedRecords);
for (int i = 0; i < records / 2; i++) {
int recordSize = mersenneTwisterFast.nextInt(3 * OClusterPage.MAX_RECORD_SIZE) + 1;
byte[] bigRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(bigRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(bigRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, bigRecord);
}
Assert.assertEquals(paginatedCluster.getEntries(), (long) (1.5 * records - deletedRecords));
}
public void testUpdateOneSmallRecord() throws IOException {
byte[] smallRecord = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
OPhysicalPosition physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 1);
Assert.assertEquals(physicalPosition.clusterPosition, OClusterPositionFactory.INSTANCE.valueOf(0));
recordVersion.increment();
smallRecord = new byte[] { 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3 };
paginatedCluster.updateRecord(physicalPosition.clusterPosition, smallRecord, recordVersion, (byte) 2);
ORawBuffer rawBuffer = paginatedCluster.readRecord(physicalPosition.clusterPosition);
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, smallRecord);
Assert.assertEquals(rawBuffer.recordType, 2);
}
public void testUpdateOneSmallRecordVersionIsLowerCurrentOne() throws IOException {
byte[] smallRecord = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
OPhysicalPosition physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 1);
Assert.assertEquals(physicalPosition.clusterPosition, OClusterPositionFactory.INSTANCE.valueOf(0));
ORecordVersion updateRecordVersion = OVersionFactory.instance().createVersion();
updateRecordVersion.increment();
smallRecord = new byte[] { 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3 };
paginatedCluster.updateRecord(physicalPosition.clusterPosition, smallRecord, updateRecordVersion, (byte) 2);
ORawBuffer rawBuffer = paginatedCluster.readRecord(physicalPosition.clusterPosition);
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, smallRecord);
Assert.assertEquals(rawBuffer.recordType, 2);
}
public void testUpdateOneSmallRecordVersionIsMinusTwo() throws IOException {
byte[] smallRecord = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
OPhysicalPosition physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 1);
Assert.assertEquals(physicalPosition.clusterPosition, OClusterPositionFactory.INSTANCE.valueOf(0));
ORecordVersion updateRecordVersion = OVersionFactory.instance().createVersion();
updateRecordVersion.setCounter(-2);
smallRecord = new byte[] { 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3 };
paginatedCluster.updateRecord(physicalPosition.clusterPosition, smallRecord, updateRecordVersion, (byte) 2);
ORawBuffer rawBuffer = paginatedCluster.readRecord(physicalPosition.clusterPosition);
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, smallRecord);
Assert.assertEquals(rawBuffer.recordType, 2);
}
public void testUpdateOneBigRecord() throws IOException {
byte[] bigRecord = new byte[2 * 65536 + 100];
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast();
mersenneTwisterFast.nextBytes(bigRecord);
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
OPhysicalPosition physicalPosition = paginatedCluster.createRecord(bigRecord, recordVersion, (byte) 1);
Assert.assertEquals(physicalPosition.clusterPosition, OClusterPositionFactory.INSTANCE.valueOf(0));
recordVersion.increment();
bigRecord = new byte[2 * 65536 + 20];
mersenneTwisterFast.nextBytes(bigRecord);
paginatedCluster.updateRecord(physicalPosition.clusterPosition, bigRecord, recordVersion, (byte) 2);
ORawBuffer rawBuffer = paginatedCluster.readRecord(physicalPosition.clusterPosition);
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.buffer, bigRecord);
Assert.assertEquals(rawBuffer.recordType, 2);
}
public void testUpdateManySmallRecords() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testUpdateManySmallRecords seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
Set<OClusterPosition> updatedPositions = new HashSet<OClusterPosition>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(OClusterPage.MAX_RECORD_SIZE - 1) + 1;
byte[] smallRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(smallRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(smallRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, smallRecord);
}
ORecordVersion newRecordVersion = OVersionFactory.instance().createVersion();
newRecordVersion.copyFrom(recordVersion);
newRecordVersion.increment();
for (OClusterPosition clusterPosition : positionRecordMap.keySet()) {
if (mersenneTwisterFast.nextBoolean()) {
int recordSize = mersenneTwisterFast.nextInt(OClusterPage.MAX_RECORD_SIZE - 1) + 1;
byte[] smallRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(smallRecord);
paginatedCluster.updateRecord(clusterPosition, smallRecord, newRecordVersion, (byte) 3);
positionRecordMap.put(clusterPosition, smallRecord);
updatedPositions.add(clusterPosition);
}
}
for (Map.Entry<OClusterPosition, byte[]> entry : positionRecordMap.entrySet()) {
ORawBuffer rawBuffer = paginatedCluster.readRecord(entry.getKey());
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.buffer, entry.getValue());
if (updatedPositions.contains(entry.getKey())) {
Assert.assertEquals(rawBuffer.version, newRecordVersion);
Assert.assertEquals(rawBuffer.recordType, 3);
} else {
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.recordType, 2);
}
}
}
public void testUpdateManyBigRecords() throws IOException {
final int records = 5000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testUpdateManyBigRecords seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
Set<OClusterPosition> updatedPositions = new HashSet<OClusterPosition>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + OClusterPage.MAX_RECORD_SIZE + 1;
byte[] bigRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(bigRecord);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(bigRecord, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, bigRecord);
}
ORecordVersion newRecordVersion = OVersionFactory.instance().createVersion();
newRecordVersion.copyFrom(recordVersion);
newRecordVersion.increment();
for (OClusterPosition clusterPosition : positionRecordMap.keySet()) {
if (mersenneTwisterFast.nextBoolean()) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + OClusterPage.MAX_RECORD_SIZE + 1;
byte[] bigRecord = new byte[recordSize];
mersenneTwisterFast.nextBytes(bigRecord);
paginatedCluster.updateRecord(clusterPosition, bigRecord, newRecordVersion, (byte) 3);
positionRecordMap.put(clusterPosition, bigRecord);
updatedPositions.add(clusterPosition);
}
}
for (Map.Entry<OClusterPosition, byte[]> entry : positionRecordMap.entrySet()) {
ORawBuffer rawBuffer = paginatedCluster.readRecord(entry.getKey());
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.buffer, entry.getValue());
if (updatedPositions.contains(entry.getKey())) {
Assert.assertEquals(rawBuffer.version, newRecordVersion);
Assert.assertEquals(rawBuffer.recordType, 3);
} else {
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.recordType, 2);
}
}
}
public void testUpdateManyRecords() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testUpdateManyRecords seed : " + seed);
Map<OClusterPosition, byte[]> positionRecordMap = new HashMap<OClusterPosition, byte[]>();
Set<OClusterPosition> updatedPositions = new HashSet<OClusterPosition>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + 1;
byte[] record = new byte[recordSize];
mersenneTwisterFast.nextBytes(record);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(record, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, record);
}
ORecordVersion newRecordVersion = OVersionFactory.instance().createVersion();
newRecordVersion.copyFrom(recordVersion);
newRecordVersion.increment();
for (OClusterPosition clusterPosition : positionRecordMap.keySet()) {
if (mersenneTwisterFast.nextBoolean()) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + 1;
byte[] record = new byte[recordSize];
mersenneTwisterFast.nextBytes(record);
paginatedCluster.updateRecord(clusterPosition, record, newRecordVersion, (byte) 3);
positionRecordMap.put(clusterPosition, record);
updatedPositions.add(clusterPosition);
}
}
for (Map.Entry<OClusterPosition, byte[]> entry : positionRecordMap.entrySet()) {
ORawBuffer rawBuffer = paginatedCluster.readRecord(entry.getKey());
Assert.assertNotNull(rawBuffer);
Assert.assertEquals(rawBuffer.buffer, entry.getValue());
if (updatedPositions.contains(entry.getKey())) {
Assert.assertEquals(rawBuffer.version, newRecordVersion);
Assert.assertEquals(rawBuffer.recordType, 3);
} else {
Assert.assertEquals(rawBuffer.version, recordVersion);
Assert.assertEquals(rawBuffer.recordType, 2);
}
}
}
public void testForwardIteration() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testForwardIteration seed : " + seed);
NavigableMap<OClusterPosition, byte[]> positionRecordMap = new TreeMap<OClusterPosition, byte[]>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + 1;
byte[] record = new byte[recordSize];
mersenneTwisterFast.nextBytes(record);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(record, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, record);
}
Iterator<OClusterPosition> positionIterator = positionRecordMap.keySet().iterator();
while (positionIterator.hasNext()) {
OClusterPosition clusterPosition = positionIterator.next();
if (mersenneTwisterFast.nextBoolean()) {
Assert.assertTrue(paginatedCluster.deleteRecord(clusterPosition));
positionIterator.remove();
}
}
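// Walk forward with ceilingPositions/higherPositions and verify that exactly the surviving positions are visited, in ascending order.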
OPhysicalPosition physicalPosition = new OPhysicalPosition();
physicalPosition.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(0);
OPhysicalPosition[] positions = paginatedCluster.ceilingPositions(physicalPosition);
Assert.assertTrue(positions.length > 0);
int counter = 0;
for (OClusterPosition testedPosition : positionRecordMap.keySet()) {
Assert.assertTrue(positions.length > 0);
Assert.assertEquals(positions[0].clusterPosition, testedPosition);
OPhysicalPosition positionToFind = positions[0];
positions = paginatedCluster.higherPositions(positionToFind);
counter++;
}
Assert.assertEquals(paginatedCluster.getEntries(), counter);
Assert.assertEquals(paginatedCluster.getFirstPosition(), positionRecordMap.firstKey());
Assert.assertEquals(paginatedCluster.getLastPosition(), positionRecordMap.lastKey());
}
public void testBackwardIteration() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testBackwardIteration seed : " + seed);
NavigableMap<OClusterPosition, byte[]> positionRecordMap = new TreeMap<OClusterPosition, byte[]>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + 1;
byte[] record = new byte[recordSize];
mersenneTwisterFast.nextBytes(record);
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(record, recordVersion, (byte) 2);
positionRecordMap.put(physicalPosition.clusterPosition, record);
}
Iterator<OClusterPosition> positionIterator = positionRecordMap.keySet().iterator();
while (positionIterator.hasNext()) {
OClusterPosition clusterPosition = positionIterator.next();
if (mersenneTwisterFast.nextBoolean()) {
Assert.assertTrue(paginatedCluster.deleteRecord(clusterPosition));
positionIterator.remove();
}
}
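// Mirror of the forward test: floorPositions/lowerPositions must visit the survivors in descending order.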
OPhysicalPosition physicalPosition = new OPhysicalPosition();
physicalPosition.clusterPosition = OClusterPositionFactory.INSTANCE.valueOf(Long.MAX_VALUE);
OPhysicalPosition[] positions = paginatedCluster.floorPositions(physicalPosition);
Assert.assertTrue(positions.length > 0);
positionIterator = positionRecordMap.descendingKeySet().iterator();
int counter = 0;
while (positionIterator.hasNext()) {
Assert.assertTrue(positions.length > 0);
OClusterPosition testedPosition = positionIterator.next();
Assert.assertEquals(positions[positions.length - 1].clusterPosition, testedPosition);
OPhysicalPosition positionToFind = positions[positions.length - 1];
positions = paginatedCluster.lowerPositions(positionToFind);
counter++;
}
Assert.assertEquals(paginatedCluster.getEntries(), counter);
Assert.assertEquals(paginatedCluster.getFirstPosition(), positionRecordMap.firstKey());
Assert.assertEquals(paginatedCluster.getLastPosition(), positionRecordMap.lastKey());
}
public void testGetPhysicalPosition() throws IOException {
final int records = 10000;
long seed = System.currentTimeMillis();
MersenneTwisterFast mersenneTwisterFast = new MersenneTwisterFast(seed);
System.out.println("testGetPhysicalPosition seed : " + seed);
Set<OPhysicalPosition> positions = new HashSet<OPhysicalPosition>();
ORecordVersion recordVersion = OVersionFactory.instance().createVersion();
recordVersion.increment();
recordVersion.increment();
for (int i = 0; i < records; i++) {
int recordSize = mersenneTwisterFast.nextInt(2 * OClusterPage.MAX_RECORD_SIZE) + 1;
byte[] record = new byte[recordSize];
mersenneTwisterFast.nextBytes(record);
recordVersion.increment();
final OPhysicalPosition physicalPosition = paginatedCluster.createRecord(record, recordVersion, (byte) i);
positions.add(physicalPosition);
}
Set<OPhysicalPosition> removedPositions = new HashSet<OPhysicalPosition>();
for (OPhysicalPosition position : positions) {
OPhysicalPosition physicalPosition = new OPhysicalPosition();
physicalPosition.clusterPosition = position.clusterPosition;
physicalPosition = paginatedCluster.getPhysicalPosition(physicalPosition);
Assert.assertEquals(physicalPosition.clusterPosition, position.clusterPosition);
Assert.assertEquals(physicalPosition.recordType, position.recordType);
Assert.assertEquals(physicalPosition.recordSize, position.recordSize);
Assert.assertEquals(physicalPosition.dataSegmentPos, position.dataSegmentPos);
Assert.assertEquals(physicalPosition.dataSegmentId, position.dataSegmentId);
if (mersenneTwisterFast.nextBoolean()) {
paginatedCluster.deleteRecord(position.clusterPosition);
removedPositions.add(position);
}
}
for (OPhysicalPosition position : positions) {
OPhysicalPosition physicalPosition = new OPhysicalPosition();
physicalPosition.clusterPosition = position.clusterPosition;
physicalPosition = paginatedCluster.getPhysicalPosition(physicalPosition);
if (removedPositions.contains(position))
Assert.assertNull(physicalPosition);
else {
Assert.assertEquals(physicalPosition.clusterPosition, position.clusterPosition);
Assert.assertEquals(physicalPosition.recordType, position.recordType);
Assert.assertEquals(physicalPosition.recordSize, position.recordSize);
Assert.assertEquals(physicalPosition.dataSegmentPos, position.dataSegmentPos);
Assert.assertEquals(physicalPosition.dataSegmentId, position.dataSegmentId);
}
}
}
public void testCompressionNothing() throws Exception {
paginatedCluster.set(OCluster.ATTRIBUTES.COMPRESSION, ONothingCompression.NAME);
paginatedCluster.set(OCluster.ATTRIBUTES.RECORD_GROW_FACTOR, 1);
byte[] record = new byte[100];
Random random = new Random();
random.nextBytes(record);
OPhysicalPosition physicalPosition = paginatedCluster
.createRecord(record, OVersionFactory.instance().createVersion(), (byte) 1);
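// Inspect the first data page directly through the disk cache: with no compression, the stored bytes
// after the int-size/byte-type header must equal the original record.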
OCacheEntry cacheEntry = diskCache.load(1, 1, false);
OCachePointer pagePointer = cacheEntry.getCachePointer();
OClusterPage page = new OClusterPage(pagePointer.getDataPointer(), false, ODurablePage.TrackMode.NONE);
int recordIndex = (int) (physicalPosition.clusterPosition.longValue() & 0xFFFF);
int recordPageOffset = page.getRecordPageOffset(recordIndex);
byte[] storedEntity = page.getBinaryValue(recordPageOffset, page.getRecordSize(recordIndex));
byte[] storedRecord = new byte[100];
System.arraycopy(storedEntity, OIntegerSerializer.INT_SIZE + OByteSerializer.BYTE_SIZE, storedRecord, 0, storedRecord.length);
Assert.assertEquals(storedRecord, record);
diskCache.release(cacheEntry);
}
public void testCompressionSnappy() throws Exception {
paginatedCluster.set(OCluster.ATTRIBUTES.COMPRESSION, OSnappyCompression.NAME);
paginatedCluster.set(OCluster.ATTRIBUTES.RECORD_GROW_FACTOR, 1);
byte[] record = new byte[100];
Random random = new Random();
random.nextBytes(record);
OPhysicalPosition physicalPosition = paginatedCluster
.createRecord(record, OVersionFactory.instance().createVersion(), (byte) 1);
record = OSnappyCompression.INSTANCE.compress(record);
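// From here on, compare the on-page bytes against the Snappy-compressed form rather than the raw record.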
OCacheEntry cacheEntry = diskCache.load(1, 1, false);
OCachePointer pagePointer = cacheEntry.getCachePointer();
int recordIndex = (int) (physicalPosition.clusterPosition.longValue() & 0xFFFF);
OClusterPage page = new OClusterPage(pagePointer.getDataPointer(), false, ODurablePage.TrackMode.NONE);
int recordPageOffset = page.getRecordPageOffset(recordIndex);
byte[] storedEntity = page.getBinaryValue(recordPageOffset, page.getRecordSize(recordIndex));
byte[] storedRecord = new byte[record.length];
System.arraycopy(storedEntity, OIntegerSerializer.INT_SIZE + OByteSerializer.BYTE_SIZE, storedRecord, 0, storedRecord.length);
Assert.assertEquals(storedRecord, record);
diskCache.release(cacheEntry);
}
public void testRecordGrowFactor() throws Exception {
paginatedCluster.set(OCluster.ATTRIBUTES.COMPRESSION, ONothingCompression.NAME);
paginatedCluster.set(OCluster.ATTRIBUTES.RECORD_GROW_FACTOR, 1.5);
byte[] record = new byte[100];
Random random = new Random();
random.nextBytes(record);
OPhysicalPosition physicalPosition = paginatedCluster
.createRecord(record, OVersionFactory.instance().createVersion(), (byte) 1);
OCacheEntry cacheEntry = diskCache.load(1, 1, false);
OCachePointer pagePointer = cacheEntry.getCachePointer();
OClusterPage page = new OClusterPage(pagePointer.getDataPointer(), false, ODurablePage.TrackMode.NONE);
int recordIndex = (int) (physicalPosition.clusterPosition.longValue() & 0xFFFF);
Assert.assertEquals(page.getRecordSize(recordIndex), ((int) (record.length * 1.5)) + RECORD_SYSTEM_INFORMATION);
diskCache.release(cacheEntry);
paginatedCluster.set(OCluster.ATTRIBUTES.RECORD_GROW_FACTOR, 2);
physicalPosition = paginatedCluster.createRecord(record, OVersionFactory.instance().createVersion(), (byte) 1);
recordIndex = (int) (physicalPosition.clusterPosition.longValue() & 0xFFFF);
cacheEntry = diskCache.load(1, 1, false);
pagePointer = cacheEntry.getCachePointer();
page = new OClusterPage(pagePointer.getDataPointer(), false, ODurablePage.TrackMode.NONE);
Assert.assertEquals(page.getRecordSize(recordIndex), record.length * 2 + RECORD_SYSTEM_INFORMATION);
diskCache.release(cacheEntry);
}
public void testRecordOverflowGrowFactor() throws Exception {
paginatedCluster.set(OCluster.ATTRIBUTES.COMPRESSION, ONothingCompression.NAME);
paginatedCluster.set(OCluster.ATTRIBUTES.RECORD_GROW_FACTOR, 1.5);
paginatedCluster.set(OCluster.ATTRIBUTES.RECORD_OVERFLOW_GROW_FACTOR, 2.5);
byte[] record = new byte[100];
Random random = new Random();
random.nextBytes(record);
ORecordVersion version = OVersionFactory.instance().createVersion();
OPhysicalPosition physicalPosition = paginatedCluster.createRecord(record, version, (byte) 1);
record = new byte[150];
random.nextBytes(record);
paginatedCluster.updateRecord(physicalPosition.clusterPosition, record, version, (byte) 1);
OCacheEntry cacheEntry = diskCache.load(1, 1, false);
int recordIndex = (int) (physicalPosition.clusterPosition.longValue() & 0xFFFF);
OCachePointer pagePointer = cacheEntry.getCachePointer();
OClusterPage page = new OClusterPage(pagePointer.getDataPointer(), false, ODurablePage.TrackMode.NONE);
Assert.assertEquals(page.getRecordSize(recordIndex), record.length + RECORD_SYSTEM_INFORMATION);
diskCache.release(cacheEntry);
record = new byte[200];
random.nextBytes(record);
paginatedCluster.updateRecord(physicalPosition.clusterPosition, record, version, (byte) 1);
cacheEntry = diskCache.load(1, 1, false);
pagePointer = cacheEntry.getCachePointer();
page = new OClusterPage(pagePointer.getDataPointer(), false, ODurablePage.TrackMode.NONE);
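// With an overflow grow factor of 2.5, the 200-byte update appears to be allocated 500 bytes plus headers:
// the original 150-byte slot is kept, and the remainder spills into a following entry linked by a byte flag and a long pointer.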
int fullContentSize = 500 + OIntegerSerializer.INT_SIZE + OByteSerializer.BYTE_SIZE; // type + real size
Assert.assertEquals(page.getRecordSize(recordIndex), 150 + RECORD_SYSTEM_INFORMATION);
fullContentSize -= 150 + RECORD_SYSTEM_INFORMATION - OByteSerializer.BYTE_SIZE - OLongSerializer.LONG_SIZE;
Assert.assertEquals(page.getRecordSize(recordIndex + 1), fullContentSize
+ (OByteSerializer.BYTE_SIZE + OLongSerializer.LONG_SIZE));
diskCache.release(cacheEntry);
}
} | 0true
| core_src_test_java_com_orientechnologies_orient_core_storage_impl_local_paginated_LocalPaginatedClusterTest.java |
32 | public class MetaDiskBuffer extends MetaDataBuffer {
private static final Logger LOGGER = LoggerFactory.getLogger(MetaDiskBuffer.class);
private FastDiskBufferEnv metaEnv;
private EntityStore metaDatabase;
private final Timer updateTimer;
public MetaDiskBuffer() {
this(new FastDiskBufferEnv(null));
}
public MetaDiskBuffer(FastDiskBufferEnv env) {
super(env);
metaEnv = env;
metaDatabase = metaEnv.openMetaDiskStore();
loadAllPartitionsInformation();
long metaRefreshTime = metaEnv.getMetaRefresh();
if (metaRefreshTime == -1) {
updateTimer = null;
} else {
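// Periodically flush every partition's metadata to the disk store at the configured refresh interval.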
updateTimer = new Timer("Meta Data Buffer Update timer");
updateTimer.schedule(new TimerTask() {
@Override
public void run() {
for (int i = 0; i < partitionMetaDatas.length; i++) {
if (partitionMetaDatas[i] != null) {
writePartitionMetaData(i);
}
}
}
}, metaRefreshTime, metaRefreshTime);
}
}
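// The backing store (presumably Berkeley DB JE, given EntityStore/PrimaryIndex) resolves entity classes
// via the thread context classloader, so swap it in temporarily for the lookup.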
private PrimaryIndex<Integer, PartitionMetaData> getMetaStoreIndex() {
ClassLoader originalClassloader = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
return metaDatabase.getPrimaryIndex(Integer.class, PartitionMetaData.class);
} finally {
Thread.currentThread().setContextClassLoader(originalClassloader);
}
}
private void loadAllPartitionsInformation() {
try {
PrimaryIndex<Integer, PartitionMetaData> pi = getMetaStoreIndex();
if (pi.count() == 0) {
writeCurrentBufferPartition(0);
return;
}
EntityCursor<PartitionMetaData> piCursor = pi.entities();
try {
for (PartitionMetaData pObj : piCursor) {
partitionMetaDatas[pObj.getPartitionId()] = pObj;
if (pObj.isCurrentPartition()) {
this.currentPartition = pObj.getPartitionId();
}
}
} finally {
if (piCursor != null) {
piCursor.close();
}
}
} catch (Exception e) {
LOGGER.error("Exception in loadAllPartitionInformation", e);
}
}
public PartitionMetaData removePartitionMetaData(int bufferPartition) {
PartitionMetaData pObj = super.removePartitionMetaData(bufferPartition);
if (pObj == null) { return null; }
try {
getMetaStoreIndex().delete(pObj.getPartitionId());
} catch (Exception e) {
LOGGER.error("Exception in getData", e);
} finally {
metaEnv.flush();
LOGGER.info("Removing partition {} timestamp", bufferPartition);
}
return pObj;
}
public Set<String> resetPartitionMetaData(int bufferPartition) {
Set<String> rowoverFeedIDs = super.resetPartitionMetaData(bufferPartition);
PartitionMetaData pObj = getPartitionMetaData(bufferPartition);
if (pObj != null) {
try {
getMetaStoreIndex().putNoReturn(pObj);
} catch (Exception e) {
LOGGER.error("Exception in getData", e);
} finally {
metaEnv.flush();
LOGGER.info("Removing partition {} timestamp", bufferPartition);
}
}
return rolloverFeedIDs;
}
@Override
public void writePartitionMetaData(int bufferPartition) {
PartitionMetaData pObj = getPartitionMetaData(bufferPartition);
if (pObj == null) {
return;
}
try {
getMetaStoreIndex().putNoReturn(pObj);
} catch (Exception e) {
LOGGER.error("Exception in getData", e);
} finally {
metaEnv.flush();
LOGGER.debug("Putting start time and end time of partition {}", bufferPartition);
}
}
@Override
public void writeCurrentBufferPartition(int newCurrentBufferPartition) {
PartitionMetaData existingPartitionMetaData = getPartitionMetaData(this.currentPartition);
PartitionMetaData newPartitionMetaData = getPartitionMetaData(newCurrentBufferPartition);
if (existingPartitionMetaData != null) {
existingPartitionMetaData.setCurrentPartition(false);
}
if (newPartitionMetaData == null) {
newPartitionMetaData = new PartitionMetaData(newCurrentBufferPartition);
synchronized(this) {
this.partitionMetaDatas[newCurrentBufferPartition] = newPartitionMetaData;
}
}
newPartitionMetaData.setCurrentPartition(true);
boolean failed = false;
try {
if (existingPartitionMetaData != null) {
getMetaStoreIndex().putNoReturn(existingPartitionMetaData);
}
getMetaStoreIndex().putNoReturn(newPartitionMetaData);
} catch (Exception e) {
LOGGER.error("Exception in getData", e);
failed = true;
} finally {
if (!failed) {
metaEnv.flush();
this.currentPartition = newCurrentBufferPartition;
LOGGER.info("moving to partition {}", newCurrentBufferPartition);
}
}
}
public void close() {
metaEnv.closeDatabase(metaDatabase);
super.close();
}
public void closeDatabase() {
metaEnv.closeDatabase(metaDatabase);
super.closeDatabase();
}
public void restart() {
int numOfBufferPartitions = metaEnv.getNumOfBufferPartitions();
for (int i=0; i<numOfBufferPartitions; i++) {
removePartitionMetaData(i);
}
super.restart();
writeCurrentBufferPartition(0);
}
} | 0true
| timeSequenceFeedAggregator_src_main_java_gov_nasa_arc_mct_buffer_disk_internal_MetaDiskBuffer.java |
1,748 | PLANE() {
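// Flat-earth (equirectangular) approximation: degree deltas are treated as planar coordinates,
// which is fast but loses accuracy over long distances and near the poles.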
@Override
public double calculate(double sourceLatitude, double sourceLongitude, double targetLatitude, double targetLongitude, DistanceUnit unit) {
double px = targetLongitude - sourceLongitude;
double py = targetLatitude - sourceLatitude;
return Math.sqrt(px * px + py * py) * unit.getDistancePerDegree();
}
@Override
public double normalize(double distance, DistanceUnit unit) {
return distance;
}
@Override
public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
return new PlaneFixedSourceDistance(sourceLatitude, sourceLongitude, unit);
}
}, | 0true
| src_main_java_org_elasticsearch_common_geo_GeoDistance.java |
49 | class ParameterContextValidator
implements IContextInformationValidator, IContextInformationPresenter {
private int position;
private IContextInformation information;
private int currentParameter;
private CeylonEditor editor;
ParameterContextValidator(CeylonEditor editor) {
this.editor = editor;
}
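// Restyles the parameter hint as the caret moves: the argument currently being typed is bolded,
// and lower-case identifiers are italicized.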
@Override
public boolean updatePresentation(int brokenPosition,
TextPresentation presentation) {
String s = information.getInformationDisplayString();
presentation.clear();
if (this.position==-1) {
presentation.addStyleRange(new StyleRange(0, s.length(),
null, null, SWT.BOLD));
addItalics(presentation, s);
return true;
}
int currentParameter = -1;
CeylonSourceViewer viewer = editor.getCeylonSourceViewer();
int position = viewer.getSelectedRange().x;
IDocument doc = viewer.getDocument();
try {
boolean namedInvocation = doc.getChar(this.position)=='{';
if (!namedInvocation) Assert.isTrue(doc.getChar(this.position)=='(');
// int paren = doc.get(this.position, position-this.position)
// .indexOf(namedInvocation?'{':'(');
// if (paren<0) { //TODO: is this really useful?
// this.position = doc.get(0, position).lastIndexOf('(');
// }
currentParameter = getCharCount(doc,
this.position+1, position,
namedInvocation?";":",", "", true);
}
catch (BadLocationException x) {
return false;
}
if (currentParameter != -1) {
if (this.currentParameter == currentParameter) {
return false;
}
}
presentation.clear();
this.currentParameter = currentParameter;
int[] commas = computeCommaPositions(s);
if (commas.length - 2 < currentParameter) {
presentation.addStyleRange(new StyleRange(0, s.length(),
null, null, SWT.NORMAL));
addItalics(presentation, s);
return true;
}
int start = commas[currentParameter] + 1;
int end = commas[currentParameter + 1];
if (start > 0) {
presentation.addStyleRange(new StyleRange(0, start,
null, null, SWT.NORMAL));
}
if (end > start) {
presentation.addStyleRange(new StyleRange(start, end - start,
null, null, SWT.BOLD));
}
if (end < s.length()) {
presentation.addStyleRange(new StyleRange(end, s.length() - end,
null, null, SWT.NORMAL));
}
addItalics(presentation, s);
return true;
}
private void addItalics(TextPresentation presentation, String s) {
Matcher m2 = p2.matcher(s);
while (m2.find()) {
presentation.mergeStyleRange(new StyleRange(m2.start(), m2.end()-m2.start(),
null, null, SWT.ITALIC));
}
// Matcher m1 = p1.matcher(s);
// while (m1.find()) {
// presentation.mergeStyleRange(new StyleRange(m1.start(), m1.end()-m1.start()+1,
// typeColor, null));
// }
}
// final Pattern p1 = Pattern.compile("\\b\\p{javaUpperCase}\\w*\\b");
final Pattern p2 = Pattern.compile("\\b\\p{javaLowerCase}\\w*\\b");
// final Color typeColor = color(getCurrentTheme().getColorRegistry(), TYPES);
@Override
public void install(IContextInformation info, ITextViewer viewer,
int documentPosition) {
if (info instanceof InvocationCompletionProposal.ParameterContextInformation) {
ParameterContextInformation pci =
(InvocationCompletionProposal.ParameterContextInformation) info;
this.position = pci.getArgumentListOffset();
}
else {
this.position = -1;
}
Assert.isTrue(viewer==editor.getCeylonSourceViewer());
this.information = info;
this.currentParameter= -1;
}
@Override
public boolean isContextInformationValid(int brokenPosition) {
if (editor.isInLinkedMode()) {
Object linkedModeOwner = editor.getLinkedModeOwner();
if (linkedModeOwner instanceof InvocationCompletionProposal ||
linkedModeOwner instanceof RefinementCompletionProposal) {
return true;
}
}
try {
CeylonSourceViewer viewer = editor.getCeylonSourceViewer();
int position = viewer.getSelectedRange().x;
if (position < this.position) {
return false;
}
IDocument document = viewer.getDocument();
IRegion line =
document.getLineInformationOfOffset(this.position);
if (position < line.getOffset() ||
position >= document.getLength()) {
return false;
}
// System.out.println(document.get(this.position, position-this.position));
int semiCount = getCharCount(document, this.position, position, ";", "", true);
int fenceCount = getCharCount(document, this.position, position, "{(", "})", false);
return semiCount==0 && fenceCount>0;
}
catch (BadLocationException x) {
return false;
}
}
/*@Override
public boolean isContextInformationValid(int offset) {
IContextInformation[] infos= computeContextInformation(viewer, offset);
if (infos != null && infos.length > 0) {
for (int i= 0; i < infos.length; i++)
if (information.equals(infos[i]))
return true;
}
return false;
}*/
private static final int NONE = 0;
private static final int BRACKET = 1;
private static final int BRACE = 2;
private static final int PAREN = 3;
private static final int ANGLE = 4;
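// Counts 'increments' minus 'decrements' characters between start and end, skipping comments and
// string/char literals and, when considerNesting is set, anything inside nested [], (), {}, or <> type arguments.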
private static int getCharCount(IDocument document,
final int start, final int end,
String increments, String decrements,
boolean considerNesting)
throws BadLocationException {
Assert.isTrue((increments.length() != 0 || decrements.length() != 0)
&& !increments.equals(decrements));
int nestingMode = NONE;
int nestingLevel = 0;
int charCount = 0;
int offset = start;
char prev = ' ';
while (offset < end) {
char curr = document.getChar(offset++);
switch (curr) {
case '/':
if (offset < end) {
char next = document.getChar(offset);
if (next == '*') {
// a comment starts, advance to the comment end
offset= getCommentEnd(document, offset + 1, end);
}
else if (next == '/') {
// '//'-comment: nothing to do anymore on this line
int nextLine= document.getLineOfOffset(offset) + 1;
if (nextLine == document.getNumberOfLines()) {
offset= end;
}
else {
offset= document.getLineOffset(nextLine);
}
}
}
break;
case '*':
if (offset < end) {
char next= document.getChar(offset);
if (next == '/') {
// we have been in a comment: forget what we read before
charCount= 0;
++ offset;
}
}
break;
case '"':
case '\'':
offset= getStringEnd(document, offset, end, curr);
break;
case '[':
if (considerNesting) {
if (nestingMode == BRACKET || nestingMode == NONE) {
nestingMode= BRACKET;
nestingLevel++;
}
break;
}
//$FALL-THROUGH$
case ']':
if (considerNesting) {
if (nestingMode == BRACKET) {
if (--nestingLevel == 0) {
nestingMode= NONE;
}
}
break;
}
//$FALL-THROUGH$
case '(':
if (considerNesting) {
if (nestingMode == ANGLE) {
// generics heuristic failed
nestingMode=PAREN;
nestingLevel= 1;
}
if (nestingMode == PAREN || nestingMode == NONE) {
nestingMode= PAREN;
nestingLevel++;
}
break;
}
//$FALL-THROUGH$
case ')':
if (considerNesting) {
if (nestingMode == PAREN) {
if (--nestingLevel == 0) {
nestingMode= NONE;
}
}
break;
}
//$FALL-THROUGH$
case '{':
if (considerNesting) {
if (nestingMode == ANGLE) {
// generics heuristic failed
nestingMode=BRACE;
nestingLevel= 1;
}
if (nestingMode == BRACE || nestingMode == NONE) {
nestingMode= BRACE;
nestingLevel++;
}
break;
}
//$FALL-THROUGH$
case '}':
if (considerNesting) {
if (nestingMode == BRACE) {
if (--nestingLevel == 0) {
nestingMode= NONE;
}
}
break;
}
//$FALL-THROUGH$
case '<':
if (considerNesting) {
if (nestingMode == ANGLE || nestingMode == NONE
/*&& checkGenericsHeuristic(document, offset - 1, start - 1)*/) {
nestingMode= ANGLE;
nestingLevel++;
}
break;
}
//$FALL-THROUGH$
case '>':
if (considerNesting
&& prev != '=') { //check that it's not a fat arrow
if (nestingMode == ANGLE) {
if (--nestingLevel == 0) {
nestingMode= NONE;
}
}
break;
}
//$FALL-THROUGH$
default:
if (nestingLevel==0) {
if (increments.indexOf(curr) >= 0) {
++ charCount;
}
if (decrements.indexOf(curr) >= 0) {
-- charCount;
}
}
}
prev = curr;
}
return charCount;
}
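// Variant of getCharCount: returns the offset where the running count first reaches 'count', or -1;
// an unbalanced ')', '}', or non-fat-arrow '>' at top level also ends the scan.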
static int findCharCount(int count, IDocument document,
final int start, final int end,
String increments, String decrements,
boolean considerNesting)
throws BadLocationException {
Assert.isTrue((increments.length() != 0 || decrements.length() != 0)
&& !increments.equals(decrements));
final int NONE= 0;
final int BRACKET= 1;
final int BRACE= 2;
final int PAREN= 3;
final int ANGLE= 4;
int nestingMode= NONE;
int nestingLevel= 0;
int charCount= 0;
int offset= start;
boolean lastWasEquals = false;
while (offset < end) {
if (nestingLevel == 0) {
if (count==charCount) {
return offset-1;
}
}
char curr= document.getChar(offset++);
switch (curr) {
case '/':
if (offset < end) {
char next= document.getChar(offset);
if (next == '*') {
// a comment starts, advance to the comment end
offset= getCommentEnd(document, offset + 1, end);
}
else if (next == '/') {
// '//'-comment: nothing to do anymore on this line
int nextLine= document.getLineOfOffset(offset) + 1;
if (nextLine == document.getNumberOfLines()) {
offset= end;
}
else {
offset= document.getLineOffset(nextLine);
}
}
}
break;
case '*':
if (offset < end) {
char next= document.getChar(offset);
if (next == '/') {
// we have been in a comment: forget what we read before
charCount= 0;
++ offset;
}
}
break;
case '"':
case '\'':
offset= getStringEnd(document, offset, end, curr);
break;
case '[':
if (considerNesting) {
if (nestingMode == BRACKET || nestingMode == NONE) {
nestingMode= BRACKET;
nestingLevel++;
}
break;
}
//$FALL-THROUGH$
case ']':
if (considerNesting) {
if (nestingMode == BRACKET)
if (--nestingLevel == 0) {
nestingMode= NONE;
}
break;
}
//$FALL-THROUGH$
case '(':
if (considerNesting) {
if (nestingMode == ANGLE) {
// generics heuristic failed
nestingMode=PAREN;
nestingLevel= 1;
}
if (nestingMode == PAREN || nestingMode == NONE) {
nestingMode= PAREN;
nestingLevel++;
}
break;
}
//$FALL-THROUGH$
case ')':
if (considerNesting) {
if (nestingMode == NONE) {
return offset-1;
}
if (nestingMode == PAREN) {
if (--nestingLevel == 0) {
nestingMode= NONE;
}
}
break;
}
//$FALL-THROUGH$
case '{':
if (considerNesting) {
if (nestingMode == ANGLE) {
// generics heuristic failed
nestingMode=BRACE;
nestingLevel= 1;
}
if (nestingMode == BRACE || nestingMode == NONE) {
nestingMode= BRACE;
nestingLevel++;
}
break;
}
//$FALL-THROUGH$
case '}':
if (considerNesting) {
if (nestingMode == NONE) {
return offset-1;
}
if (nestingMode == BRACE) {
if (--nestingLevel == 0) {
nestingMode= NONE;
}
}
break;
}
//$FALL-THROUGH$
case '<':
if (considerNesting) {
if (nestingMode == ANGLE || nestingMode == NONE /*&& checkGenericsHeuristic(document, offset - 1, start - 1)*/) {
nestingMode= ANGLE;
nestingLevel++;
}
break;
}
//$FALL-THROUGH$
case '>':
if (!lastWasEquals) {
if (nestingMode == NONE) {
return offset-1;
}
if (considerNesting) {
if (nestingMode == ANGLE) {
if (--nestingLevel == 0) {
nestingMode= NONE;
}
}
break;
}
}
//$FALL-THROUGH$
default:
if (nestingLevel == 0) {
if (increments.indexOf(curr) >= 0) {
++ charCount;
}
if (decrements.indexOf(curr) >= 0) {
-- charCount;
}
}
}
lastWasEquals = curr=='=';
}
return -1;
}
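// Finds top-level ',' and ';' separators in the display string, ignoring any nested inside brackets
// or type arguments; sentinel entries -1 and length bracket the positions.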
private static int[] computeCommaPositions(String code) {
final int length= code.length();
int pos = 0;
int angleLevel = 0;
List<Integer> positions= new ArrayList<Integer>();
positions.add(Integer.valueOf(-1));
char prev = ' ';
while (pos < length && pos != -1) {
char ch = code.charAt(pos);
switch (ch) {
case ',':
case ';':
if (angleLevel == 0) {
positions.add(Integer.valueOf(pos));
}
break;
case '<':
case '(':
case '{':
case '[':
angleLevel++;
break;
case '>':
if (prev=='=') break;
case ')':
case '}':
case ']':
angleLevel--;
break;
// case '[':
// pos= code.indexOf(']', pos);
// break;
default:
break;
}
prev = ch;
if (pos != -1) {
pos++;
}
}
positions.add(Integer.valueOf(length));
int[] fields= new int[positions.size()];
for (int i= 0; i < fields.length; i++) {
fields[i]= positions.get(i).intValue();
}
return fields;
}
private static int getCommentEnd(IDocument d, int pos, int end)
throws BadLocationException {
while (pos < end) {
char curr= d.getChar(pos);
pos++;
if (curr == '*') {
if (pos < end && d.getChar(pos) == '/') {
return pos + 1;
}
}
}
return end;
}
private static int getStringEnd(IDocument d, int pos, int end, char ch)
throws BadLocationException {
while (pos < end) {
char curr= d.getChar(pos);
pos++;
if (curr == '\\') {
// ignore escaped characters
pos++;
}
else if (curr == ch) {
return pos;
}
}
return end;
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_ParameterContextValidator.java |
739 | public class CollectionSizeRequest extends CollectionRequest {
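// Client-protocol request for a collection's size; it simply delegates to CollectionSizeOperation and requires read permission.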
public CollectionSizeRequest() {
}
public CollectionSizeRequest(String name) {
super(name);
}
@Override
protected Operation prepareOperation() {
return new CollectionSizeOperation(name);
}
@Override
public int getClassId() {
return CollectionPortableHook.COLLECTION_SIZE;
}
@Override
public String getRequiredAction() {
return ActionConstants.ACTION_READ;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_collection_client_CollectionSizeRequest.java |
1,624 | public class TimedMemberStateFactory {
private final HazelcastInstanceImpl instance;
private final int maxVisibleInstanceCount;
public TimedMemberStateFactory(HazelcastInstanceImpl instance) {
this.instance = instance;
maxVisibleInstanceCount = instance.node.groupProperties.MC_MAX_INSTANCE_COUNT.getInteger();
}
public TimedMemberState createTimedMemberState() {
MemberStateImpl memberState = new MemberStateImpl();
createMemberState(memberState);
GroupConfig groupConfig = instance.getConfig().getGroupConfig();
TimedMemberState timedMemberState = new TimedMemberState();
timedMemberState.setMaster(instance.node.isMaster());
if (timedMemberState.getMaster()) {
timedMemberState.setMemberList(new ArrayList<String>());
Set<Member> memberSet = instance.getCluster().getMembers();
for (Member member : memberSet) {
MemberImpl memberImpl = (MemberImpl) member;
Address address = memberImpl.getAddress();
timedMemberState.getMemberList().add(address.getHost() + ":" + address.getPort());
}
}
timedMemberState.setMemberState(memberState);
timedMemberState.setClusterName(groupConfig.getName());
timedMemberState.setInstanceNames(getLongInstanceNames());
return timedMemberState;
}
private void createMemberState(MemberStateImpl memberState) {
final Node node = instance.node;
memberState.setAddress(node.getThisAddress());
PartitionService partitionService = instance.getPartitionService();
Set<Partition> partitions = partitionService.getPartitions();
memberState.clearPartitions();
for (Partition partition : partitions) {
if (partition.getOwner() != null && partition.getOwner().localMember()) {
memberState.addPartition(partition.getPartitionId());
}
}
Collection<DistributedObject> proxyObjects = new ArrayList<DistributedObject>(instance.getDistributedObjects());
createRuntimeProps(memberState);
createMemState(memberState, proxyObjects);
}
private void createRuntimeProps(MemberStateImpl memberState) {
Runtime runtime = Runtime.getRuntime();
ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean();
RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
ClassLoadingMXBean clMxBean = ManagementFactory.getClassLoadingMXBean();
MemoryMXBean memoryMxBean = ManagementFactory.getMemoryMXBean();
MemoryUsage heapMemory = memoryMxBean.getHeapMemoryUsage();
MemoryUsage nonHeapMemory = memoryMxBean.getNonHeapMemoryUsage();
Map<String, Long> map = new HashMap<String, Long>();
map.put("runtime.availableProcessors", Integer.valueOf(runtime.availableProcessors()).longValue());
map.put("date.startTime", runtimeMxBean.getStartTime());
map.put("seconds.upTime", runtimeMxBean.getUptime());
map.put("memory.maxMemory", runtime.maxMemory());
map.put("memory.freeMemory", runtime.freeMemory());
map.put("memory.totalMemory", runtime.totalMemory());
map.put("memory.heapMemoryMax", heapMemory.getMax());
map.put("memory.heapMemoryUsed", heapMemory.getUsed());
map.put("memory.nonHeapMemoryMax", nonHeapMemory.getMax());
map.put("memory.nonHeapMemoryUsed", nonHeapMemory.getUsed());
map.put("runtime.totalLoadedClassCount", clMxBean.getTotalLoadedClassCount());
map.put("runtime.loadedClassCount", Integer.valueOf(clMxBean.getLoadedClassCount()).longValue());
map.put("runtime.unloadedClassCount", clMxBean.getUnloadedClassCount());
map.put("runtime.totalStartedThreadCount", threadMxBean.getTotalStartedThreadCount());
map.put("runtime.threadCount", Integer.valueOf(threadMxBean.getThreadCount()).longValue());
map.put("runtime.peakThreadCount", Integer.valueOf(threadMxBean.getPeakThreadCount()).longValue());
map.put("runtime.daemonThreadCount", Integer.valueOf(threadMxBean.getDaemonThreadCount()).longValue());
OperatingSystemMXBean osMxBean = ManagementFactory.getOperatingSystemMXBean();
map.put("osMemory.freePhysicalMemory", get(osMxBean, "getFreePhysicalMemorySize", 0L));
map.put("osMemory.committedVirtualMemory", get(osMxBean, "getCommittedVirtualMemorySize", 0L));
map.put("osMemory.totalPhysicalMemory", get(osMxBean, "getTotalPhysicalMemorySize", 0L));
map.put("osSwap.freeSwapSpace", get(osMxBean, "getFreeSwapSpaceSize", 0L));
map.put("osSwap.totalSwapSpace", get(osMxBean, "getTotalSwapSpaceSize", 0L));
map.put("os.maxFileDescriptorCount", get(osMxBean, "getMaxFileDescriptorCount", 0L));
map.put("os.openFileDescriptorCount", get(osMxBean, "getOpenFileDescriptorCount", 0L));
map.put("os.processCpuLoad", get(osMxBean, "getProcessCpuLoad", -1L));
map.put("os.systemLoadAverage", get(osMxBean, "getSystemLoadAverage", -1L));
map.put("os.systemCpuLoad", get(osMxBean, "getSystemCpuLoad", -1L));
map.put("os.processCpuTime", get(osMxBean, "getProcessCpuTime", 0L));
map.put("os.availableProcessors", get(osMxBean, "getAvailableProcessors", 0L));
memberState.setRuntimeProps(map);
}
private static Long get(OperatingSystemMXBean mbean, String methodName, Long defaultValue) {
try {
Method method = mbean.getClass().getMethod(methodName);
method.setAccessible(true);
Object value = method.invoke(mbean);
if (value == null) {
return defaultValue;
}
if (value instanceof Integer) {
return (long) (Integer) value;
}
if (value instanceof Double) {
double v = (Double) value;
return Math.round(v * 100);
}
if (value instanceof Long) {
return (Long) value;
}
return defaultValue;
} catch (RuntimeException re) {
throw re;
} catch (Exception e) {
return defaultValue;
}
}
private void createMemState(MemberStateImpl memberState,
Collection<DistributedObject> distributedObjects) {
int count = 0;
final Config config = instance.getConfig();
for (DistributedObject distributedObject : distributedObjects) {
if (count < maxVisibleInstanceCount) {
if (distributedObject instanceof IMap) {
IMap map = (IMap) distributedObject;
if (config.findMapConfig(map.getName()).isStatisticsEnabled()) {
memberState.putLocalMapStats(map.getName(), (LocalMapStatsImpl) map.getLocalMapStats());
count++;
}
} else if (distributedObject instanceof IQueue) {
IQueue queue = (IQueue) distributedObject;
if (config.findQueueConfig(queue.getName()).isStatisticsEnabled()) {
LocalQueueStatsImpl stats = (LocalQueueStatsImpl) queue.getLocalQueueStats();
memberState.putLocalQueueStats(queue.getName(), stats);
count++;
}
} else if (distributedObject instanceof ITopic) {
ITopic topic = (ITopic) distributedObject;
if (config.findTopicConfig(topic.getName()).isStatisticsEnabled()) {
LocalTopicStatsImpl stats = (LocalTopicStatsImpl) topic.getLocalTopicStats();
memberState.putLocalTopicStats(topic.getName(), stats);
count++;
}
} else if (distributedObject instanceof MultiMap) {
MultiMap multiMap = (MultiMap) distributedObject;
if (config.findMultiMapConfig(multiMap.getName()).isStatisticsEnabled()) {
LocalMultiMapStatsImpl stats = (LocalMultiMapStatsImpl) multiMap.getLocalMultiMapStats();
memberState.putLocalMultiMapStats(multiMap.getName(), stats);
count++;
}
} else if (distributedObject instanceof IExecutorService) {
IExecutorService executorService = (IExecutorService) distributedObject;
if (config.findExecutorConfig(executorService.getName()).isStatisticsEnabled()) {
LocalExecutorStatsImpl stats = (LocalExecutorStatsImpl) executorService.getLocalExecutorStats();
memberState.putLocalExecutorStats(executorService.getName(), stats);
count++;
}
}
}
}
}
private Set<String> getLongInstanceNames() {
Set<String> setLongInstanceNames = new HashSet<String>(maxVisibleInstanceCount);
Collection<DistributedObject> proxyObjects = new ArrayList<DistributedObject>(instance.getDistributedObjects());
collectInstanceNames(setLongInstanceNames, proxyObjects);
return setLongInstanceNames;
}
private void collectInstanceNames(Set<String> setLongInstanceNames,
Collection<DistributedObject> distributedObjects) {
int count = 0;
final Config config = instance.getConfig();
for (DistributedObject distributedObject : distributedObjects) {
if (count < maxVisibleInstanceCount) {
if (distributedObject instanceof MultiMap) {
MultiMap multiMap = (MultiMap) distributedObject;
if (config.findMultiMapConfig(multiMap.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("m:" + multiMap.getName());
count++;
}
} else if (distributedObject instanceof IMap) {
IMap map = (IMap) distributedObject;
if (config.findMapConfig(map.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("c:" + map.getName());
count++;
}
} else if (distributedObject instanceof IQueue) {
IQueue queue = (IQueue) distributedObject;
if (config.findQueueConfig(queue.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("q:" + queue.getName());
count++;
}
} else if (distributedObject instanceof ITopic) {
ITopic topic = (ITopic) distributedObject;
if (config.findTopicConfig(topic.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("t:" + topic.getName());
count++;
}
} else if (distributedObject instanceof IExecutorService) {
IExecutorService executorService = (IExecutorService) distributedObject;
if (config.findExecutorConfig(executorService.getName()).isStatisticsEnabled()) {
setLongInstanceNames.add("e:" + executorService.getName());
count++;
}
}
}
}
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_management_TimedMemberStateFactory.java |
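The get(...) helper in the row above illustrates a reflection-with-fallback technique: optional OperatingSystemMXBean metrics that only exist on some JVMs are invoked by name, and a default is returned when the method is missing or the call fails. A minimal, self-contained sketch of that pattern follows; OsMetrics and readMetric are illustrative names, not part of the dataset row.
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.reflect.Method;
public final class OsMetrics {
    // Reads an optional OS metric by method name, falling back to a default
    // when the method is absent or inaccessible (e.g. on non-HotSpot JVMs).
    static long readMetric(String methodName, long defaultValue) {
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        try {
            Method m = os.getClass().getMethod(methodName);
            m.setAccessible(true); // the implementation class is not public
            Object value = m.invoke(os);
            if (value instanceof Number) {
                return ((Number) value).longValue();
            }
            return defaultValue;
        } catch (Exception e) {
            return defaultValue; // metric unsupported on this JVM
        }
    }
    public static void main(String[] args) {
        System.out.println("total physical memory: " + readMetric("getTotalPhysicalMemorySize", 0L));
        System.out.println("open file descriptors: " + readMetric("getOpenFileDescriptorCount", 0L));
    }
}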
727 | @Embeddable
public class Weight implements Serializable {
private static final long serialVersionUID = 1L;
@Column(name = "WEIGHT")
@AdminPresentation(friendlyName = "ProductWeight_Product_Weight", order = 8000,
tab = ProductImpl.Presentation.Tab.Name.Shipping, tabOrder = ProductImpl.Presentation.Tab.Order.Shipping,
group = ProductImpl.Presentation.Group.Name.Shipping, groupOrder = ProductImpl.Presentation.Group.Order.Shipping)
protected BigDecimal weight;
@Column(name = "WEIGHT_UNIT_OF_MEASURE")
@AdminPresentation(friendlyName = "ProductWeight_Product_Weight_Units", order = 9000,
tab = ProductImpl.Presentation.Tab.Name.Shipping, tabOrder = ProductImpl.Presentation.Tab.Order.Shipping,
group = ProductImpl.Presentation.Group.Name.Shipping, groupOrder = ProductImpl.Presentation.Group.Order.Shipping,
fieldType = SupportedFieldType.BROADLEAF_ENUMERATION,
broadleafEnumeration = "org.broadleafcommerce.common.util.WeightUnitOfMeasureType")
protected String weightUnitOfMeasure;
public WeightUnitOfMeasureType getWeightUnitOfMeasure() {
return WeightUnitOfMeasureType.getInstance(weightUnitOfMeasure);
}
public void setWeightUnitOfMeasure(WeightUnitOfMeasureType weightUnitOfMeasure) {
if (weightUnitOfMeasure != null) {
this.weightUnitOfMeasure = weightUnitOfMeasure.getType();
}
}
public BigDecimal getWeight() {
return weight;
}
public void setWeight(BigDecimal weight) {
this.weight = weight;
}
} | 1no label
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_Weight.java |
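The weightUnitOfMeasure column above persists an extensible type-safe enumeration as its String key and rehydrates it via getInstance on read. A minimal sketch of that persist-as-string pattern, with a hypothetical UnitOfMeasure standing in for WeightUnitOfMeasureType:
import java.util.HashMap;
import java.util.Map;
public class UnitOfMeasure {
    // Registry of all instances, keyed by the String stored in the column.
    private static final Map<String, UnitOfMeasure> TYPES = new HashMap<>();
    public static final UnitOfMeasure KILOGRAMS = new UnitOfMeasure("KILOGRAMS");
    public static final UnitOfMeasure POUNDS = new UnitOfMeasure("POUNDS");
    private final String type;
    private UnitOfMeasure(String type) {
        this.type = type;
        TYPES.put(type, this); // self-register on construction
    }
    public String getType() {
        return type;
    }
    // Mirrors WeightUnitOfMeasureType.getInstance: String key -> instance.
    public static UnitOfMeasure getInstance(String type) {
        return TYPES.get(type);
    }
    public static void main(String[] args) {
        System.out.println(UnitOfMeasure.getInstance("POUNDS").getType());
    }
}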
1,988 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_CUSTOMER_PHONE", uniqueConstraints = @UniqueConstraint(name="CSTMR_PHONE_UNIQUE_CNSTRNT", columnNames = { "CUSTOMER_ID", "PHONE_NAME" }))
@AdminPresentationMergeOverrides(
{
@AdminPresentationMergeOverride(name = "phone.phoneNumber", mergeEntries =
@AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.PROMINENT, booleanOverrideValue = true))
}
)
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE)
public class CustomerPhoneImpl implements CustomerPhone {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "CustomerPhoneId")
@GenericGenerator(
name="CustomerPhoneId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="CustomerPhoneImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.profile.core.domain.CustomerPhoneImpl")
}
)
@Column(name = "CUSTOMER_PHONE_ID")
protected Long id;
@Column(name = "PHONE_NAME")
@AdminPresentation(friendlyName = "CustomerPhoneImpl_Phone_Name", order=1, group = "CustomerPhoneImpl_Identification",
groupOrder = 1, prominent = true, gridOrder = 1)
protected String phoneName;
@ManyToOne(cascade = {CascadeType.PERSIST, CascadeType.MERGE}, targetEntity = CustomerImpl.class, optional=false)
@JoinColumn(name = "CUSTOMER_ID")
@AdminPresentation(excluded = true, visibility = VisibilityEnum.HIDDEN_ALL)
protected Customer customer;
@ManyToOne(cascade = CascadeType.ALL, targetEntity = PhoneImpl.class, optional=false)
@JoinColumn(name = "PHONE_ID")
@Index(name="CUSTPHONE_PHONE_INDEX", columnNames={"PHONE_ID"})
protected Phone phone;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getPhoneName() {
return phoneName;
}
@Override
public void setPhoneName(String phoneName) {
this.phoneName = phoneName;
}
@Override
public Customer getCustomer() {
return customer;
}
@Override
public void setCustomer(Customer customer) {
this.customer = customer;
}
@Override
public Phone getPhone() {
return phone;
}
@Override
public void setPhone(Phone phone) {
this.phone = phone;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((customer == null) ? 0 : customer.hashCode());
result = prime * result + ((phone == null) ? 0 : phone.hashCode());
result = prime * result + ((phoneName == null) ? 0 : phoneName.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
CustomerPhoneImpl other = (CustomerPhoneImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (customer == null) {
if (other.customer != null)
return false;
} else if (!customer.equals(other.customer))
return false;
if (phone == null) {
if (other.phone != null)
return false;
} else if (!phone.equals(other.phone))
return false;
if (phoneName == null) {
if (other.phoneName != null)
return false;
} else if (!phoneName.equals(other.phoneName))
return false;
return true;
}
} | 1no label
| core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_domain_CustomerPhoneImpl.java |
1,000 | protected class ReplicaOperationRequest extends TransportRequest {
public int shardId;
public ReplicaRequest request;
public ReplicaOperationRequest() {
}
public ReplicaOperationRequest(int shardId, ReplicaRequest request) {
super(request);
this.shardId = shardId;
this.request = request;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = in.readVInt();
request = newReplicaRequestInstance();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(shardId);
request.writeTo(out);
}
} | 0true
| src_main_java_org_elasticsearch_action_support_replication_TransportShardReplicationOperationAction.java |
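The readFrom/writeTo pair above follows a symmetric-serialization idiom: fields are written and read back in exactly the same order, so the two methods mirror each other line for line. A minimal sketch of that idiom using plain java.io streams rather than Elasticsearch's StreamInput/StreamOutput; ShardEnvelope is a hypothetical name.
import java.io.*;
class ShardEnvelope {
    int shardId;
    byte[] payload;
    void writeTo(DataOutput out) throws IOException {
        out.writeInt(shardId);          // same field order as readFrom
        out.writeInt(payload.length);
        out.write(payload);
    }
    void readFrom(DataInput in) throws IOException {
        shardId = in.readInt();         // mirrors writeTo exactly
        payload = new byte[in.readInt()];
        in.readFully(payload);
    }
    public static void main(String[] args) throws IOException {
        ShardEnvelope original = new ShardEnvelope();
        original.shardId = 7;
        original.payload = "hello".getBytes("UTF-8");
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.writeTo(new DataOutputStream(bytes));
        ShardEnvelope copy = new ShardEnvelope();
        copy.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.shardId + " " + new String(copy.payload, "UTF-8"));
    }
}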
1,167 | public class TransportBenchmark {
static enum Type {
LOCAL {
@Override
public Transport newTransport(Settings settings, ThreadPool threadPool) {
return new LocalTransport(settings, threadPool, Version.CURRENT);
}
},
NETTY {
@Override
public Transport newTransport(Settings settings, ThreadPool threadPool) {
return new NettyTransport(settings, threadPool, new NetworkService(ImmutableSettings.EMPTY), Version.CURRENT);
}
};
public abstract Transport newTransport(Settings settings, ThreadPool threadPool);
}
public static void main(String[] args) {
final String executor = ThreadPool.Names.GENERIC;
final boolean waitForRequest = true;
final ByteSizeValue payloadSize = new ByteSizeValue(100, ByteSizeUnit.BYTES);
final int NUMBER_OF_CLIENTS = 10;
final int NUMBER_OF_ITERATIONS = 100000;
final byte[] payload = new byte[(int) payloadSize.bytes()];
final AtomicLong idGenerator = new AtomicLong();
final Type type = Type.NETTY;
Settings settings = ImmutableSettings.settingsBuilder()
.build();
final ThreadPool serverThreadPool = new ThreadPool();
final TransportService serverTransportService = new TransportService(type.newTransport(settings, serverThreadPool), serverThreadPool).start();
final ThreadPool clientThreadPool = new ThreadPool();
final TransportService clientTransportService = new TransportService(type.newTransport(settings, clientThreadPool), clientThreadPool).start();
final DiscoveryNode node = new DiscoveryNode("server", serverTransportService.boundAddress().publishAddress(), Version.CURRENT);
serverTransportService.registerHandler("benchmark", new BaseTransportRequestHandler<BenchmarkMessageRequest>() {
@Override
public BenchmarkMessageRequest newInstance() {
return new BenchmarkMessageRequest();
}
@Override
public String executor() {
return executor;
}
@Override
public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception {
channel.sendResponse(new BenchmarkMessageResponse(request));
}
});
clientTransportService.connectToNode(node);
for (int i = 0; i < 10000; i++) {
BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
clientTransportService.submitRequest(node, "benchmark", message, new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
@Override
public BenchmarkMessageResponse newInstance() {
return new BenchmarkMessageResponse();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(BenchmarkMessageResponse response) {
}
@Override
public void handleException(TransportException exp) {
exp.printStackTrace();
}
}).txGet();
}
Thread[] clients = new Thread[NUMBER_OF_CLIENTS];
final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS);
for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
clients[i] = new Thread(new Runnable() {
@Override
public void run() {
for (int j = 0; j < NUMBER_OF_ITERATIONS; j++) {
final long id = idGenerator.incrementAndGet();
BenchmarkMessageRequest request = new BenchmarkMessageRequest(id, payload);
BaseTransportResponseHandler<BenchmarkMessageResponse> handler = new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
@Override
public BenchmarkMessageResponse newInstance() {
return new BenchmarkMessageResponse();
}
@Override
public String executor() {
return executor;
}
@Override
public void handleResponse(BenchmarkMessageResponse response) {
if (response.id() != id) {
System.out.println("NO ID MATCH [" + response.id() + "] and [" + id + "]");
}
latch.countDown();
}
@Override
public void handleException(TransportException exp) {
exp.printStackTrace();
latch.countDown();
}
};
if (waitForRequest) {
clientTransportService.submitRequest(node, "benchmark", request, handler).txGet();
} else {
clientTransportService.sendRequest(node, "benchmark", request, handler);
}
}
}
});
}
StopWatch stopWatch = new StopWatch().start();
for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
clients[i].start();
}
try {
latch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
stopWatch.stop();
System.out.println("Ran [" + NUMBER_OF_CLIENTS + "], each with [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + stopWatch.totalTime() + "], TPS: " + (NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS) / stopWatch.totalTime().secondsFrac());
clientTransportService.close();
clientThreadPool.shutdownNow();
serverTransportService.close();
serverThreadPool.shutdownNow();
}
} | 0true
| src_test_java_org_elasticsearch_benchmark_transport_TransportBenchmark.java |
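The benchmark above coordinates its client threads with a single CountDownLatch sized to the total number of expected responses, then derives throughput from a stopwatch. A stripped-down sketch of just that coordination pattern, with no transport involved and illustrative names:
import java.util.concurrent.CountDownLatch;
public class LatchBenchmarkSketch {
    public static void main(String[] args) throws InterruptedException {
        final int CLIENTS = 4;
        final int ITERATIONS = 1_000_000;
        // One count per expected completion, across all client threads.
        final CountDownLatch latch = new CountDownLatch(CLIENTS * ITERATIONS);
        long start = System.nanoTime();
        for (int i = 0; i < CLIENTS; i++) {
            new Thread(() -> {
                for (int j = 0; j < ITERATIONS; j++) {
                    latch.countDown(); // stands in for one request/response round trip
                }
            }).start();
        }
        latch.await(); // blocks until every client finishes every iteration
        double seconds = (System.nanoTime() - start) / 1e9;
        System.out.printf("TPS: %.0f%n", (CLIENTS * ITERATIONS) / seconds);
    }
}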
3,100 | public class EngineClosedException extends IndexShardClosedException {
public EngineClosedException(ShardId shardId) {
super(shardId);
}
public EngineClosedException(ShardId shardId, Throwable t) {
super(shardId, t);
}
} | 0true
| src_main_java_org_elasticsearch_index_engine_EngineClosedException.java |
1,310 | private final static class Holder {
private final static EmptyClusterInfoService instance = new EmptyClusterInfoService();
} | 0true
| src_main_java_org_elasticsearch_cluster_EmptyClusterInfoService.java |
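The two-line row above is the initialization-on-demand holder idiom: the JVM guarantees the nested Holder class is loaded, and its instance created, only on first access, giving lazy, thread-safe singleton initialization without any synchronization. A generic sketch, with LazySingleton as a hypothetical stand-in for EmptyClusterInfoService:
public class LazySingleton {
    private LazySingleton() {
    }
    // Loaded (and INSTANCE created) only when getInstance() is first called;
    // class initialization is serialized by the JVM, so this is thread-safe.
    private static final class Holder {
        private static final LazySingleton INSTANCE = new LazySingleton();
    }
    public static LazySingleton getInstance() {
        return Holder.INSTANCE;
    }
}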
2,045 | return new MembersInjector<T>() {
public void injectMembers(T instance) {
checkState(delegate != null,
"This MembersInjector cannot be used until the Injector has been created.");
delegate.injectMembers(instance);
}
@Override
public String toString() {
return "MembersInjector<" + type + ">";
}
}; | 0true
| src_main_java_org_elasticsearch_common_inject_spi_MembersInjectorLookup.java |
1,603 | public class PolymorphicEntity implements Serializable {
private static final long serialVersionUID = 1L;
private String name;
private String type;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
} | 0true
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_PolymorphicEntity.java |
40 | public class SimpleCommandProcessor extends MemcacheCommandProcessor<SimpleCommand> {
private final ILogger logger;
public SimpleCommandProcessor(TextCommandService textCommandService) {
super(textCommandService);
logger = textCommandService.getNode().getLogger(this.getClass().getName());
}
public void handle(SimpleCommand command) {
if (command.getType() == QUIT) {
try {
command.getSocketTextReader().closeConnection();
} catch (Exception e) {
logger.warning(e);
}
} else if (command.getType() == UNKNOWN) {
command.setResponse(ERROR);
textCommandService.sendResponse(command);
}
}
public void handleRejection(SimpleCommand command) {
handle(command);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_ascii_memcache_SimpleCommandProcessor.java |
200 | public class OClusterRemote implements OCluster {
private String name;
private int id;
private int dataSegmentId;
private String type;
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.OCluster#configure(com.orientechnologies.orient.core.storage.OStorage, int,
* java.lang.String, java.lang.String, int, java.lang.Object[])
*/
public void configure(OStorage iStorage, int iId, String iClusterName, String iLocation, int iDataSegmentId,
Object... iParameters) throws IOException {
id = iId;
name = iClusterName;
dataSegmentId = iDataSegmentId;
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.OCluster#configure(com.orientechnologies.orient.core.storage.OStorage,
* com.orientechnologies.orient.core.config.OStorageClusterConfiguration)
*/
public void configure(OStorage iStorage, OStorageClusterConfiguration iConfig) throws IOException {
id = iConfig.getId();
name = iConfig.getName();
dataSegmentId = iConfig.getDataSegmentId();
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.OCluster#create(int)
*/
public void create(int iStartSize) throws IOException {
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.OCluster#open()
*/
public void open() throws IOException {
}
public void close() throws IOException {
}
@Override
public OModificationLock getExternalModificationLock() {
throw new UnsupportedOperationException("getExternalModificationLock");
}
@Override
public void close(boolean flush) throws IOException {
}
@Override
public OPhysicalPosition createRecord(byte[] content, ORecordVersion recordVersion, byte recordType) throws IOException {
throw new UnsupportedOperationException("createRecord");
}
@Override
public boolean deleteRecord(OClusterPosition clusterPosition) throws IOException {
throw new UnsupportedOperationException("deleteRecord");
}
@Override
public void updateRecord(OClusterPosition clusterPosition, byte[] content, ORecordVersion recordVersion, byte recordType)
throws IOException {
throw new UnsupportedOperationException("updateRecord");
}
@Override
public ORawBuffer readRecord(OClusterPosition clusterPosition) throws IOException {
throw new UnsupportedOperationException("readRecord");
}
@Override
public boolean exists() {
throw new UnsupportedOperationException("exists");
}
public void delete() throws IOException {
}
public void set(ATTRIBUTES iAttribute, Object iValue) throws IOException {
}
public void truncate() throws IOException {
}
public String getType() {
return type;
}
public int getDataSegmentId() {
return dataSegmentId;
}
public boolean addPhysicalPosition(OPhysicalPosition iPPosition) throws IOException {
return false;
}
public OPhysicalPosition getPhysicalPosition(OPhysicalPosition iPPosition) throws IOException {
return null;
}
public void updateDataSegmentPosition(OClusterPosition iPosition, int iDataSegmentId, long iDataPosition) throws IOException {
}
public void removePhysicalPosition(OClusterPosition iPosition) throws IOException {
}
public void updateRecordType(OClusterPosition iPosition, byte iRecordType) throws IOException {
}
public void updateVersion(OClusterPosition iPosition, ORecordVersion iVersion) throws IOException {
}
public long getEntries() {
return 0;
}
@Override
public long getTombstonesCount() {
throw new UnsupportedOperationException("getTombstonesCount()");
}
@Override
public void convertToTombstone(OClusterPosition iPosition) throws IOException {
throw new UnsupportedOperationException("convertToTombstone()");
}
@Override
public boolean hasTombstonesSupport() {
throw new UnsupportedOperationException("hasTombstonesSupport()");
}
public OClusterPosition getFirstPosition() {
return OClusterPositionFactory.INSTANCE.valueOf(0);
}
public OClusterPosition getLastPosition() {
return OClusterPositionFactory.INSTANCE.valueOf(0);
}
public void lock() {
}
public void unlock() {
}
public int getId() {
return id;
}
public void synch() throws IOException {
}
public void setSoftlyClosed(boolean softlyClosed) throws IOException {
}
@Override
public boolean wasSoftlyClosed() throws IOException {
return true;
}
public String getName() {
return name;
}
public long getRecordsSize() {
throw new UnsupportedOperationException("getRecordsSize()");
}
public boolean isHashBased() {
return false;
}
public OClusterEntryIterator absoluteIterator() {
throw new UnsupportedOperationException("getRecordsSize()");
}
public void setType(String type) {
this.type = type;
}
@Override
public OPhysicalPosition[] higherPositions(OPhysicalPosition position) {
throw new UnsupportedOperationException("higherPositions()");
}
@Override
public OPhysicalPosition[] lowerPositions(OPhysicalPosition position) {
throw new UnsupportedOperationException("lowerPositions()");
}
@Override
public OPhysicalPosition[] ceilingPositions(OPhysicalPosition position) throws IOException {
throw new UnsupportedOperationException("ceilingPositions()");
}
@Override
public OPhysicalPosition[] floorPositions(OPhysicalPosition position) throws IOException {
throw new UnsupportedOperationException("floorPositions()");
}
@Override
public boolean useWal() {
throw new UnsupportedOperationException("useWal()");
}
@Override
public float recordGrowFactor() {
throw new UnsupportedOperationException("recordGrowFactor()");
}
@Override
public float recordOverflowGrowFactor() {
throw new UnsupportedOperationException("recordOverflowGrowFactor()");
}
@Override
public String compression() {
throw new UnsupportedOperationException("compression()");
}
} | 0true
| client_src_main_java_com_orientechnologies_orient_client_remote_OClusterRemote.java |