Unnamed: 0 (int64, 0–6.45k) | func (stringlengths 29–253k) | target (class label, 2 classes) | project (stringlengths 36–167) |
---|---|---|---|
1,258 | public class CompositeActivity extends BaseActivity<PricingContext> {
private SequenceProcessor workflow;
@Override
public PricingContext execute(PricingContext context) throws Exception {
ProcessContext subContext = workflow.doActivities(context.getSeedData());
if (subContext.isStopped()) {
context.stopProcess();
}
return context;
}
public SequenceProcessor getWorkflow() {
return workflow;
}
public void setWorkflow(SequenceProcessor workflow) {
this.workflow = workflow;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_pricing_service_workflow_CompositeActivity.java |
232 | public class WriteTransactionCommandOrderingTest
{
private final AtomicReference<List<String>> currentRecording = new AtomicReference<>();
private final NeoStore store = mock( NeoStore.class );
private final RecordingRelationshipStore relationshipStore = new RecordingRelationshipStore( currentRecording );
private final RecordingNodeStore nodeStore = new RecordingNodeStore( currentRecording );
private final RecordingPropertyStore propertyStore = new RecordingPropertyStore( currentRecording );
public WriteTransactionCommandOrderingTest()
{
when( store.getPropertyStore() ).thenReturn( propertyStore );
when( store.getNodeStore() ).thenReturn( nodeStore );
when( store.getRelationshipStore() ).thenReturn( relationshipStore );
}
@Test
public void shouldExecuteCommandsInTheSameOrderRegardlessOfItBeingRecoveredOrNot() throws Exception
{
// Given
List<String> nonRecoveredRecording = new ArrayList<>();
NeoStoreTransaction nonRecoveredTx = newWriteTransaction();
injectAllPossibleCommands( nonRecoveredTx );
List<String> recoveredRecording = new ArrayList<>();
NeoStoreTransaction recoveredTx = newWriteTransaction();
recoveredTx.setRecovered();
injectAllPossibleCommands( recoveredTx );
// When
currentRecording.set( nonRecoveredRecording );
nonRecoveredTx.doPrepare();
nonRecoveredTx.doCommit();
currentRecording.set( recoveredRecording );
recoveredTx.doPrepare();
recoveredTx.doCommit();
// Then
assertThat(nonRecoveredRecording, equalTo(recoveredRecording)); // ordering is the same in both cases
assertThat(new HashSet<>( recoveredRecording ).size(), is( 9 )); // we have included all possible commands
}
private void injectAllPossibleCommands( NeoStoreTransaction tx )
{
tx.injectCommand( new Command.NodeCommand( nodeStore, inUseNode(), inUseNode() ) ); // update
tx.injectCommand( new Command.NodeCommand( nodeStore, inUseNode(), missingNode() ) ); // delete
tx.injectCommand( new Command.NodeCommand( nodeStore, missingNode(), createdNode() ) ); // create
tx.injectCommand( new Command.PropertyCommand( propertyStore, inUseProperty(), inUseProperty() ) ); // update
tx.injectCommand( new Command.PropertyCommand( propertyStore, inUseProperty(), missingProperty() ) ); // delete
tx.injectCommand( new Command.PropertyCommand( propertyStore, missingProperty(), createdProperty() ) ); // create
tx.injectCommand( new Command.RelationshipCommand( relationshipStore, inUseRelationship() ) ); // update
tx.injectCommand( new Command.RelationshipCommand( relationshipStore, missingRelationship() ) ); // delete
tx.injectCommand( new Command.RelationshipCommand( relationshipStore, createdRelationship() ) ); // create
}
private static RelationshipRecord missingRelationship()
{
return new RelationshipRecord( -1 );
}
private static RelationshipRecord createdRelationship()
{
RelationshipRecord record = new RelationshipRecord( 2 );
record.setInUse( true );
record.setCreated();
return record;
}
private static RelationshipRecord inUseRelationship()
{
RelationshipRecord record = new RelationshipRecord( 1 );
record.setInUse( true );
return record;
}
private static PropertyRecord missingProperty()
{
return new PropertyRecord( -1 );
}
private static PropertyRecord createdProperty()
{
PropertyRecord record = new PropertyRecord( 2 );
record.setInUse( true );
record.setCreated();
return record;
}
private static PropertyRecord inUseProperty()
{
PropertyRecord record = new PropertyRecord( 1 );
record.setInUse( true );
return record;
}
private static NodeRecord missingNode()
{
return new NodeRecord(-1, -1, -1);
}
private static NodeRecord createdNode()
{
NodeRecord record = new NodeRecord( 2, -1, -1 );
record.setInUse( true );
record.setCreated();
return record;
}
private static NodeRecord inUseNode()
{
NodeRecord record = new NodeRecord( 1, -1, -1 );
record.setInUse( true );
return record;
}
private NeoStoreTransaction newWriteTransaction() {
NeoStoreTransaction tx = new NeoStoreTransaction( 0l, mock( XaLogicalLog.class ), TransactionState.NO_STATE,
store, mock( CacheAccessBackDoor.class ), mock( IndexingService.class ),
WriteTransactionTest.NO_LABEL_SCAN_STORE, mock( IntegrityValidator.class ),
mock( KernelTransactionImplementation.class ), mock( LockService.class, RETURNS_MOCKS ) );
tx.setCommitTxId( store.getLastCommittedTx() + 1 );
return tx;
}
private static String commandActionToken( AbstractBaseRecord record )
{
if ( !record.inUse() )
{
return "deleted";
}
if ( record.isCreated() )
{
return "created";
}
return "updated";
}
private static class RecordingPropertyStore extends PropertyStore
{
private final AtomicReference<List<String>> currentRecording;
public RecordingPropertyStore( AtomicReference<List<String>> currentRecording )
{
super( null, null, null, null, null, null, null, null, null );
this.currentRecording = currentRecording;
}
@Override
public void updateRecord(PropertyRecord record) {
currentRecording.get().add(commandActionToken(record) + " property");
}
@Override
protected void checkStorage() {
}
@Override
protected void checkVersion() {
}
@Override
protected void loadStorage() {
}
}
private static class RecordingNodeStore extends NodeStore
{
private final AtomicReference<List<String>> currentRecording;
public RecordingNodeStore( AtomicReference<List<String>> currentRecording )
{
super( null, null, null, null, null, null, null );
this.currentRecording = currentRecording;
}
@Override
public void updateRecord(NodeRecord record) {
currentRecording.get().add(commandActionToken(record) + " node");
}
@Override
protected void checkStorage() {
}
@Override
protected void checkVersion() {
}
@Override
protected void loadStorage() {
}
@Override
public NodeRecord getRecord(long id) {
NodeRecord record = new NodeRecord(id, -1, -1);
record.setInUse(true);
return record;
}
}
private static class RecordingRelationshipStore extends RelationshipStore
{
private final AtomicReference<List<String>> currentRecording;
public RecordingRelationshipStore( AtomicReference<List<String>> currentRecording )
{
super( null, null, null, null, null, null );
this.currentRecording = currentRecording;
}
@Override
public void updateRecord(RelationshipRecord record) {
currentRecording.get().add(commandActionToken(record) + " relationship");
}
@Override
protected void checkStorage() {
}
@Override
protected void checkVersion() {
}
@Override
protected void loadStorage() {
}
}
} | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionCommandOrderingTest.java |
1,474 | public class OSQLFunctionLabel extends OSQLFunctionConfigurableAbstract {
public static final String NAME = "label";
public OSQLFunctionLabel() {
super(NAME, 0, 0);
}
public Object execute(final OIdentifiable iCurrentRecord, final Object iCurrentResult, final Object[] iParameters,
OCommandContext iContext) {
final OrientBaseGraph graph = OGraphCommandExecutorSQLFactory.getGraph();
if (iCurrentRecord == null) {
return OSQLEngine.foreachRecord(new OCallable<Object, OIdentifiable>() {
@Override
public Object call(final OIdentifiable iArgument) {
return getLabel(graph, iArgument);
}
}, iCurrentResult, iContext);
} else
return getLabel(graph, iCurrentRecord);
}
private Object getLabel(final OrientBaseGraph graph, final OIdentifiable iCurrentRecord) {
final ODocument rec = iCurrentRecord.getRecord();
if (rec.getSchemaClass().isSubClassOf(OrientVertex.CLASS_NAME)) {
// VERTEX
final OrientVertex vertex = graph.getVertex(iCurrentRecord);
return vertex.getLabel();
} else if (rec.getSchemaClass().isSubClassOf(OrientEdge.CLASS_NAME)) {
// EDGE
final OrientEdge edge = graph.getEdge(iCurrentRecord);
return edge.getLabel();
} else
throw new OCommandExecutionException("Invalid record: is neither a vertex nor an edge. Found class: " + rec.getSchemaClass());
}
public String getSyntax() {
return "Syntax error: label()";
}
} | 1no label
| graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionLabel.java |
1,431 | executor.scheduleWithFixedDelay(new Runnable() {
public void run() {
cache.cleanup();
}
}, 60, 60, TimeUnit.SECONDS); | 0true
| hazelcast-hibernate_hazelcast-hibernate3_src_main_java_com_hazelcast_hibernate_local_CleanupService.java |
3,198 | static class Soft extends FieldBased {
public Soft(@Nullable IndexService indexService, FieldMapper.Names fieldNames, FieldDataType fieldDataType) {
super(indexService, fieldNames, fieldDataType, CacheBuilder.newBuilder().softValues());
}
} | 1no label
| src_main_java_org_elasticsearch_index_fielddata_IndexFieldDataCache.java |
572 | public class ODocumentFieldsHashSet extends AbstractSet<ODocument> {
private final LinkedHashSet<ODocumentWrapper> hashSet;
public ODocumentFieldsHashSet() {
hashSet = new LinkedHashSet<ODocumentWrapper>();
}
@Override
public boolean contains(Object o) {
if (!(o instanceof ODocument))
return false;
return hashSet.contains(new ODocumentWrapper((ODocument) o));
}
@Override
public boolean remove(Object o) {
if (!(o instanceof ODocument))
return false;
return hashSet.remove(new ODocumentWrapper((ODocument) o));
}
@Override
public boolean add(ODocument document) {
return hashSet.add(new ODocumentWrapper(document));
}
@Override
public boolean isEmpty() {
return hashSet.isEmpty();
}
@Override
public void clear() {
hashSet.clear();
}
@Override
public Iterator<ODocument> iterator() {
final Iterator<ODocumentWrapper> iterator = hashSet.iterator();
return new Iterator<ODocument>() {
public boolean hasNext() {
return iterator.hasNext();
}
public ODocument next() {
return iterator.next().document;
}
public void remove() {
iterator.remove();
}
};
}
@Override
public int size() {
return hashSet.size();
}
private static final class ODocumentWrapper {
private final ODocument document;
private ODocumentWrapper(ODocument document) {
this.document = document;
}
@Override
public int hashCode() {
int hashCode = document.getIdentity().hashCode();
for (Object field : document.fieldValues())
hashCode = 31 * hashCode + field.hashCode();
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (obj == null)
return false;
if (obj == document)
return true;
if (obj.getClass() != document.getClass())
return false;
final ODocument anotherDocument = (ODocument) obj;
if (!document.getIdentity().equals(anotherDocument.getIdentity()))
return false;
final String[] fieldNames = document.fieldNames();
final String[] anotherFieldNames = anotherDocument.fieldNames();
if (fieldNames.length != anotherFieldNames.length)
return false;
for (final String fieldName : fieldNames) {
final Object fieldValue = document.field(fieldName);
final Object anotherFieldValue = anotherDocument.field(fieldName);
if (fieldValue == null && anotherFieldValue != null)
return false;
if (fieldValue != null && !fieldValue.equals(anotherFieldValue))
return false;
}
return true;
}
@Override
public String toString() {
return document.toString();
}
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_ODocumentFieldsHashSet.java |
788 | public interface OMetadata {
public void load();
public void create() throws IOException;
public OSchema getSchema();
public OSecurity getSecurity();
public OIndexManagerProxy getIndexManager();
public int getSchemaClusterId();
/**
* Reloads the internal objects.
*/
public void reload();
/**
* Closes internal objects
*/
public void close();
public OFunctionLibrary getFunctionLibrary();
public OSchedulerListener getSchedulerListener();
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_metadata_OMetadata.java |
1,014 | public class InitOperation extends SemaphoreBackupAwareOperation implements IdentifiedDataSerializable {
public InitOperation() {
}
public InitOperation(String name, int permitCount) {
super(name, permitCount);
}
@Override
public void run() throws Exception {
Permit permit = getPermit();
response = permit.init(permitCount);
}
@Override
public boolean shouldBackup() {
return Boolean.TRUE.equals(response);
}
@Override
public Operation getBackupOperation() {
return new InitBackupOperation(name, permitCount);
}
@Override
public int getFactoryId() {
return SemaphoreDataSerializerHook.F_ID;
}
@Override
public int getId() {
return SemaphoreDataSerializerHook.INIT_OPERATION;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_semaphore_operations_InitOperation.java |
31 | final class NestedLiteralCompletionProposal implements ICompletionProposal,
ICompletionProposalExtension2 {
private final int loc;
private final int index;
private final String value;
NestedLiteralCompletionProposal(String value, int loc,
int index) {
this.value = value;
this.loc = loc;
this.index = index;
}
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument document) {
//the following awfulness is necessary because the
//insertion point may have changed (and even its
//text may have changed, since the proposal was
//instantiated).
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
if (offset>0 && document.getChar(offset)==' ') {
offset++;
}
int nextOffset = findCharCount(index+1, document,
loc+startOfArgs, endOfLine,
",;", "", true);
int middleOffset = findCharCount(1, document,
offset, nextOffset,
"=", "", true)+1;
if (middleOffset>0 &&
document.getChar(middleOffset)=='>') {
middleOffset++;
}
while (middleOffset>0 &&
document.getChar(middleOffset)==' ') {
middleOffset++;
}
if (middleOffset>offset &&
middleOffset<nextOffset) {
offset = middleOffset;
}
String str = value;
if (nextOffset==-1) {
nextOffset = offset;
}
if (document.getChar(nextOffset)=='}') {
str += " ";
}
document.replace(offset, nextOffset-offset, str);
}
catch (BadLocationException e) {
e.printStackTrace();
}
//adding imports drops us out of linked mode :(
/*try {
DocumentChange tc = new DocumentChange("imports", document);
tc.setEdit(new MultiTextEdit());
HashSet<Declaration> decs = new HashSet<Declaration>();
CompilationUnit cu = cpc.getRootNode();
importDeclaration(decs, d, cu);
if (d instanceof Functional) {
List<ParameterList> pls = ((Functional) d).getParameterLists();
if (!pls.isEmpty()) {
for (Parameter p: pls.get(0).getParameters()) {
MethodOrValue pm = p.getModel();
if (pm instanceof Method) {
for (ParameterList ppl: ((Method) pm).getParameterLists()) {
for (Parameter pp: ppl.getParameters()) {
importSignatureTypes(pp.getModel(), cu, decs);
}
}
}
}
}
}
applyImports(tc, decs, cu, document);
tc.perform(new NullProgressMonitor());
}
catch (Exception e) {
e.printStackTrace();
}*/
}
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public String getDisplayString() {
return value;
}
@Override
public Image getImage() {
return getDecoratedImage(CEYLON_LITERAL, 0, false);
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public void apply(ITextViewer viewer, char trigger,
int stateMask, int offset) {
apply(viewer.getDocument());
}
@Override
public void selected(ITextViewer viewer, boolean smartToggle) {}
@Override
public void unselected(ITextViewer viewer) {}
@Override
public boolean validate(IDocument document, int currentOffset,
DocumentEvent event) {
if (event==null) {
return true;
}
else {
try {
IRegion li = document.getLineInformationOfOffset(loc);
int endOfLine = li.getOffset() + li.getLength();
int startOfArgs = getFirstPosition();
int offset = findCharCount(index, document,
loc+startOfArgs, endOfLine,
",;", "", true)+1;
String content = document.get(offset, currentOffset - offset);
int eq = content.indexOf("=");
if (eq>0) {
content = content.substring(eq+1);
}
String filter = content.trim().toLowerCase();
if (value.toLowerCase().startsWith(filter)) {
return true;
}
}
catch (BadLocationException e) {
// ignore concurrently modified document
}
return false;
}
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_InvocationCompletionProposal.java |
2,487 | public static interface Params {
String param(String key);
String param(String key, String defaultValue);
boolean paramAsBoolean(String key, boolean defaultValue);
Boolean paramAsBoolean(String key, Boolean defaultValue);
/**
* @deprecated since 1.0.0
* use {@link ToXContent.Params#paramAsBoolean(String, Boolean)} instead
*/
@Deprecated
Boolean paramAsBooleanOptional(String key, Boolean defaultValue);
} | 0true
| src_main_java_org_elasticsearch_common_xcontent_ToXContent.java |
1,129 | public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeNaiveTFIDFScoreScript(params);
}
} | 0true
| src_test_java_org_elasticsearch_benchmark_scripts_score_script_NativeNaiveTFIDFScoreScript.java |
332 | new Thread() {
public void run() {
boolean result = map.tryRemove("key2", 1, TimeUnit.SECONDS);
if (!result) {
latch.countDown();
}
}
}.start(); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java |
1,567 | public class TestPassThroughActivity extends BaseActivity<CheckoutContext> {
@Override
public CheckoutContext execute(CheckoutContext context) throws Exception {
// TODO Auto-generated method stub
return context;
}
} | 0true
| integration_src_main_java_org_broadleafcommerce_core_workflow_state_test_TestPassThroughActivity.java |
2,885 | public class LowerCaseTokenFilterFactory extends AbstractTokenFilterFactory {
private final String lang;
@Inject
public LowerCaseTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
this.lang = settings.get("language", null);
}
@Override
public TokenStream create(TokenStream tokenStream) {
if (lang == null) {
return new LowerCaseFilter(version, tokenStream);
} else if (lang.equalsIgnoreCase("greek")) {
return new GreekLowerCaseFilter(version, tokenStream);
} else if (lang.equalsIgnoreCase("turkish")) {
return new TurkishLowerCaseFilter(tokenStream);
} else {
throw new ElasticsearchIllegalArgumentException("language [" + lang + "] not supported for lower case");
}
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_LowerCaseTokenFilterFactory.java |
1,318 | private static final class ValueWrapper {
final Object value;
private ValueWrapper(Object value) {
this.value = value;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_executor_ExecutionCallbackAdapterFactory.java |
886 | public final class GetCountRequest extends KeyBasedClientRequest
implements Portable, RetryableRequest, SecureRequest {
private String name;
public GetCountRequest() {
}
public GetCountRequest(String name) {
this.name = name;
}
@Override
protected Object getKey() {
return name;
}
@Override
protected Operation prepareOperation() {
return new GetCountOperation(name);
}
@Override
public String getServiceName() {
return CountDownLatchService.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return CountDownLatchPortableHook.F_ID;
}
@Override
public int getClassId() {
return CountDownLatchPortableHook.GET_COUNT;
}
@Override
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("name", name);
}
@Override
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("name");
}
@Override
public Permission getRequiredPermission() {
return new CountDownLatchPermission(name, ActionConstants.ACTION_READ);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_countdownlatch_client_GetCountRequest.java |
2,027 | public interface Element {
/**
* Returns an arbitrary object containing information about the "place" where this element was
* configured. Used by Guice in the production of descriptive error messages.
* <p/>
* <p>Tools might specially handle types they know about; {@code StackTraceElement} is a good
* example. Tools should simply call {@code toString()} on the source object if the type is
* unfamiliar.
*/
Object getSource();
/**
* Accepts an element visitor. Invokes the visitor method specific to this element's type.
*
* @param visitor to call back on
*/
<T> T acceptVisitor(ElementVisitor<T> visitor);
/**
* Writes this module element to the given binder (optional operation).
*
* @param binder to apply configuration element to
* @throws UnsupportedOperationException if the {@code applyTo} method is not supported by this
* element.
*/
void applyTo(Binder binder);
} | 0true
| src_main_java_org_elasticsearch_common_inject_spi_Element.java |
48 | public class ODefaultComparator implements Comparator<Object> {
public static final ODefaultComparator INSTANCE = new ODefaultComparator();
@SuppressWarnings("unchecked")
public int compare(final Object objectOne, final Object objectTwo) {
if (objectOne instanceof Comparable)
return ((Comparable<Object>) objectOne).compareTo(objectTwo);
final Comparator<?> comparator = OComparatorFactory.INSTANCE.getComparator(objectOne.getClass());
if (comparator != null)
return ((Comparator<Object>) comparator).compare(objectOne, objectTwo);
throw new IllegalStateException("Object of class " + objectOne.getClass().getName() + " cannot be compared");
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_comparator_ODefaultComparator.java |
3,431 | public class RecoveryStatus {
public static enum Stage {
INIT,
INDEX,
START,
TRANSLOG,
DONE
}
private Stage stage = Stage.INIT;
private long startTime = System.currentTimeMillis();
private long time;
private Index index = new Index();
private Translog translog = new Translog();
private Start start = new Start();
public Stage stage() {
return this.stage;
}
public RecoveryStatus updateStage(Stage stage) {
this.stage = stage;
return this;
}
public long startTime() {
return this.startTime;
}
public void startTime(long startTime) {
this.startTime = startTime;
}
public long time() {
return this.time;
}
public void time(long time) {
this.time = time;
}
public Index index() {
return index;
}
public Start start() {
return this.start;
}
public Translog translog() {
return translog;
}
public static class Start {
private long startTime;
private long time;
private long checkIndexTime;
public long startTime() {
return this.startTime;
}
public void startTime(long startTime) {
this.startTime = startTime;
}
public long time() {
return this.time;
}
public void time(long time) {
this.time = time;
}
public long checkIndexTime() {
return checkIndexTime;
}
public void checkIndexTime(long checkIndexTime) {
this.checkIndexTime = checkIndexTime;
}
}
public static class Translog {
private long startTime = 0;
private long time;
private volatile int currentTranslogOperations = 0;
public long startTime() {
return this.startTime;
}
public void startTime(long startTime) {
this.startTime = startTime;
}
public long time() {
return this.time;
}
public void time(long time) {
this.time = time;
}
public void addTranslogOperations(int count) {
this.currentTranslogOperations += count;
}
public int currentTranslogOperations() {
return this.currentTranslogOperations;
}
}
public static class Index {
private long startTime = 0;
private long time = 0;
private long version = -1;
private int numberOfFiles = 0;
private long totalSize = 0;
private int numberOfReusedFiles = 0;
private long reusedTotalSize = 0;
private AtomicLong currentFilesSize = new AtomicLong();
public long startTime() {
return this.startTime;
}
public void startTime(long startTime) {
this.startTime = startTime;
}
public long time() {
return this.time;
}
public void time(long time) {
this.time = time;
}
public long version() {
return this.version;
}
public void files(int numberOfFiles, long totalSize, int numberOfReusedFiles, long reusedTotalSize) {
this.numberOfFiles = numberOfFiles;
this.totalSize = totalSize;
this.numberOfReusedFiles = numberOfReusedFiles;
this.reusedTotalSize = reusedTotalSize;
}
public int numberOfFiles() {
return numberOfFiles;
}
public int numberOfRecoveredFiles() {
return numberOfFiles - numberOfReusedFiles;
}
public long totalSize() {
return this.totalSize;
}
public int numberOfReusedFiles() {
return numberOfReusedFiles;
}
public long reusedTotalSize() {
return this.reusedTotalSize;
}
public long recoveredTotalSize() {
return totalSize - reusedTotalSize;
}
public void updateVersion(long version) {
this.version = version;
}
public long currentFilesSize() {
return this.currentFilesSize.get();
}
public void addCurrentFilesSize(long updatedSize) {
this.currentFilesSize.addAndGet(updatedSize);
}
}
} | 0true
| src_main_java_org_elasticsearch_index_gateway_RecoveryStatus.java |
267 | public class OCommandGenericIterator implements Iterator<Object>, Iterable<Object> {
protected OCommandExecutor command;
protected Iterator<Object> resultSet;
protected Object resultOne;
protected boolean executed = false;
public OCommandGenericIterator(OCommandExecutor command) {
this.command = command;
}
public boolean hasNext() {
checkForExecution();
if (resultOne != null)
return true;
else if (resultSet != null)
return resultSet.hasNext();
return false;
}
public Object next() {
checkForExecution();
if (resultOne != null)
return resultOne;
else if (resultSet != null)
return resultSet.next();
return null;
}
public Iterator<Object> iterator() {
return this;
}
public void remove() {
throw new UnsupportedOperationException("remove()");
}
@SuppressWarnings("unchecked")
protected void checkForExecution() {
if (!executed) {
executed = true;
final Object result = command.execute(null);
if (result instanceof Collection)
resultSet = ((Collection<Object>) result).iterator();
else if (result instanceof Object)
resultOne = (Object) result;
}
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_command_OCommandGenericIterator.java |
39 | @Component("blTimeOptionsExtensionListener")
public class TimeEnumOptionsExtensionListener extends AbstractRuleBuilderEnumOptionsExtensionListener {
@Override
protected Map<String, Class<? extends BroadleafEnumerationType>> getValuesToGenerate() {
Map<String, Class<? extends BroadleafEnumerationType>> map =
new HashMap<String, Class<? extends BroadleafEnumerationType>>();
map.put("blcOptions_HourOfDay", HourOfDayType.class);
map.put("blcOptions_DayOfWeek", DayOfWeekType.class);
map.put("blcOptions_Month", MonthType.class);
map.put("blcOptions_DayOfMonth", DayOfMonthType.class);
map.put("blcOptions_Minute", MinuteType.class);
return map;
}
} | 0true
| admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_options_TimeEnumOptionsExtensionListener.java |
729 | public class CollectionAddListenerRequest extends CallableClientRequest implements RetryableRequest {
private String name;
private boolean includeValue;
private String serviceName;
public CollectionAddListenerRequest() {
}
public CollectionAddListenerRequest(String name, boolean includeValue) {
this.name = name;
this.includeValue = includeValue;
}
@Override
public Object call() throws Exception {
final ClientEndpoint endpoint = getEndpoint();
final ClientEngine clientEngine = getClientEngine();
ItemListener listener = new ItemListener() {
@Override
public void itemAdded(ItemEvent item) {
send(item);
}
@Override
public void itemRemoved(ItemEvent item) {
send(item);
}
private void send(ItemEvent event) {
if (endpoint.live()) {
Data item = clientEngine.toData(event.getItem());
final ItemEventType eventType = event.getEventType();
final String uuid = event.getMember().getUuid();
PortableItemEvent portableItemEvent = new PortableItemEvent(item, eventType, uuid);
endpoint.sendEvent(portableItemEvent, getCallId());
}
}
};
final EventService eventService = clientEngine.getEventService();
final CollectionEventFilter filter = new CollectionEventFilter(includeValue);
final EventRegistration registration = eventService.registerListener(getServiceName(), name, filter, listener);
final String registrationId = registration.getId();
endpoint.setListenerRegistration(getServiceName(), name, registrationId);
return registrationId;
}
@Override
public String getServiceName() {
return serviceName;
}
public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
@Override
public int getFactoryId() {
return CollectionPortableHook.F_ID;
}
@Override
public int getClassId() {
return CollectionPortableHook.COLLECTION_ADD_LISTENER;
}
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
writer.writeBoolean("i", includeValue);
writer.writeUTF("s", serviceName);
}
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("n");
includeValue = reader.readBoolean("i");
serviceName = reader.readUTF("s");
}
@Override
public Permission getRequiredPermission() {
if (ListService.SERVICE_NAME.equals(serviceName)) {
return new ListPermission(name, ActionConstants.ACTION_LISTEN);
} else if (SetService.SERVICE_NAME.equals(serviceName)) {
return new SetPermission(name, ActionConstants.ACTION_LISTEN);
}
throw new IllegalArgumentException("No service matched!!!");
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_collection_client_CollectionAddListenerRequest.java |
1,034 | public static class Presentation {
public static class Tab {
public static class Name {
public static final String Advanced = "OrderImpl_Advanced";
}
public static class Order {
public static final int Advanced = 2000;
}
}
public static class Group {
public static class Name {
public static final String Description = "OrderItemImpl_Description";
public static final String Pricing = "OrderItemImpl_Pricing";
public static final String Catalog = "OrderItemImpl_Catalog";
}
public static class Order {
public static final int Description = 1000;
public static final int Pricing = 2000;
public static final int Catalog = 3000;
}
}
public static class FieldOrder {
public static final int NAME = 1000;
public static final int PRICE = 2000;
public static final int QUANTITY = 3000;
public static final int RETAILPRICE = 4000;
public static final int SALEPRICE = 5000;
public static final int TOTALTAX = 6000;
public static final int CATEGORY = 1000;
public static final int PRICEDETAILS = 1000;
public static final int ADJUSTMENTS = 2000;
public static final int DISCOUNTALLOWED = 3000;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_OrderItemImpl.java |
3,598 | private static ThreadLocal<NumericTokenStream> tokenStream8 = new ThreadLocal<NumericTokenStream>() {
@Override
protected NumericTokenStream initialValue() {
return new NumericTokenStream(8);
}
}; | 0true
| src_main_java_org_elasticsearch_index_mapper_core_NumberFieldMapper.java |
439 | static final class Fields {
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString MASTER_ONLY = new XContentBuilderString("master_only");
static final XContentBuilderString DATA_ONLY = new XContentBuilderString("data_only");
static final XContentBuilderString MASTER_DATA = new XContentBuilderString("master_data");
static final XContentBuilderString CLIENT = new XContentBuilderString("client");
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsNodes.java |
1,566 | @XmlRootElement(name = "security")
public class OServerSecurityConfiguration {
@XmlElementWrapper
@XmlAnyElement
@XmlElementRef(type = OServerUserConfiguration.class)
public List<OServerUserConfiguration> users;
@XmlElementWrapper
@XmlAnyElement
@XmlElementRef(type = OServerNetworkListenerConfiguration.class)
public List<OServerResourceConfiguration> resources;
public OServerSecurityConfiguration() {
}
public OServerSecurityConfiguration(Object iObject) {
users = new ArrayList<OServerUserConfiguration>();
resources = new ArrayList<OServerResourceConfiguration>();
}
} | 0true
| server_src_main_java_com_orientechnologies_orient_server_config_OServerSecurityConfiguration.java |
79 | public abstract class OSharedResourceTimeout {
private final ReadWriteLock lock = new ReentrantReadWriteLock();
protected int timeout;
public OSharedResourceTimeout(final int timeout) {
this.timeout = timeout;
}
protected void acquireSharedLock() throws OTimeoutException {
try {
if (timeout == 0) {
lock.readLock().lock();
return;
} else if (lock.readLock().tryLock(timeout, TimeUnit.MILLISECONDS))
// OK
return;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
throw new OTimeoutException("Timeout on acquiring shared lock against resource: " + this);
}
protected void releaseSharedLock() {
lock.readLock().unlock();
}
protected void acquireExclusiveLock() throws OTimeoutException {
try {
if (timeout == 0) {
lock.writeLock().lock();
return;
} else if (lock.writeLock().tryLock(timeout, TimeUnit.MILLISECONDS))
// OK
return;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
throw new OTimeoutException("Timeout on acquiring exclusive lock against resource: " + this);
}
protected void releaseExclusiveLock() {
lock.writeLock().unlock();
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_concur_resource_OSharedResourceTimeout.java |
34 | public class PartialGetCommand extends GetCommand {
public PartialGetCommand(String key) {
super(TextCommandType.PARTIAL_GET, key);
}
@Override
public String toString() {
return "PartialGetCommand{"
+ "key='"
+ key + '\''
+ '}';
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_ascii_memcache_PartialGetCommand.java |
2,997 | public static class FilterCacheKey {
private final Object readerKey;
private final Object filterKey;
// if we know, we will try and set the removal listener (for statistics)
// it's ok that it's not volatile because we make sure we only set it when the object is created, before it's shared between threads
@Nullable
public RemovalListener<WeightedFilterCache.FilterCacheKey, DocIdSet> removalListener;
public FilterCacheKey(Object readerKey, Object filterKey) {
this.readerKey = readerKey;
this.filterKey = filterKey;
}
public Object readerKey() {
return readerKey;
}
public Object filterKey() {
return filterKey;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
FilterCacheKey that = (FilterCacheKey) o;
return (readerKey().equals(that.readerKey()) && filterKey.equals(that.filterKey));
}
@Override
public int hashCode() {
return readerKey().hashCode() + 31 * filterKey.hashCode();
}
} | 0true
| src_main_java_org_elasticsearch_index_cache_filter_weighted_WeightedFilterCache.java |
248 | fProjectionListener = new IProjectionListener() {
public void projectionEnabled() {
update();
}
public void projectionDisabled() {
update();
}
}; | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_FoldingActionGroup.java |
1,465 | public class BroadleafManageCustomerAddressesController extends BroadleafAbstractController {
@Resource(name = "blCustomerAddressService")
private CustomerAddressService customerAddressService;
@Resource(name = "blAddressService")
private AddressService addressService;
@Resource(name = "blCountryService")
private CountryService countryService;
@Resource(name = "blCustomerAddressValidator")
private CustomerAddressValidator customerAddressValidator;
@Resource(name = "blStateService")
private StateService stateService;
protected String addressUpdatedMessage = "Address successfully updated";
protected String addressAddedMessage = "Address successfully added";
protected String addressRemovedMessage = "Address successfully removed";
protected String addressRemovedErrorMessage = "Address could not be removed as it is in use";
protected static String customerAddressesView = "account/manageCustomerAddresses";
protected static String customerAddressesRedirect = "redirect:/account/addresses";
/**
* Initializes some custom binding operations for managing an address.
* More specifically, this method will attempt to bind state and country
* abbreviations to actual State and Country objects when the String
* representation of the abbreviation is submitted.
*
* @param request
* @param binder
* @throws Exception
*/
protected void initBinder(HttpServletRequest request, ServletRequestDataBinder binder) throws Exception {
binder.registerCustomEditor(State.class, "address.state", new PropertyEditorSupport() {
@Override
public void setAsText(String text) {
State state = stateService.findStateByAbbreviation(text);
setValue(state);
}
});
binder.registerCustomEditor(Country.class, "address.country", new PropertyEditorSupport() {
@Override
public void setAsText(String text) {
Country country = countryService.findCountryByAbbreviation(text);
setValue(country);
}
});
binder.registerCustomEditor(Phone.class, "address.phonePrimary", new PropertyEditorSupport() {
@Override
public void setAsText(String text) {
if (!StringUtils.isBlank(text)) {
Phone phone = new PhoneImpl();
phone.setPhoneNumber(text);
setValue(phone);
} else {
setValue(null);
}
}
});
}
protected List<State> populateStates() {
return stateService.findStates();
}
protected List<Country> populateCountries() {
return countryService.findCountries();
}
protected List<CustomerAddress> populateCustomerAddresses() {
return customerAddressService.readActiveCustomerAddressesByCustomerId(CustomerState.getCustomer().getId());
}
public String viewCustomerAddresses(HttpServletRequest request, Model model) {
model.addAttribute("customerAddressForm", new CustomerAddressForm());
return getCustomerAddressesView();
}
public String viewCustomerAddress(HttpServletRequest request, Model model, Long customerAddressId) {
CustomerAddress customerAddress = customerAddressService.readCustomerAddressById(customerAddressId);
if (customerAddress == null) {
throw new IllegalArgumentException("Customer Address not found with the specified customerAddressId");
}
CustomerAddressForm form = new CustomerAddressForm();
form.setAddress(customerAddress.getAddress());
form.setAddressName(customerAddress.getAddressName());
form.setCustomerAddressId(customerAddress.getId());
model.addAttribute("customerAddressForm", form);
return getCustomerAddressesView();
}
public String addCustomerAddress(HttpServletRequest request, Model model, CustomerAddressForm form, BindingResult result, RedirectAttributes redirectAttributes) throws ServiceException {
customerAddressValidator.validate(form, result);
if (result.hasErrors()) {
return getCustomerAddressesView();
}
Address address = addressService.saveAddress(form.getAddress());
CustomerAddress customerAddress = customerAddressService.create();
customerAddress.setAddress(address);
customerAddress.setAddressName(form.getAddressName());
customerAddress.setCustomer(CustomerState.getCustomer());
customerAddress = customerAddressService.saveCustomerAddress(customerAddress);
if (form.getAddress().isDefault()) {
customerAddressService.makeCustomerAddressDefault(customerAddress.getId(), customerAddress.getCustomer().getId());
}
if (!isAjaxRequest(request)) {
List<CustomerAddress> addresses = customerAddressService.readActiveCustomerAddressesByCustomerId(CustomerState.getCustomer().getId());
model.addAttribute("addresses", addresses);
}
redirectAttributes.addFlashAttribute("successMessage", getAddressAddedMessage());
return getCustomerAddressesRedirect();
}
public String updateCustomerAddress(HttpServletRequest request, Model model, Long customerAddressId, CustomerAddressForm form, BindingResult result, RedirectAttributes redirectAttributes) throws ServiceException {
customerAddressValidator.validate(form, result);
if (result.hasErrors()) {
return getCustomerAddressesView();
}
CustomerAddress customerAddress = customerAddressService.readCustomerAddressById(customerAddressId);
if (customerAddress == null) {
throw new IllegalArgumentException("Customer Address not found with the specified customerAddressId");
}
customerAddress.setAddress(form.getAddress());
customerAddress.setAddressName(form.getAddressName());
customerAddress = customerAddressService.saveCustomerAddress(customerAddress);
if (form.getAddress().isDefault()) {
customerAddressService.makeCustomerAddressDefault(customerAddress.getId(), customerAddress.getCustomer().getId());
}
redirectAttributes.addFlashAttribute("successMessage", getAddressUpdatedMessage());
return getCustomerAddressesRedirect();
}
public String removeCustomerAddress(HttpServletRequest request, Model model, Long customerAddressId, RedirectAttributes redirectAttributes) {
try {
customerAddressService.deleteCustomerAddressById(customerAddressId);
redirectAttributes.addFlashAttribute("successMessage", getAddressRemovedMessage());
} catch (DataIntegrityViolationException e) {
// This likely occurred because there is an order or cart in the system that is currently utilizing this
// address. Therefore, we're not able to remove it as it breaks a foreign key constraint
redirectAttributes.addFlashAttribute("errorMessage", getAddressRemovedErrorMessage());
}
return getCustomerAddressesRedirect();
}
public String getCustomerAddressesView() {
return customerAddressesView;
}
public String getCustomerAddressesRedirect() {
return customerAddressesRedirect;
}
public String getAddressUpdatedMessage() {
return addressUpdatedMessage;
}
public String getAddressAddedMessage() {
return addressAddedMessage;
}
public String getAddressRemovedMessage() {
return addressRemovedMessage;
}
public String getAddressRemovedErrorMessage() {
return addressRemovedErrorMessage;
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_controller_account_BroadleafManageCustomerAddressesController.java |
1,291 | public interface SearchFacetDao {
/**
* Returns the distinct values for the given fieldName inside of the search class as a list of the specified
* type. For example, reading the distinct values for "manufacturer" in the ProductImpl class and specifying
* the value class as String would search the ProductImpl entity's distinct manufacturers and return a
* List<String> of these values.
*
* @param fieldName
* @param fieldValueClass
* @return the distinct values for the field
*/
public <T> List<T> readDistinctValuesForField(String fieldName, Class<T> fieldValueClass);
/**
* Returns all SearchFacets that are tagged with showOnSearch
*
* @return the facets to display on searches
*/
public List<SearchFacet> readAllSearchFacets();
/**
* Persist to the data layer.
*
* @param searchFacet the instance to persist
* @return the instance after it has been persisted
*/
public SearchFacet save(SearchFacet searchFacet);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_dao_SearchFacetDao.java |
2,789 | public class IndexAlias {
private String alias;
private CompressedString filter;
private Filter parsedFilter;
public IndexAlias(String alias, @Nullable CompressedString filter, @Nullable Filter parsedFilter) {
this.alias = alias;
this.filter = filter;
this.parsedFilter = parsedFilter;
}
public String alias() {
return alias;
}
@Nullable
public CompressedString filter() {
return filter;
}
@Nullable
public Filter parsedFilter() {
return parsedFilter;
}
} | 0true
| src_main_java_org_elasticsearch_index_aliases_IndexAlias.java |
3,550 | public class TopicService implements ManagedService, RemoteService, EventPublishingService {
public static final String SERVICE_NAME = "hz:impl:topicService";
public static final int ORDERING_LOCKS_LENGTH = 1000;
final ConcurrentMap<String, LocalTopicStatsImpl> statsMap = new ConcurrentHashMap<String, LocalTopicStatsImpl>();
private final Lock[] orderingLocks = new Lock[ORDERING_LOCKS_LENGTH];
private NodeEngine nodeEngine;
private final ConstructorFunction<String, LocalTopicStatsImpl> localTopicStatsConstructorFunction =
new ConstructorFunction<String, LocalTopicStatsImpl>() {
public LocalTopicStatsImpl createNew(String mapName) {
return new LocalTopicStatsImpl();
}
};
private EventService eventService;
private ILogger logger;
@Override
public void init(NodeEngine nodeEngine, Properties properties) {
this.nodeEngine = nodeEngine;
for (int i = 0; i < orderingLocks.length; i++) {
orderingLocks[i] = new ReentrantLock();
}
eventService = nodeEngine.getEventService();
this.logger = nodeEngine.getLogger(TopicService.class);
}
@Override
public void reset() {
statsMap.clear();
}
@Override
public void shutdown(boolean terminate) {
reset();
}
public Lock getOrderLock(String key) {
int index = getOrderLockIndex(key);
return orderingLocks[index];
}
private int getOrderLockIndex(String key) {
int hash = key.hashCode();
if (hash == Integer.MIN_VALUE) {
return 0;
} else {
return Math.abs(hash) % orderingLocks.length;
}
}
@Override
public TopicProxy createDistributedObject(String name) {
if (isGlobalOrderingEnabled(name)) {
return new TotalOrderedTopicProxy(name, nodeEngine, this);
} else {
return new TopicProxy(name, nodeEngine, this);
}
}
private boolean isGlobalOrderingEnabled(String name) {
TopicConfig topicConfig = nodeEngine.getConfig().findTopicConfig(name);
return topicConfig.isGlobalOrderingEnabled();
}
@Override
public void destroyDistributedObject(String objectId) {
statsMap.remove(objectId);
}
@Override
public void dispatchEvent(Object event, Object listener) {
TopicEvent topicEvent = (TopicEvent) event;
Object msgObject = nodeEngine.toObject(topicEvent.data);
ClusterService clusterService = nodeEngine.getClusterService();
MemberImpl member = clusterService.getMember(topicEvent.publisherAddress);
if (member == null) {
if (logger.isLoggable(Level.INFO)) {
logger.info("Dropping message " + msgObject + " from unknown address:" + topicEvent.publisherAddress);
}
return;
}
Message message = new Message(topicEvent.name, msgObject, topicEvent.publishTime, member);
incrementReceivedMessages(topicEvent.name);
MessageListener messageListener = (MessageListener) listener;
messageListener.onMessage(message);
}
public LocalTopicStatsImpl getLocalTopicStats(String name) {
return getOrPutSynchronized(statsMap, name, statsMap, localTopicStatsConstructorFunction);
}
public void incrementPublishes(String topicName) {
getLocalTopicStats(topicName).incrementPublishes();
}
public void incrementReceivedMessages(String topicName) {
getLocalTopicStats(topicName).incrementReceives();
}
public void publishEvent(String name, TopicEvent event) {
Collection<EventRegistration> registrations = eventService.getRegistrations(TopicService.SERVICE_NAME, name);
eventService.publishEvent(TopicService.SERVICE_NAME, registrations, event, name.hashCode());
}
public String addMessageListener(String name, MessageListener listener) {
EventRegistration eventRegistration = eventService.registerListener(TopicService.SERVICE_NAME, name, listener);
return eventRegistration.getId();
}
public boolean removeMessageListener(String name, String registrationId) {
return eventService.deregisterListener(TopicService.SERVICE_NAME, name, registrationId);
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_topic_TopicService.java |
1,551 | @ManagedDescription("HazelcastInstance")
public class InstanceMBean extends HazelcastMBean<HazelcastInstanceImpl> {
final Config config;
final Cluster cluster;
private final NodeMBean nodeMBean;
private final ConnectionManagerMBean connectionManagerMBean;
private final EventServiceMBean eventServiceMBean;
private final OperationServiceMBean operationServiceMBean;
private final ProxyServiceMBean proxyServiceMBean;
private final ClientEngineMBean clientEngineMBean;
private final ManagedExecutorServiceMBean systemExecutorMBean;
private final ManagedExecutorServiceMBean operationExecutorMBean;
private final ManagedExecutorServiceMBean asyncExecutorMBean;
private final ManagedExecutorServiceMBean scheduledExecutorMBean;
private final ManagedExecutorServiceMBean clientExecutorMBean;
private final ManagedExecutorServiceMBean queryExecutorMBean;
private final ManagedExecutorServiceMBean ioExecutorMBean;
private final PartitionServiceMBean partitionServiceMBean;
protected InstanceMBean(HazelcastInstanceImpl hazelcastInstance, ManagementService managementService) {
super(hazelcastInstance, managementService);
Hashtable<String, String> properties = new Hashtable<String, String>(3);
properties.put("type", quote("HazelcastInstance"));
properties.put("instance", quote(hazelcastInstance.getName()));
properties.put("name", quote(hazelcastInstance.getName()));
setObjectName(properties);
config = hazelcastInstance.getConfig();
cluster = hazelcastInstance.getCluster();
Node node = hazelcastInstance.node;
ExecutionService executionService = node.nodeEngine.getExecutionService();
nodeMBean = new NodeMBean(hazelcastInstance, node, managementService);
register(nodeMBean);
connectionManagerMBean = new ConnectionManagerMBean(hazelcastInstance, node.connectionManager, service);
register(connectionManagerMBean);
eventServiceMBean = new EventServiceMBean(hazelcastInstance, node.nodeEngine.getEventService(), service);
register(eventServiceMBean);
OperationService operationService = node.nodeEngine.getOperationService();
operationServiceMBean = new OperationServiceMBean(hazelcastInstance, operationService, service);
register(operationServiceMBean);
proxyServiceMBean = new ProxyServiceMBean(hazelcastInstance, node.nodeEngine.getProxyService(), service);
register(proxyServiceMBean);
partitionServiceMBean = new PartitionServiceMBean(hazelcastInstance, node.partitionService,service);
register(partitionServiceMBean);
clientEngineMBean = new ClientEngineMBean(hazelcastInstance, node.clientEngine, service);
register(clientEngineMBean);
systemExecutorMBean = new ManagedExecutorServiceMBean(
hazelcastInstance, executionService.getExecutor(ExecutionService.SYSTEM_EXECUTOR), service);
register(systemExecutorMBean);
operationExecutorMBean = new ManagedExecutorServiceMBean(
hazelcastInstance, executionService.getExecutor(ExecutionService.OPERATION_EXECUTOR), service);
register(operationExecutorMBean);
asyncExecutorMBean = new ManagedExecutorServiceMBean(
hazelcastInstance, executionService.getExecutor(ExecutionService.ASYNC_EXECUTOR), service);
register(asyncExecutorMBean);
scheduledExecutorMBean = new ManagedExecutorServiceMBean(
hazelcastInstance, executionService.getExecutor(ExecutionService.SCHEDULED_EXECUTOR), service);
register(scheduledExecutorMBean);
clientExecutorMBean = new ManagedExecutorServiceMBean(
hazelcastInstance, executionService.getExecutor(ExecutionService.CLIENT_EXECUTOR), service);
register(clientExecutorMBean);
queryExecutorMBean = new ManagedExecutorServiceMBean(
hazelcastInstance, executionService.getExecutor(ExecutionService.QUERY_EXECUTOR), service);
register(queryExecutorMBean);
ioExecutorMBean = new ManagedExecutorServiceMBean(
hazelcastInstance, executionService.getExecutor(ExecutionService.IO_EXECUTOR), service);
register(ioExecutorMBean);
}
public PartitionServiceMBean getPartitionServiceMBean() {
return partitionServiceMBean;
}
public ManagedExecutorServiceMBean getSystemExecutorMBean() {
return systemExecutorMBean;
}
public ManagedExecutorServiceMBean getOperationExecutorMBean() {
return operationExecutorMBean;
}
public ManagedExecutorServiceMBean getAsyncExecutorMBean() {
return asyncExecutorMBean;
}
public ManagedExecutorServiceMBean getScheduledExecutorMBean() {
return scheduledExecutorMBean;
}
public ManagedExecutorServiceMBean getClientExecutorMBean() {
return clientExecutorMBean;
}
public ManagedExecutorServiceMBean getQueryExecutorMBean() {
return queryExecutorMBean;
}
public ManagedExecutorServiceMBean getIoExecutorMBean() {
return ioExecutorMBean;
}
public OperationServiceMBean getOperationServiceMBean() {
return operationServiceMBean;
}
public ProxyServiceMBean getProxyServiceMBean() {
return proxyServiceMBean;
}
public ClientEngineMBean getClientEngineMBean() {
return clientEngineMBean;
}
public ConnectionManagerMBean getConnectionManagerMBean() {
return connectionManagerMBean;
}
public EventServiceMBean getEventServiceMBean() {
return eventServiceMBean;
}
public NodeMBean getNodeMBean() {
return nodeMBean;
}
public HazelcastInstance getHazelcastInstance(){
return managedObject;
}
@ManagedAnnotation("name")
@ManagedDescription("Name of the Instance")
public String getName() {
return managedObject.getName();
}
@ManagedAnnotation("version")
@ManagedDescription("The Hazelcast version")
public String getVersion() {
return managedObject.node.getBuildInfo().getVersion();
}
@ManagedAnnotation("build")
@ManagedDescription("The Hazelcast build")
public String getBuild() {
return managedObject.node.getBuildInfo().getBuild();
}
@ManagedAnnotation("config")
@ManagedDescription("String representation of config")
public String getConfig() {
return config.toString();
}
@ManagedAnnotation("configSource")
@ManagedDescription("The source of config")
public String getConfigSource() {
File configurationFile = config.getConfigurationFile();
if (configurationFile != null) {
return configurationFile.getAbsolutePath();
}
URL configurationUrl = config.getConfigurationUrl();
if (configurationUrl != null) {
return configurationUrl.toString();
}
return null;
}
@ManagedAnnotation("groupName")
@ManagedDescription("Group Name")
public String getGroupName(){
return config.getGroupConfig().getName();
}
@ManagedAnnotation("port")
@ManagedDescription("Network Port")
public int getPort(){
return config.getNetworkConfig().getPort();
}
@ManagedAnnotation("clusterTime")
@ManagedDescription("Cluster-wide Time")
public long getClusterTime(){
return cluster.getClusterTime();
}
@ManagedAnnotation("memberCount")
@ManagedDescription("size of the cluster")
public int getMemberCount(){
return cluster.getMembers().size();
}
@ManagedAnnotation("Members")
@ManagedDescription("List of Members")
public List<String> getMembers(){
Set<Member> members = cluster.getMembers();
List<String> list = new ArrayList<String>(members.size());
for (Member member: members){
list.add(member.getInetSocketAddress().toString());
}
return list;
}
@ManagedAnnotation("running")
@ManagedDescription("Running state")
public boolean isRunning(){
return managedObject.getLifecycleService().isRunning();
}
@ManagedAnnotation(value = "shutdown", operation = true)
@ManagedDescription("Shutdown the Node")
public void shutdown(){
managedObject.getLifecycleService().shutdown();
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_jmx_InstanceMBean.java |
457 | public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder<PendingClusterTasksRequest, PendingClusterTasksResponse, PendingClusterTasksRequestBuilder> {
public PendingClusterTasksRequestBuilder(ClusterAdminClient client) {
super((InternalClusterAdminClient) client, new PendingClusterTasksRequest());
}
@Override
protected void doExecute(ActionListener<PendingClusterTasksResponse> listener) {
((InternalClusterAdminClient) client).pendingClusterTasks(request, listener);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_cluster_tasks_PendingClusterTasksRequestBuilder.java |
1,994 | assertTrueEventually(new AssertTask() {
@Override
public void run() throws Exception {
assertEquals(TestEventBasedMapStore.STORE_EVENTS.LOAD_ALL_KEYS, testMapStore.getEvents().poll());
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_mapstore_MapStoreTest.java |
45 | public class HeartbeatIAmAliveProcessor implements MessageProcessor
{
private final MessageHolder output;
private final ClusterContext clusterContext;
public HeartbeatIAmAliveProcessor( MessageHolder output, ClusterContext clusterContext )
{
this.output = output;
this.clusterContext = clusterContext;
}
@Override
public boolean process( Message<? extends MessageType> message )
{
if ( !message.isInternal() &&
!message.getMessageType().equals( HeartbeatMessage.i_am_alive ) )
{
// We assume the FROM header always exists.
String from = message.getHeader( Message.FROM );
if ( !from.equals( message.getHeader( Message.TO ) ) )
{
InstanceId theId;
if ( message.hasHeader( Message.INSTANCE_ID ) )
{
// INSTANCE_ID is there since after 1.9.6
theId = new InstanceId( Integer.parseInt( message.getHeader( Message.INSTANCE_ID ) ) );
}
else
{
theId = clusterContext.getConfiguration().getIdForUri( URI.create( from ) );
}
if ( theId != null && clusterContext.getConfiguration().getMembers().containsKey( theId )
&& !clusterContext.isMe( theId ) )
{
output.offer( message.copyHeadersTo(
Message.internal( HeartbeatMessage.i_am_alive,
new HeartbeatMessage.IAmAliveState( theId ) ),
Message.FROM, Message.INSTANCE_ID ) );
}
}
}
return true;
}
} | 1no label
| enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatIAmAliveProcessor.java |
1,918 | public class FactoryProvider<F> implements Provider<F>, HasDependencies {
/*
* This class implements the old @AssistedInject implementation that manually matches constructors
* to factory methods. The new child injector implementation lives in FactoryProvider2.
*/
private Injector injector;
private final TypeLiteral<F> factoryType;
private final Map<Method, AssistedConstructor<?>> factoryMethodToConstructor;
public static <F> Provider<F> newFactory(
Class<F> factoryType, Class<?> implementationType) {
return newFactory(TypeLiteral.get(factoryType), TypeLiteral.get(implementationType));
}
public static <F> Provider<F> newFactory(
TypeLiteral<F> factoryType, TypeLiteral<?> implementationType) {
Map<Method, AssistedConstructor<?>> factoryMethodToConstructor
= createMethodMapping(factoryType, implementationType);
if (!factoryMethodToConstructor.isEmpty()) {
return new FactoryProvider<F>(factoryType, factoryMethodToConstructor);
} else {
return new FactoryProvider2<F>(factoryType, Key.get(implementationType));
}
}
private FactoryProvider(TypeLiteral<F> factoryType,
Map<Method, AssistedConstructor<?>> factoryMethodToConstructor) {
this.factoryType = factoryType;
this.factoryMethodToConstructor = factoryMethodToConstructor;
checkDeclaredExceptionsMatch();
}
@Inject
void setInjectorAndCheckUnboundParametersAreInjectable(Injector injector) {
this.injector = injector;
for (AssistedConstructor<?> c : factoryMethodToConstructor.values()) {
for (Parameter p : c.getAllParameters()) {
if (!p.isProvidedByFactory() && !paramCanBeInjected(p, injector)) {
// this is lame - we're not using the proper mechanism to add an
// error to the injector. Throughout this class we throw exceptions
// to add errors, which isn't really the best way in Guice
throw newConfigurationException("Parameter of type '%s' is not injectable or annotated "
+ "with @Assisted for Constructor '%s'", p, c);
}
}
}
}
private void checkDeclaredExceptionsMatch() {
for (Map.Entry<Method, AssistedConstructor<?>> entry : factoryMethodToConstructor.entrySet()) {
for (Class<?> constructorException : entry.getValue().getDeclaredExceptions()) {
if (!isConstructorExceptionCompatibleWithFactoryExeception(
constructorException, entry.getKey().getExceptionTypes())) {
throw newConfigurationException("Constructor %s declares an exception, but no compatible "
+ "exception is thrown by the factory method %s", entry.getValue(), entry.getKey());
}
}
}
}
private boolean isConstructorExceptionCompatibleWithFactoryExeception(
Class<?> constructorException, Class<?>[] factoryExceptions) {
for (Class<?> factoryException : factoryExceptions) {
if (factoryException.isAssignableFrom(constructorException)) {
return true;
}
}
return false;
}
private boolean paramCanBeInjected(Parameter parameter, Injector injector) {
return parameter.isBound(injector);
}
private static Map<Method, AssistedConstructor<?>> createMethodMapping(
TypeLiteral<?> factoryType, TypeLiteral<?> implementationType) {
List<AssistedConstructor<?>> constructors = Lists.newArrayList();
for (Constructor<?> constructor : implementationType.getRawType().getDeclaredConstructors()) {
if (constructor.getAnnotation(AssistedInject.class) != null) {
@SuppressWarnings("unchecked") // the constructor type and implementation type agree
AssistedConstructor assistedConstructor = new AssistedConstructor(
constructor, implementationType.getParameterTypes(constructor));
constructors.add(assistedConstructor);
}
}
if (constructors.isEmpty()) {
return ImmutableMap.of();
}
Method[] factoryMethods = factoryType.getRawType().getMethods();
if (constructors.size() != factoryMethods.length) {
throw newConfigurationException("Constructor mismatch: %s has %s @AssistedInject "
+ "constructors, factory %s has %s creation methods", implementationType,
constructors.size(), factoryType, factoryMethods.length);
}
Map<ParameterListKey, AssistedConstructor> paramsToConstructor = Maps.newHashMap();
for (AssistedConstructor c : constructors) {
if (paramsToConstructor.containsKey(c.getAssistedParameters())) {
throw new RuntimeException("Duplicate constructor, " + c);
}
paramsToConstructor.put(c.getAssistedParameters(), c);
}
Map<Method, AssistedConstructor<?>> result = Maps.newHashMap();
for (Method method : factoryMethods) {
if (!method.getReturnType().isAssignableFrom(implementationType.getRawType())) {
throw newConfigurationException("Return type of method %s is not assignable from %s",
method, implementationType);
}
List<Type> parameterTypes = Lists.newArrayList();
for (TypeLiteral<?> parameterType : factoryType.getParameterTypes(method)) {
parameterTypes.add(parameterType.getType());
}
ParameterListKey methodParams = new ParameterListKey(parameterTypes);
if (!paramsToConstructor.containsKey(methodParams)) {
throw newConfigurationException("%s has no @AssistInject constructor that takes the "
+ "@Assisted parameters %s in that order. @AssistInject constructors are %s",
implementationType, methodParams, paramsToConstructor.values());
}
method.getParameterAnnotations();
for (Annotation[] parameterAnnotations : method.getParameterAnnotations()) {
for (Annotation parameterAnnotation : parameterAnnotations) {
if (parameterAnnotation.annotationType() == Assisted.class) {
throw newConfigurationException("Factory method %s has an @Assisted parameter, which "
+ "is incompatible with the deprecated @AssistedInject annotation. Please replace "
+ "@AssistedInject with @Inject on the %s constructor.",
method, implementationType);
}
}
}
AssistedConstructor matchingConstructor = paramsToConstructor.remove(methodParams);
result.put(method, matchingConstructor);
}
return result;
}
public Set<Dependency<?>> getDependencies() {
List<Dependency<?>> dependencies = Lists.newArrayList();
for (AssistedConstructor<?> constructor : factoryMethodToConstructor.values()) {
for (Parameter parameter : constructor.getAllParameters()) {
if (!parameter.isProvidedByFactory()) {
dependencies.add(Dependency.get(parameter.getPrimaryBindingKey()));
}
}
}
return ImmutableSet.copyOf(dependencies);
}
public F get() {
InvocationHandler invocationHandler = new InvocationHandler() {
public Object invoke(Object proxy, Method method, Object[] creationArgs) throws Throwable {
// pass methods from Object.class to the proxy
if (method.getDeclaringClass().equals(Object.class)) {
return method.invoke(this, creationArgs);
}
AssistedConstructor<?> constructor = factoryMethodToConstructor.get(method);
Object[] constructorArgs = gatherArgsForConstructor(constructor, creationArgs);
Object objectToReturn = constructor.newInstance(constructorArgs);
injector.injectMembers(objectToReturn);
return objectToReturn;
}
public Object[] gatherArgsForConstructor(
AssistedConstructor<?> constructor,
Object[] factoryArgs) {
int numParams = constructor.getAllParameters().size();
int argPosition = 0;
Object[] result = new Object[numParams];
for (int i = 0; i < numParams; i++) {
Parameter parameter = constructor.getAllParameters().get(i);
if (parameter.isProvidedByFactory()) {
result[i] = factoryArgs[argPosition];
argPosition++;
} else {
result[i] = parameter.getValue(injector);
}
}
return result;
}
};
@SuppressWarnings("unchecked") // we imprecisely treat the class literal of T as a Class<T>
Class<F> factoryRawType = (Class) factoryType.getRawType();
return factoryRawType.cast(Proxy.newProxyInstance(factoryRawType.getClassLoader(),
new Class[]{factoryRawType}, invocationHandler));
}
private static ConfigurationException newConfigurationException(String format, Object... args) {
return new ConfigurationException(ImmutableSet.of(new Message(Errors.format(format, args))));
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_assistedinject_FactoryProvider.java |
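A minimal usage sketch of the legacy factory-method-to-@AssistedInject-constructor matching implemented above. The Payment, PaymentFactory, and Clock names are illustrative assumptions, and the imports use the standard com.google.inject packages; the copy shown above is Elasticsearch's vendored variant under org.elasticsearch.common.inject, where package names differ.
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.AssistedInject;
import com.google.inject.assistedinject.FactoryProvider;
public class FactoryProviderSketch {
// A dependency that comes from the injector, not from the factory caller.
public static class Clock {
long now() {
return System.currentTimeMillis();
}
}
// The implementation: non-@Assisted constructor parameters are injected, @Assisted ones come from the factory method.
public static class Payment {
final long createdAt;
final String orderId;
@AssistedInject
public Payment(Clock clock, @Assisted String orderId) {
this.createdAt = clock.now();
this.orderId = orderId;
}
}
// The factory: each method is matched to an @AssistedInject constructor by its @Assisted parameter list.
public interface PaymentFactory {
Payment create(String orderId);
}
public static void main(String[] args) {
Injector injector = Guice.createInjector(new AbstractModule() {
@Override
protected void configure() {
bind(Clock.class).toInstance(new Clock());
bind(PaymentFactory.class)
.toProvider(FactoryProvider.newFactory(PaymentFactory.class, Payment.class));
}
});
// orderId is supplied by the caller; Clock is resolved from the injector
Payment payment = injector.getInstance(PaymentFactory.class).create("order-42");
System.out.println(payment.orderId + " created at " + payment.createdAt);
}
}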
1,509 | public class FailedRerouteAllocation extends RoutingAllocation {
private final List<ShardRouting> failedShards;
public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<ShardRouting> failedShards, ClusterInfo clusterInfo) {
super(deciders, routingNodes, nodes, clusterInfo);
this.failedShards = failedShards;
}
public List<ShardRouting> failedShards() {
return failedShards;
}
} | 0true
| src_main_java_org_elasticsearch_cluster_routing_allocation_FailedRerouteAllocation.java |
938 | @SuppressWarnings({ "unchecked" })
public class ORecordBytes extends ORecordAbstract<byte[]> {
private static final long serialVersionUID = 1L;
public static final byte RECORD_TYPE = 'b';
private static final byte[] EMPTY_SOURCE = new byte[] {};
public ORecordBytes() {
setup();
}
public ORecordBytes(final ODatabaseRecord iDatabase) {
setup();
ODatabaseRecordThreadLocal.INSTANCE.set(iDatabase);
}
public ORecordBytes(final ODatabaseRecord iDatabase, final byte[] iSource) {
this(iSource);
ODatabaseRecordThreadLocal.INSTANCE.set(iDatabase);
}
public ORecordBytes(final byte[] iSource) {
super(iSource);
_dirty = true;
setup();
}
public ORecordBytes(final ORID iRecordId) {
_recordId = (ORecordId) iRecordId;
setup();
}
public ORecordBytes reset(final byte[] iSource) {
reset();
_source = iSource;
return this;
}
public ORecordBytes copy() {
return (ORecordBytes) copyTo(new ORecordBytes());
}
@Override
public ORecordBytes fromStream(final byte[] iRecordBuffer) {
_source = iRecordBuffer;
_status = ORecordElement.STATUS.LOADED;
return this;
}
@Override
public byte[] toStream() {
return _source;
}
public byte getRecordType() {
return RECORD_TYPE;
}
@Override
protected void setup() {
super.setup();
_recordFormat = ORecordSerializerFactory.instance().getFormat(ORecordSerializerRaw.NAME);
}
/**
* Reads the input stream into memory. This is less efficient than {@link #fromInputStream(InputStream, int)} because the internal
* buffer may be reallocated multiple times. If you already know the input size, use {@link #fromInputStream(InputStream, int)}.
*
* @param in
* Input stream; use a buffered input stream wrapper to speed up reading
* @return Number of bytes read from the stream, which is also the size of the internal buffer in bytes
* @throws IOException
* if an I/O error occurs.
*/
public int fromInputStream(final InputStream in) throws IOException {
final OMemoryStream out = new OMemoryStream();
try {
final byte[] buffer = new byte[OMemoryStream.DEF_SIZE];
int readBytesCount;
while (true) {
readBytesCount = in.read(buffer, 0, buffer.length);
if (readBytesCount == -1) {
break;
}
out.write(buffer, 0, readBytesCount);
}
out.flush();
_source = out.toByteArray();
} finally {
out.close();
}
_size = _source.length;
return _size;
}
/**
* Reads the input stream into memory, up to the given maximum number of bytes. This is more efficient than
* {@link #fromInputStream(InputStream)} because the internal buffer is allocated only once.
*
* @param in
* Input stream; use a buffered input stream wrapper to speed up reading
* @param maxSize
* Maximum number of bytes to read
* @return Number of bytes read from the stream, which is also the size of the internal buffer in bytes
* @throws IOException
* if an I/O error occurs.
*/
public int fromInputStream(final InputStream in, final int maxSize) throws IOException {
final byte[] buffer = new byte[maxSize];
int totalBytesCount = 0;
int readBytesCount;
while (totalBytesCount < maxSize) {
readBytesCount = in.read(buffer, totalBytesCount, buffer.length - totalBytesCount);
if (readBytesCount == -1) {
break;
}
totalBytesCount += readBytesCount;
}
if (totalBytesCount == 0) {
_source = EMPTY_SOURCE;
_size = 0;
} else if (totalBytesCount == maxSize) {
_source = buffer;
_size = maxSize;
} else {
_source = Arrays.copyOf(buffer, totalBytesCount);
_size = totalBytesCount;
}
return _size;
}
public void toOutputStream(final OutputStream out) throws IOException {
checkForLoading();
if (_source.length > 0) {
out.write(_source);
}
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_record_impl_ORecordBytes.java |
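A small usage sketch of the two stream-loading variants documented in the Javadoc above; the file path and the choice of which variant to call are illustrative assumptions, not part of the class.
import com.orientechnologies.orient.core.record.impl.ORecordBytes;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
public class ORecordBytesSketch {
// Size known up front: a single buffer of exactly `size` bytes is allocated.
static ORecordBytes loadWithKnownSize(Path file) throws IOException {
ORecordBytes record = new ORecordBytes();
int size = (int) Files.size(file);
try (InputStream in = new BufferedInputStream(Files.newInputStream(file))) {
record.fromInputStream(in, size);
}
return record;
}
// Size unknown: the internal buffer grows as the stream is consumed (multiple allocations).
static ORecordBytes loadWithUnknownSize(InputStream in) throws IOException {
ORecordBytes record = new ORecordBytes();
record.fromInputStream(new BufferedInputStream(in));
return record;
}
public static void main(String[] args) throws IOException {
ORecordBytes record = loadWithKnownSize(Paths.get("blob.bin")); // hypothetical file name
System.out.println("loaded " + record.toStream().length + " bytes");
}
}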
976 | public abstract class IndicesReplicationOperationRequestBuilder<Request extends IndicesReplicationOperationRequest<Request>, Response extends ActionResponse, RequestBuilder extends IndicesReplicationOperationRequestBuilder<Request, Response, RequestBuilder>>
extends ActionRequestBuilder<Request, Response, RequestBuilder> {
protected IndicesReplicationOperationRequestBuilder(InternalGenericClient client, Request request) {
super(client, request);
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
@SuppressWarnings("unchecked")
public final RequestBuilder setTimeout(TimeValue timeout) {
request.timeout(timeout);
return (RequestBuilder) this;
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
@SuppressWarnings("unchecked")
public final RequestBuilder setTimeout(String timeout) {
request.timeout(timeout);
return (RequestBuilder) this;
}
@SuppressWarnings("unchecked")
public final RequestBuilder setIndices(String... indices) {
request.indices(indices);
return (RequestBuilder) this;
}
/**
* Specifies which types of requested indices to ignore (for example, indices that don't exist)
* and how to handle wildcard index expressions.
*/
@SuppressWarnings("unchecked")
public RequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request().indicesOptions(indicesOptions);
return (RequestBuilder) this;
}
/**
* Sets the replication type.
*/
@SuppressWarnings("unchecked")
public RequestBuilder setReplicationType(ReplicationType replicationType) {
request.replicationType(replicationType);
return (RequestBuilder) this;
}
/**
* Sets the replication type.
*/
@SuppressWarnings("unchecked")
public RequestBuilder setReplicationType(String replicationType) {
request.replicationType(replicationType);
return (RequestBuilder) this;
}
/**
* Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}
*/
@SuppressWarnings("unchecked")
public RequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) {
request.consistencyLevel(consistencyLevel);
return (RequestBuilder) this;
}
} | 0true
| src_main_java_org_elasticsearch_action_support_replication_IndicesReplicationOperationRequestBuilder.java |
26 | }), new Function<Edge, Vertex>() {
@Override
public Vertex apply(@Nullable Edge edge) {
return edge.getEnd();
}
}); | 0true
| titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java |
950 | Thread t = new Thread() {
public void run() {
lock.lock();
}
}; | 0true
| hazelcast_src_test_java_com_hazelcast_concurrent_lock_LockTestUtils.java |
861 | threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
}
}); | 0true
| src_main_java_org_elasticsearch_action_search_type_TransportSearchDfsQueryAndFetchAction.java |
1,824 | public class AddSearchMappingRequest {
private final PersistencePerspective persistencePerspective;
private final CriteriaTransferObject requestedCto;
private final String ceilingEntityFullyQualifiedClassname;
private final Map<String, FieldMetadata> mergedProperties;
private final String propertyName;
private final FieldManager fieldManager;
private final DataFormatProvider dataFormatProvider;
private final RestrictionFactory restrictionFactory;
public AddSearchMappingRequest(PersistencePerspective persistencePerspective, CriteriaTransferObject
requestedCto, String ceilingEntityFullyQualifiedClassname, Map<String, FieldMetadata> mergedProperties,
String propertyName, FieldManager fieldManager,
DataFormatProvider dataFormatProvider, RestrictionFactory restrictionFactory) {
this.persistencePerspective = persistencePerspective;
this.requestedCto = requestedCto;
this.ceilingEntityFullyQualifiedClassname = ceilingEntityFullyQualifiedClassname;
this.mergedProperties = mergedProperties;
this.propertyName = propertyName;
this.fieldManager = fieldManager;
this.dataFormatProvider = dataFormatProvider;
this.restrictionFactory = restrictionFactory;
}
public PersistencePerspective getPersistencePerspective() {
return persistencePerspective;
}
public CriteriaTransferObject getRequestedCto() {
return requestedCto;
}
public String getCeilingEntityFullyQualifiedClassname() {
return ceilingEntityFullyQualifiedClassname;
}
public Map<String, FieldMetadata> getMergedProperties() {
return mergedProperties;
}
public String getPropertyName() {
return propertyName;
}
public FieldManager getFieldManager() {
return fieldManager;
}
public DataFormatProvider getDataFormatProvider() {
return dataFormatProvider;
}
public RestrictionFactory getRestrictionFactory() {
return restrictionFactory;
}
} | 1no label
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_service_persistence_module_provider_request_AddSearchMappingRequest.java |
485 | static final class Fields {
static final XContentBuilderString TOKENS = new XContentBuilderString("tokens");
static final XContentBuilderString TOKEN = new XContentBuilderString("token");
static final XContentBuilderString START_OFFSET = new XContentBuilderString("start_offset");
static final XContentBuilderString END_OFFSET = new XContentBuilderString("end_offset");
static final XContentBuilderString TYPE = new XContentBuilderString("type");
static final XContentBuilderString POSITION = new XContentBuilderString("position");
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_analyze_AnalyzeResponse.java |
3,676 | public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
ParentFieldMapper.Builder builder = parent();
for (Map.Entry<String, Object> entry : node.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("type")) {
builder.type(fieldNode.toString());
} else if (fieldName.equals("postings_format")) {
String postingFormatName = fieldNode.toString();
builder.postingsFormat(parserContext.postingFormatService().get(postingFormatName));
}
}
return builder;
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_internal_ParentFieldMapper.java |
1,638 | asynchMessageManager = new TimerTask() {
@Override
public void run() {
purgePendingMessages();
}
}; | 0true
| distributed_src_main_java_com_orientechnologies_orient_server_hazelcast_OHazelcastDistributedMessageService.java |
4,483 | public class RecoveryTarget extends AbstractComponent {
public static class Actions {
public static final String FILES_INFO = "index/shard/recovery/filesInfo";
public static final String FILE_CHUNK = "index/shard/recovery/fileChunk";
public static final String CLEAN_FILES = "index/shard/recovery/cleanFiles";
public static final String TRANSLOG_OPS = "index/shard/recovery/translogOps";
public static final String PREPARE_TRANSLOG = "index/shard/recovery/prepareTranslog";
public static final String FINALIZE = "index/shard/recovery/finalize";
}
private final ThreadPool threadPool;
private final TransportService transportService;
private final IndicesService indicesService;
private final RecoverySettings recoverySettings;
private final ConcurrentMapLong<RecoveryStatus> onGoingRecoveries = ConcurrentCollections.newConcurrentMapLong();
@Inject
public RecoveryTarget(Settings settings, ThreadPool threadPool, TransportService transportService, IndicesService indicesService,
IndicesLifecycle indicesLifecycle, RecoverySettings recoverySettings) {
super(settings);
this.threadPool = threadPool;
this.transportService = transportService;
this.indicesService = indicesService;
this.recoverySettings = recoverySettings;
transportService.registerHandler(Actions.FILES_INFO, new FilesInfoRequestHandler());
transportService.registerHandler(Actions.FILE_CHUNK, new FileChunkTransportRequestHandler());
transportService.registerHandler(Actions.CLEAN_FILES, new CleanFilesRequestHandler());
transportService.registerHandler(Actions.PREPARE_TRANSLOG, new PrepareForTranslogOperationsRequestHandler());
transportService.registerHandler(Actions.TRANSLOG_OPS, new TranslogOperationsRequestHandler());
transportService.registerHandler(Actions.FINALIZE, new FinalizeRecoveryRequestHandler());
indicesLifecycle.addListener(new IndicesLifecycle.Listener() {
@Override
public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard) {
if (indexShard != null) {
removeAndCleanOnGoingRecovery(findRecoveryByShard(indexShard));
}
}
});
}
public RecoveryStatus peerRecoveryStatus(ShardId shardId) {
RecoveryStatus peerRecoveryStatus = findRecoveryByShardId(shardId);
if (peerRecoveryStatus == null) {
return null;
}
// update how long it takes if we are still recovering...
if (peerRecoveryStatus.startTime > 0 && peerRecoveryStatus.stage != RecoveryStatus.Stage.DONE) {
peerRecoveryStatus.time = System.currentTimeMillis() - peerRecoveryStatus.startTime;
}
return peerRecoveryStatus;
}
public void cancelRecovery(IndexShard indexShard) {
RecoveryStatus recoveryStatus = findRecoveryByShard(indexShard);
// it might be null if the recovery source got canceled first
if (recoveryStatus == null) {
return;
}
if (recoveryStatus.sentCanceledToSource) {
return;
}
recoveryStatus.cancel();
try {
if (recoveryStatus.recoveryThread != null) {
recoveryStatus.recoveryThread.interrupt();
}
// give it a grace period so the cancellation actually gets acknowledged to the source
final long sleepTime = 100;
final long maxSleepTime = 10000;
long rounds = Math.round(maxSleepTime / sleepTime);
while (!recoveryStatus.sentCanceledToSource && rounds > 0) {
rounds--;
try {
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break; // interrupted - step out!
}
}
} finally {
removeAndCleanOnGoingRecovery(recoveryStatus);
}
}
public void startRecovery(final StartRecoveryRequest request, final InternalIndexShard indexShard, final RecoveryListener listener) {
try {
indexShard.recovering("from " + request.sourceNode());
} catch (IllegalIndexShardStateException e) {
// that's fine, since we might be called concurrently, just ignore this, we are already recovering
listener.onIgnoreRecovery(false, "already in recovering process, " + e.getMessage());
return;
}
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
// create a new recovery status, and process...
RecoveryStatus recoveryStatus = new RecoveryStatus(request.recoveryId(), indexShard);
onGoingRecoveries.put(recoveryStatus.recoveryId, recoveryStatus);
doRecovery(request, recoveryStatus, listener);
}
});
}
public void retryRecovery(final StartRecoveryRequest request, final RecoveryStatus status, final RecoveryListener listener) {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
doRecovery(request, status, listener);
}
});
}
private void doRecovery(final StartRecoveryRequest request, final RecoveryStatus recoveryStatus, final RecoveryListener listener) {
if (request.sourceNode() == null) {
listener.onIgnoreRecovery(false, "No node to recover from, retry on next cluster state update");
return;
}
final InternalIndexShard shard = recoveryStatus.indexShard;
if (shard == null) {
listener.onIgnoreRecovery(false, "shard missing locally, stop recovery");
return;
}
if (shard.state() == IndexShardState.CLOSED) {
listener.onIgnoreRecovery(false, "local shard closed, stop recovery");
return;
}
if (recoveryStatus.isCanceled()) {
// don't remove it, the cancellation code will remove it...
listener.onIgnoreRecovery(false, "canceled recovery");
return;
}
recoveryStatus.recoveryThread = Thread.currentThread();
try {
logger.trace("[{}][{}] starting recovery from {}", request.shardId().index().name(), request.shardId().id(), request.sourceNode());
StopWatch stopWatch = new StopWatch().start();
RecoveryResponse recoveryResponse = transportService.submitRequest(request.sourceNode(), RecoverySource.Actions.START_RECOVERY, request, new FutureTransportResponseHandler<RecoveryResponse>() {
@Override
public RecoveryResponse newInstance() {
return new RecoveryResponse();
}
}).txGet();
if (shard.state() == IndexShardState.CLOSED) {
removeAndCleanOnGoingRecovery(recoveryStatus);
listener.onIgnoreRecovery(false, "local shard closed, stop recovery");
return;
}
stopWatch.stop();
if (logger.isDebugEnabled()) {
logger.debug("recovery completed from [{}], took [{}]", request.shardId(), request.sourceNode(), stopWatch.totalTime());
} else if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder();
sb.append('[').append(request.shardId().index().name()).append(']').append('[').append(request.shardId().id()).append("] ");
sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(stopWatch.totalTime()).append("]\n");
sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]").append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
.append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [").append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']')
.append("\n");
sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size()).append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log operations")
.append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
.append("\n");
sb.append(" phase3: recovered [").append(recoveryResponse.phase3Operations).append("]").append(" transaction log operations")
.append(", took [").append(timeValueMillis(recoveryResponse.phase3Time)).append("]");
logger.trace(sb.toString());
}
removeAndCleanOnGoingRecovery(recoveryStatus);
listener.onRecoveryDone();
} catch (Throwable e) {
// logger.trace("[{}][{}] Got exception on recovery", e, request.shardId().index().name(), request.shardId().id());
if (recoveryStatus.isCanceled()) {
// don't remove it, the cancellation code will remove it...
listener.onIgnoreRecovery(false, "canceled recovery");
return;
}
if (shard.state() == IndexShardState.CLOSED) {
removeAndCleanOnGoingRecovery(recoveryStatus);
listener.onIgnoreRecovery(false, "local shard closed, stop recovery");
return;
}
Throwable cause = ExceptionsHelper.unwrapCause(e);
if (cause instanceof RecoveryEngineException) {
// unwrap an exception that was thrown as part of the recovery
cause = cause.getCause();
}
// do it twice, in case we have double transport exception
cause = ExceptionsHelper.unwrapCause(cause);
if (cause instanceof RecoveryEngineException) {
// unwrap an exception that was thrown as part of the recovery
cause = cause.getCause();
}
// here, we would add checks against exceptions that need to be retried (and not removeAndClean in this case)
if (cause instanceof IndexShardNotStartedException || cause instanceof IndexMissingException || cause instanceof IndexShardMissingException) {
// if the target is not ready yet, retry
listener.onRetryRecovery(TimeValue.timeValueMillis(500), recoveryStatus);
return;
}
if (cause instanceof DelayRecoveryException) {
listener.onRetryRecovery(TimeValue.timeValueMillis(500), recoveryStatus);
return;
}
// here, we check against ignore recovery options
// in general, no need to clean the shard on ignored recovery, since we want to try and reuse it later
// it will get deleted in the IndicesStore if all are allocated and no shard exists on this node...
removeAndCleanOnGoingRecovery(recoveryStatus);
if (cause instanceof ConnectTransportException) {
listener.onIgnoreRecovery(true, "source node disconnected (" + request.sourceNode() + ")");
return;
}
if (cause instanceof IndexShardClosedException) {
listener.onIgnoreRecovery(true, "source shard is closed (" + request.sourceNode() + ")");
return;
}
if (cause instanceof AlreadyClosedException) {
listener.onIgnoreRecovery(true, "source shard is closed (" + request.sourceNode() + ")");
return;
}
logger.trace("[{}][{}] recovery from [{}] failed", e, request.shardId().index().name(), request.shardId().id(), request.sourceNode());
listener.onRecoveryFailure(new RecoveryFailedException(request, e), true);
}
}
public static interface RecoveryListener {
void onRecoveryDone();
void onRetryRecovery(TimeValue retryAfter, RecoveryStatus status);
void onIgnoreRecovery(boolean removeShard, String reason);
void onRecoveryFailure(RecoveryFailedException e, boolean sendShardFailure);
}
@Nullable
private RecoveryStatus findRecoveryByShardId(ShardId shardId) {
for (RecoveryStatus recoveryStatus : onGoingRecoveries.values()) {
if (recoveryStatus.shardId.equals(shardId)) {
return recoveryStatus;
}
}
return null;
}
@Nullable
private RecoveryStatus findRecoveryByShard(IndexShard indexShard) {
for (RecoveryStatus recoveryStatus : onGoingRecoveries.values()) {
if (recoveryStatus.indexShard == indexShard) {
return recoveryStatus;
}
}
return null;
}
private void removeAndCleanOnGoingRecovery(@Nullable RecoveryStatus status) {
if (status == null) {
return;
}
// clean it from the on going recoveries since it is being closed
status = onGoingRecoveries.remove(status.recoveryId);
if (status == null) {
return;
}
// just mark it as canceled as well, just in case there are in flight requests
// coming from the recovery target
status.cancel();
// clean open index outputs
Set<Entry<String, IndexOutput>> entrySet = status.cancleAndClearOpenIndexInputs();
Iterator<Entry<String, IndexOutput>> iterator = entrySet.iterator();
while (iterator.hasNext()) {
Map.Entry<String, IndexOutput> entry = iterator.next();
synchronized (entry.getValue()) {
IOUtils.closeWhileHandlingException(entry.getValue());
}
iterator.remove();
}
status.checksums = null;
}
class PrepareForTranslogOperationsRequestHandler extends BaseTransportRequestHandler<RecoveryPrepareForTranslogOperationsRequest> {
@Override
public RecoveryPrepareForTranslogOperationsRequest newInstance() {
return new RecoveryPrepareForTranslogOperationsRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
onGoingRecovery.stage = RecoveryStatus.Stage.TRANSLOG;
onGoingRecovery.indexShard.performRecoveryPrepareForTranslog();
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class FinalizeRecoveryRequestHandler extends BaseTransportRequestHandler<RecoveryFinalizeRecoveryRequest> {
@Override
public RecoveryFinalizeRecoveryRequest newInstance() {
return new RecoveryFinalizeRecoveryRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
onGoingRecovery.stage = RecoveryStatus.Stage.FINALIZE;
onGoingRecovery.indexShard.performRecoveryFinalization(false, onGoingRecovery);
onGoingRecovery.time = System.currentTimeMillis() - onGoingRecovery.startTime;
onGoingRecovery.stage = RecoveryStatus.Stage.DONE;
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class TranslogOperationsRequestHandler extends BaseTransportRequestHandler<RecoveryTranslogOperationsRequest> {
@Override
public RecoveryTranslogOperationsRequest newInstance() {
return new RecoveryTranslogOperationsRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryTranslogOperationsRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
InternalIndexShard shard = (InternalIndexShard) indicesService.indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());
for (Translog.Operation operation : request.operations()) {
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
shard.performRecoveryOperation(operation);
onGoingRecovery.currentTranslogOperations++;
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class FilesInfoRequestHandler extends BaseTransportRequestHandler<RecoveryFilesInfoRequest> {
@Override
public RecoveryFilesInfoRequest newInstance() {
return new RecoveryFilesInfoRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
onGoingRecovery.phase1FileNames = request.phase1FileNames;
onGoingRecovery.phase1FileSizes = request.phase1FileSizes;
onGoingRecovery.phase1ExistingFileNames = request.phase1ExistingFileNames;
onGoingRecovery.phase1ExistingFileSizes = request.phase1ExistingFileSizes;
onGoingRecovery.phase1TotalSize = request.phase1TotalSize;
onGoingRecovery.phase1ExistingTotalSize = request.phase1ExistingTotalSize;
onGoingRecovery.stage = RecoveryStatus.Stage.INDEX;
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class CleanFilesRequestHandler extends BaseTransportRequestHandler<RecoveryCleanFilesRequest> {
@Override
public RecoveryCleanFilesRequest newInstance() {
return new RecoveryCleanFilesRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
Store store = onGoingRecovery.indexShard.store();
// first, we go and move files that were created with the recovery id suffix to
// the actual names, it's ok if we have a corrupted index here, since we have replicas
// to recover from in case of a full cluster shutdown just when this code executes...
String prefix = "recovery." + onGoingRecovery.startTime + ".";
Set<String> filesToRename = Sets.newHashSet();
for (String existingFile : store.directory().listAll()) {
if (existingFile.startsWith(prefix)) {
filesToRename.add(existingFile.substring(prefix.length(), existingFile.length()));
}
}
Exception failureToRename = null;
if (!filesToRename.isEmpty()) {
// first, go and delete the existing ones
final Directory directory = store.directory();
for (String file : filesToRename) {
try {
directory.deleteFile(file);
} catch (Throwable ex) {
logger.debug("failed to delete file [{}]", ex, file);
}
}
for (String fileToRename : filesToRename) {
// now, rename the files... and fail if it won't work
store.renameFile(prefix + fileToRename, fileToRename);
}
}
// now write checksums
store.writeChecksums(onGoingRecovery.checksums);
for (String existingFile : store.directory().listAll()) {
// don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
if (!request.snapshotFiles().contains(existingFile) && !Store.isChecksum(existingFile)) {
try {
store.directory().deleteFile(existingFile);
} catch (Exception e) {
// ignore, we don't really care, will get deleted later on
}
}
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class FileChunkTransportRequestHandler extends BaseTransportRequestHandler<RecoveryFileChunkRequest> {
@Override
public RecoveryFileChunkRequest newInstance() {
return new RecoveryFileChunkRequest();
}
@Override
public String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception {
RecoveryStatus onGoingRecovery = onGoingRecoveries.get(request.recoveryId());
if (onGoingRecovery == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
Store store = onGoingRecovery.indexShard.store();
IndexOutput indexOutput;
if (request.position() == 0) {
// first request
onGoingRecovery.checksums.remove(request.name());
indexOutput = onGoingRecovery.removeOpenIndexOutputs(request.name());
IOUtils.closeWhileHandlingException(indexOutput);
// we create an output with no checksum, this is because the pure binary data of the file is not
// the checksum (because of seek). We will create the checksum file once copying is done
// also, we check if the file already exists, if it does, we create a file name based
// on the current recovery "id" and later we make the switch, the reason for that is that
// we only want to overwrite the index files once we have copied everything over, and not create a
// case where the index is half moved
String fileName = request.name();
if (store.directory().fileExists(fileName)) {
fileName = "recovery." + onGoingRecovery.startTime + "." + fileName;
}
indexOutput = onGoingRecovery.openAndPutIndexOutput(request.name(), fileName, store);
} else {
indexOutput = onGoingRecovery.getOpenIndexOutput(request.name());
}
if (indexOutput == null) {
// shard is getting closed on us
throw new IndexShardClosedException(request.shardId());
}
boolean success = false;
synchronized (indexOutput) {
try {
if (recoverySettings.rateLimiter() != null) {
recoverySettings.rateLimiter().pause(request.content().length());
}
BytesReference content = request.content();
if (!content.hasArray()) {
content = content.toBytesArray();
}
indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
onGoingRecovery.currentFilesSize.addAndGet(request.length());
if (indexOutput.getFilePointer() == request.length()) {
// we are done
indexOutput.close();
// write the checksum
if (request.checksum() != null) {
onGoingRecovery.checksums.put(request.name(), request.checksum());
}
store.directory().sync(Collections.singleton(request.name()));
IndexOutput remove = onGoingRecovery.removeOpenIndexOutputs(request.name());
assert remove == indexOutput;
}
success = true;
} finally {
if (!success || onGoingRecovery.isCanceled()) {
IndexOutput remove = onGoingRecovery.removeOpenIndexOutputs(request.name());
assert remove == indexOutput;
IOUtils.closeWhileHandlingException(indexOutput);
}
}
}
if (onGoingRecovery.isCanceled()) {
onGoingRecovery.sentCanceledToSource = true;
throw new IndexShardClosedException(request.shardId());
}
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
} | 1no label
| src_main_java_org_elasticsearch_indices_recovery_RecoveryTarget.java |
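The comments in FileChunkTransportRequestHandler and CleanFilesRequestHandler above describe how a half-overwritten index is avoided: incoming chunks whose target file already exists are written to a "recovery.<startTime>.<name>" temp file, and only once every file has arrived are the originals deleted and the temp files renamed into place. Below is a simplified, self-contained sketch of that idea using java.nio.file instead of the Lucene Directory/Store API the real handlers use; the class and method names here are illustrative assumptions.
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.List;
public class RecoveryTempFileSketch {
private final Path indexDir;
private final long recoveryStartTime;
public RecoveryTempFileSketch(Path indexDir, long recoveryStartTime) {
this.indexDir = indexDir;
this.recoveryStartTime = recoveryStartTime;
}
// Pick the name an incoming file is written under: temp-prefixed if it would overwrite an existing file.
public Path targetFor(String fileName) {
Path finalPath = indexDir.resolve(fileName);
if (Files.exists(finalPath)) {
return indexDir.resolve("recovery." + recoveryStartTime + "." + fileName);
}
return finalPath;
}
// Once all files have been received, swap every temp-prefixed file over its original (the CLEAN_FILES step).
public void swapTempFiles() throws IOException {
String prefix = "recovery." + recoveryStartTime + ".";
List<Path> tempFiles = new ArrayList<>();
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexDir, prefix + "*")) {
for (Path p : stream) {
tempFiles.add(p);
}
}
for (Path temp : tempFiles) {
String original = temp.getFileName().toString().substring(prefix.length());
// delete-then-rename, as in the handler above; REPLACE_EXISTING covers the delete step
Files.move(temp, indexDir.resolve(original), StandardCopyOption.REPLACE_EXISTING);
}
}
}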
3,654 | public interface IncludeInAll extends Mapper {
void includeInAll(Boolean includeInAll);
void includeInAllIfNotSet(Boolean includeInAll);
void unsetIncludeInAll();
} | 0true
| src_main_java_org_elasticsearch_index_mapper_internal_AllFieldMapper.java |
1,607 | public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
private final ThreadPool threadPool;
private final DiscoveryService discoveryService;
private final OperationRouting operationRouting;
private final TransportService transportService;
private final NodeSettingsService nodeSettingsService;
private final TimeValue reconnectInterval;
private volatile PrioritizedEsThreadPoolExecutor updateTasksExecutor;
private final List<ClusterStateListener> priorityClusterStateListeners = new CopyOnWriteArrayList<ClusterStateListener>();
private final List<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<ClusterStateListener>();
private final List<ClusterStateListener> lastClusterStateListeners = new CopyOnWriteArrayList<ClusterStateListener>();
private final LocalNodeMasterListeners localNodeMasterListeners;
private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue();
private volatile ClusterState clusterState = ClusterState.builder().build();
private final ClusterBlocks.Builder initialBlocks = ClusterBlocks.builder().addGlobalBlock(Discovery.NO_MASTER_BLOCK);
private volatile ScheduledFuture reconnectToNodes;
@Inject
public InternalClusterService(Settings settings, DiscoveryService discoveryService, OperationRouting operationRouting, TransportService transportService,
NodeSettingsService nodeSettingsService, ThreadPool threadPool) {
super(settings);
this.operationRouting = operationRouting;
this.transportService = transportService;
this.discoveryService = discoveryService;
this.threadPool = threadPool;
this.nodeSettingsService = nodeSettingsService;
this.nodeSettingsService.setClusterService(this);
this.reconnectInterval = componentSettings.getAsTime("reconnect_interval", TimeValue.timeValueSeconds(10));
localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);
}
public NodeSettingsService settingsService() {
return this.nodeSettingsService;
}
public void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException {
if (lifecycle.started()) {
throw new ElasticsearchIllegalStateException("can't set initial block when started");
}
initialBlocks.addGlobalBlock(block);
}
@Override
public void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException {
if (lifecycle.started()) {
throw new ElasticsearchIllegalStateException("can't set initial block when started");
}
initialBlocks.removeGlobalBlock(block);
}
@Override
protected void doStart() throws ElasticsearchException {
add(localNodeMasterListeners);
this.clusterState = ClusterState.builder().blocks(initialBlocks).build();
this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(daemonThreadFactory(settings, "clusterService#updateTask"));
this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
}
@Override
protected void doStop() throws ElasticsearchException {
this.reconnectToNodes.cancel(true);
for (NotifyTimeout onGoingTimeout : onGoingTimeouts) {
onGoingTimeout.cancel();
onGoingTimeout.listener.onClose();
}
updateTasksExecutor.shutdown();
try {
updateTasksExecutor.awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// ignore
}
remove(localNodeMasterListeners);
}
@Override
protected void doClose() throws ElasticsearchException {
}
@Override
public DiscoveryNode localNode() {
return discoveryService.localNode();
}
@Override
public OperationRouting operationRouting() {
return operationRouting;
}
public ClusterState state() {
return this.clusterState;
}
public void addFirst(ClusterStateListener listener) {
priorityClusterStateListeners.add(listener);
}
public void addLast(ClusterStateListener listener) {
lastClusterStateListeners.add(listener);
}
public void add(ClusterStateListener listener) {
clusterStateListeners.add(listener);
}
public void remove(ClusterStateListener listener) {
clusterStateListeners.remove(listener);
priorityClusterStateListeners.remove(listener);
lastClusterStateListeners.remove(listener);
for (Iterator<NotifyTimeout> it = onGoingTimeouts.iterator(); it.hasNext(); ) {
NotifyTimeout timeout = it.next();
if (timeout.listener.equals(listener)) {
timeout.cancel();
it.remove();
}
}
}
@Override
public void add(LocalNodeMasterListener listener) {
localNodeMasterListeners.add(listener);
}
@Override
public void remove(LocalNodeMasterListener listener) {
localNodeMasterListeners.remove(listener);
}
public void add(final TimeValue timeout, final TimeoutClusterStateListener listener) {
if (lifecycle.stoppedOrClosed()) {
listener.onClose();
return;
}
// call the post added notification on the same event thread
try {
updateTasksExecutor.execute(new PrioritizedRunnable(Priority.HIGH) {
@Override
public void run() {
NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout);
notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout);
onGoingTimeouts.add(notifyTimeout);
clusterStateListeners.add(listener);
listener.postAdded();
}
});
} catch (EsRejectedExecutionException e) {
if (lifecycle.stoppedOrClosed()) {
listener.onClose();
} else {
throw e;
}
}
}
public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) {
submitStateUpdateTask(source, Priority.NORMAL, updateTask);
}
public void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask) {
if (!lifecycle.started()) {
return;
}
try {
final UpdateTask task = new UpdateTask(source, priority, updateTask);
if (updateTask instanceof TimeoutClusterStateUpdateTask) {
final TimeoutClusterStateUpdateTask timeoutUpdateTask = (TimeoutClusterStateUpdateTask) updateTask;
updateTasksExecutor.execute(task, threadPool.scheduler(), timeoutUpdateTask.timeout(), new Runnable() {
@Override
public void run() {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
timeoutUpdateTask.onFailure(task.source, new ProcessClusterEventTimeoutException(timeoutUpdateTask.timeout(), task.source));
}
});
}
});
} else {
updateTasksExecutor.execute(task);
}
} catch (EsRejectedExecutionException e) {
// ignore cases where we are shutting down..., there is really nothing interesting
// to be done here...
if (!lifecycle.stoppedOrClosed()) {
throw e;
}
}
}
@Override
public List<PendingClusterTask> pendingTasks() {
long now = System.currentTimeMillis();
PrioritizedEsThreadPoolExecutor.Pending[] pendings = updateTasksExecutor.getPending();
List<PendingClusterTask> pendingClusterTasks = new ArrayList<PendingClusterTask>(pendings.length);
for (PrioritizedEsThreadPoolExecutor.Pending pending : pendings) {
final String source;
final long timeInQueue;
if (pending.task instanceof UpdateTask) {
UpdateTask updateTask = (UpdateTask) pending.task;
source = updateTask.source;
timeInQueue = now - updateTask.addedAt;
} else {
source = "unknown";
timeInQueue = -1;
}
pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new StringText(source), timeInQueue));
}
return pendingClusterTasks;
}
class UpdateTask extends PrioritizedRunnable {
public final String source;
public final ClusterStateUpdateTask updateTask;
public final long addedAt = System.currentTimeMillis();
UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
super(priority);
this.source = source;
this.updateTask = updateTask;
}
@Override
public void run() {
if (!lifecycle.started()) {
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
return;
}
logger.debug("processing [{}]: execute", source);
ClusterState previousClusterState = clusterState;
ClusterState newClusterState;
try {
newClusterState = updateTask.execute(previousClusterState);
} catch (Throwable e) {
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("failed to execute cluster state update, state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(previousClusterState.nodes().prettyPrint());
sb.append(previousClusterState.routingTable().prettyPrint());
sb.append(previousClusterState.readOnlyRoutingNodes().prettyPrint());
logger.trace(sb.toString(), e);
}
updateTask.onFailure(source, e);
return;
}
if (previousClusterState == newClusterState) {
logger.debug("processing [{}]: no change in cluster_state", source);
if (updateTask instanceof AckedClusterStateUpdateTask) {
//no need to wait for ack if nothing changed, the update can be counted as acknowledged
((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);
}
if (updateTask instanceof ProcessedClusterStateUpdateTask) {
((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
}
return;
}
try {
Discovery.AckListener ackListener = new NoOpAckListener();
if (newClusterState.nodes().localNodeMaster()) {
// only the master controls the version numbers
Builder builder = ClusterState.builder(newClusterState).version(newClusterState.version() + 1);
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1));
}
if (previousClusterState.metaData() != newClusterState.metaData()) {
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
}
newClusterState = builder.build();
if (updateTask instanceof AckedClusterStateUpdateTask) {
final AckedClusterStateUpdateTask ackedUpdateTask = (AckedClusterStateUpdateTask) updateTask;
if (ackedUpdateTask.ackTimeout() == null || ackedUpdateTask.ackTimeout().millis() == 0) {
ackedUpdateTask.onAckTimeout();
} else {
try {
ackListener = new AckCountDownListener(ackedUpdateTask, newClusterState.version(), newClusterState.nodes(), threadPool);
} catch (EsRejectedExecutionException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
}
//timeout straightaway, otherwise we could wait forever as the timeout thread has not started
ackedUpdateTask.onAckTimeout();
}
}
}
} else {
if (previousClusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK) && !newClusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
// force an update, it's a fresh update from the master as we transition from not having a master to having one
// use fresh instances of routing and metadata to remove the chance that the version might be the same
Builder builder = ClusterState.builder(newClusterState);
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()));
builder.metaData(MetaData.builder(newClusterState.metaData()));
newClusterState = builder.build();
logger.debug("got first state from fresh master [{}]", newClusterState.nodes().masterNodeId());
} else if (newClusterState.version() < previousClusterState.version()) {
// we got this cluster state from the master, filter out based on versions (don't call listeners)
logger.debug("got old cluster state [" + newClusterState.version() + "<" + previousClusterState.version() + "] from source [" + source + "], ignoring");
return;
}
}
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("cluster state updated:\nversion [").append(newClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
logger.trace(sb.toString());
} else if (logger.isDebugEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
}
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
// new cluster state, notify all listeners
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
String summary = nodesDelta.shortSummary();
if (summary.length() > 0) {
logger.info("{}, reason: {}", summary, source);
}
}
// TODO, do this in parallel (and wait)
for (DiscoveryNode node : nodesDelta.addedNodes()) {
if (!nodeRequiresConnection(node)) {
continue;
}
try {
transportService.connectToNode(node);
} catch (Throwable e) {
// the fault detection will detect it as failed as well
logger.warn("failed to connect to node [" + node + "]", e);
}
}
// if we are the master, publish the new state to all nodes
// we publish here before we send a notification to all the listeners, since if it fails
// we don't want to notify
if (newClusterState.nodes().localNodeMaster()) {
logger.debug("publishing cluster state version {}", newClusterState.version());
discoveryService.publish(newClusterState, ackListener);
}
// update the current cluster state
clusterState = newClusterState;
logger.debug("set local cluster state to version {}", newClusterState.version());
for (ClusterStateListener listener : priorityClusterStateListeners) {
listener.clusterChanged(clusterChangedEvent);
}
for (ClusterStateListener listener : clusterStateListeners) {
listener.clusterChanged(clusterChangedEvent);
}
for (ClusterStateListener listener : lastClusterStateListeners) {
listener.clusterChanged(clusterChangedEvent);
}
if (!nodesDelta.removedNodes().isEmpty()) {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
for (DiscoveryNode node : nodesDelta.removedNodes()) {
transportService.disconnectFromNode(node);
}
}
});
}
//manual ack only from the master at the end of the publish
if (newClusterState.nodes().localNodeMaster()) {
try {
ackListener.onNodeAck(localNode(), null);
} catch (Throwable t) {
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
}
}
if (updateTask instanceof ProcessedClusterStateUpdateTask) {
((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
}
logger.debug("processing [{}]: done applying updated cluster_state (version: {})", source, newClusterState.version());
} catch (Throwable t) {
StringBuilder sb = new StringBuilder("failed to apply updated cluster state:\nversion [").append(newClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
logger.warn(sb.toString(), t);
// TODO: do we want to call updateTask.onFailure here?
}
}
}
class NotifyTimeout implements Runnable {
final TimeoutClusterStateListener listener;
final TimeValue timeout;
ScheduledFuture future;
NotifyTimeout(TimeoutClusterStateListener listener, TimeValue timeout) {
this.listener = listener;
this.timeout = timeout;
}
public void cancel() {
future.cancel(false);
}
@Override
public void run() {
if (future.isCancelled()) {
return;
}
if (lifecycle.stoppedOrClosed()) {
listener.onClose();
} else {
listener.onTimeout(this.timeout);
}
// note, we rely on the listener to remove itself in case of timeout if needed
}
}
private class ReconnectToNodes implements Runnable {
private ConcurrentMap<DiscoveryNode, Integer> failureCount = ConcurrentCollections.newConcurrentMap();
@Override
public void run() {
// the master node will check whether all nodes are alive with certain discovery implementations,
// but we can't rely on that, so we check here as well
for (DiscoveryNode node : clusterState.nodes()) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (!nodeRequiresConnection(node)) {
continue;
}
if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time...
if (!transportService.nodeConnected(node)) {
try {
transportService.connectToNode(node);
} catch (Exception e) {
if (lifecycle.stoppedOrClosed()) {
return;
}
if (clusterState.nodes().nodeExists(node.id())) { // double check here as well, maybe it's gone?
Integer nodeFailureCount = failureCount.get(node);
if (nodeFailureCount == null) {
nodeFailureCount = 1;
} else {
nodeFailureCount = nodeFailureCount + 1;
}
// log every 6th failure
if ((nodeFailureCount % 6) == 0) {
// reset the failure count...
nodeFailureCount = 0;
logger.warn("failed to reconnect to node {}", e, node);
}
failureCount.put(node, nodeFailureCount);
}
}
}
}
}
// go over and remove failed nodes that are no longer part of the cluster state
DiscoveryNodes nodes = clusterState.nodes();
for (Iterator<DiscoveryNode> failedNodesIt = failureCount.keySet().iterator(); failedNodesIt.hasNext(); ) {
DiscoveryNode failedNode = failedNodesIt.next();
if (!nodes.nodeExists(failedNode.id())) {
failedNodesIt.remove();
}
}
if (lifecycle.started()) {
reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this);
}
}
}
private boolean nodeRequiresConnection(DiscoveryNode node) {
return localNode().shouldConnectTo(node);
}
private static class LocalNodeMasterListeners implements ClusterStateListener {
private final List<LocalNodeMasterListener> listeners = new CopyOnWriteArrayList<LocalNodeMasterListener>();
private final ThreadPool threadPool;
private volatile boolean master = false;
private LocalNodeMasterListeners(ThreadPool threadPool) {
this.threadPool = threadPool;
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
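// only fire on transitions: onMaster when this node gains mastership, offMaster when it loses it,
// each dispatched on the listener's preferred executor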
if (!master && event.localNodeMaster()) {
master = true;
for (LocalNodeMasterListener listener : listeners) {
Executor executor = threadPool.executor(listener.executorName());
executor.execute(new OnMasterRunnable(listener));
}
return;
}
if (master && !event.localNodeMaster()) {
master = false;
for (LocalNodeMasterListener listener : listeners) {
Executor executor = threadPool.executor(listener.executorName());
executor.execute(new OffMasterRunnable(listener));
}
}
}
private void add(LocalNodeMasterListener listener) {
listeners.add(listener);
}
private void remove(LocalNodeMasterListener listener) {
listeners.remove(listener);
}
private void clear() {
listeners.clear();
}
}
private static class OnMasterRunnable implements Runnable {
private final LocalNodeMasterListener listener;
private OnMasterRunnable(LocalNodeMasterListener listener) {
this.listener = listener;
}
@Override
public void run() {
listener.onMaster();
}
}
private static class OffMasterRunnable implements Runnable {
private final LocalNodeMasterListener listener;
private OffMasterRunnable(LocalNodeMasterListener listener) {
this.listener = listener;
}
@Override
public void run() {
listener.offMaster();
}
}
private static class NoOpAckListener implements Discovery.AckListener {
@Override
public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
}
@Override
public void onTimeout() {
}
}
private static class AckCountDownListener implements Discovery.AckListener {
private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);
private final AckedClusterStateUpdateTask ackedUpdateTask;
private final CountDown countDown;
private final DiscoveryNodes nodes;
private final long clusterStateVersion;
private final Future<?> ackTimeoutCallback;
private Throwable lastFailure;
AckCountDownListener(AckedClusterStateUpdateTask ackedUpdateTask, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
this.ackedUpdateTask = ackedUpdateTask;
this.clusterStateVersion = clusterStateVersion;
this.nodes = nodes;
int countDown = 0;
for (DiscoveryNode node : nodes) {
if (ackedUpdateTask.mustAck(node)) {
countDown++;
}
}
//we always wait for at least 1 node (the master)
countDown = Math.max(1, countDown);
logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
this.countDown = new CountDown(countDown);
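// schedule the ack timeout; if it fires before all acks arrive, onTimeout fast-forwards the countdown and notifies the task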
this.ackTimeoutCallback = threadPool.schedule(ackedUpdateTask.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
@Override
public void run() {
onTimeout();
}
});
}
@Override
public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
if (!ackedUpdateTask.mustAck(node)) {
//we always wait for the master ack anyway
if (!node.equals(nodes.masterNode())) {
return;
}
}
if (t == null) {
logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
} else {
this.lastFailure = t;
logger.debug("ack received from node [{}], cluster_state update (version: {})", t, node, clusterStateVersion);
}
if (countDown.countDown()) {
logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion);
ackTimeoutCallback.cancel(true);
ackedUpdateTask.onAllNodesAcked(lastFailure);
}
}
@Override
public void onTimeout() {
if (countDown.fastForward()) {
logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion);
ackedUpdateTask.onAckTimeout();
}
}
}
} | 1no label
| src_main_java_org_elasticsearch_cluster_service_InternalClusterService.java |
346 | public class SpaceDelimitedNodeValueMerge extends NodeValueMerge {
@Override
public String getDelimiter() {
return " ";
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_handlers_SpaceDelimitedNodeValueMerge.java |
765 | execute(request, new ActionListener<MultiGetResponse>() {
@Override
public void onResponse(MultiGetResponse response) {
try {
channel.sendResponse(response);
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send error response for action [" + MultiGetAction.NAME + "] and request [" + request + "]", e1);
}
}
}); | 0true
| src_main_java_org_elasticsearch_action_get_TransportMultiGetAction.java |
1,365 | @ClusterScope(scope=Scope.TEST, numNodes=0)
public class ShardsAllocatorModuleTests extends ElasticsearchIntegrationTest {
public void testLoadDefaultShardsAllocator() {
assertAllocatorInstance(ImmutableSettings.Builder.EMPTY_SETTINGS, BalancedShardsAllocator.class);
}
public void testLoadByShortKeyShardsAllocator() {
Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.EVEN_SHARD_COUNT_ALLOCATOR_KEY)
.build();
assertAllocatorInstance(build, EvenShardsCountAllocator.class);
build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.BALANCED_ALLOCATOR_KEY).build();
assertAllocatorInstance(build, BalancedShardsAllocator.class);
}
public void testLoadByClassNameShardsAllocator() {
Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "EvenShardsCount").build();
assertAllocatorInstance(build, EvenShardsCountAllocator.class);
build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY,
"org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator").build();
assertAllocatorInstance(build, EvenShardsCountAllocator.class);
}
private void assertAllocatorInstance(Settings settings, Class<? extends ShardsAllocator> clazz) {
while (cluster().size() != 0) {
cluster().stopRandomNode();
}
cluster().startNode(settings);
ShardsAllocator instance = cluster().getInstance(ShardsAllocator.class);
assertThat(instance, instanceOf(clazz));
}
} | 0true
| src_test_java_org_elasticsearch_cluster_allocation_ShardsAllocatorModuleTests.java |
1,518 | private class ShutdownMembershipListener implements MembershipListener {
@Override
public void memberAdded(MembershipEvent membershipEvent) {
}
@Override
public void memberRemoved(MembershipEvent event) {
if (localMember.equals(event.getMember())) {
latch.release();
}
}
@Override
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_instance_NodeShutdownLatch.java |
2,072 | return binding.acceptScopingVisitor(new DefaultBindingScopingVisitor<Scope>() {
public Scope visitScope(Scope scope) {
return scope;
}
}); | 0true
| src_main_java_org_elasticsearch_common_inject_util_Modules.java |
2,785 | public enum VersionType {
INTERNAL((byte) 0) {
/**
* - always returns false if currentVersion == {@link Versions#NOT_SET}
* - always accepts expectedVersion == {@link Versions#MATCH_ANY}
* - if expectedVersion is set, always conflict if currentVersion == {@link Versions#NOT_FOUND}
*/
@Override
public boolean isVersionConflict(long currentVersion, long expectedVersion) {
return currentVersion != Versions.NOT_SET && expectedVersion != Versions.MATCH_ANY
&& (currentVersion == Versions.NOT_FOUND || currentVersion != expectedVersion);
}
@Override
public long updateVersion(long currentVersion, long expectedVersion) {
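// documents that do not exist yet (or have no version set) start at version 1; otherwise bump the current version by one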
return (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
}
},
EXTERNAL((byte) 1) {
/**
* - always returns false if currentVersion == {@link Versions#NOT_SET}
* - always conflict if expectedVersion == {@link Versions#MATCH_ANY} (we need something to set)
* - accepts currentVersion == {@link Versions#NOT_FOUND}
*/
@Override
public boolean isVersionConflict(long currentVersion, long expectedVersion) {
return currentVersion != Versions.NOT_SET && currentVersion != Versions.NOT_FOUND
&& (expectedVersion == Versions.MATCH_ANY || currentVersion >= expectedVersion);
}
@Override
public long updateVersion(long currentVersion, long expectedVersion) {
return expectedVersion;
}
};
private final byte value;
VersionType(byte value) {
this.value = value;
}
public byte getValue() {
return value;
}
/**
* Checks whether the current version conflicts with the expected version, based on the current version type.
*
* @return true if the versions conflict, false otherwise
*/
public abstract boolean isVersionConflict(long currentVersion, long expectedVersion);
/**
* Returns the new version for a document, based on its current one and the specified in the request
*
* @return new version
*/
public abstract long updateVersion(long currentVersion, long expectedVersion);
public static VersionType fromString(String versionType) {
if ("internal".equals(versionType)) {
return INTERNAL;
} else if ("external".equals(versionType)) {
return EXTERNAL;
}
throw new ElasticsearchIllegalArgumentException("No version type match [" + versionType + "]");
}
public static VersionType fromString(String versionType, VersionType defaultVersionType) {
if (versionType == null) {
return defaultVersionType;
}
if ("internal".equals(versionType)) {
return INTERNAL;
} else if ("external".equals(versionType)) {
return EXTERNAL;
}
throw new ElasticsearchIllegalArgumentException("No version type match [" + versionType + "]");
}
public static VersionType fromValue(byte value) {
if (value == 0) {
return INTERNAL;
} else if (value == 1) {
return EXTERNAL;
}
throw new ElasticsearchIllegalArgumentException("No version type match [" + value + "]");
}
} | 0true
| src_main_java_org_elasticsearch_index_VersionType.java |
97 | private static class TxLockElement
{
private final Transaction tx;
// access to these is guarded by synchronized blocks
private int readCount;
private int writeCount;
private boolean movedOn;
TxLockElement( Transaction tx )
{
this.tx = tx;
}
boolean isFree()
{
return readCount == 0 && writeCount == 0;
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_RWLock.java |
1,709 | runnable = new Runnable() { public void run() { map.unlock(null); } }; | 0true
| hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java |
18 | @Component("blTargetItemRulesValidator")
public class TargetItemRulesValidator implements PropertyValidator {
@Override
public PropertyValidationResult validate(Entity entity, Serializable instance, Map<String, FieldMetadata> entityFieldMetadata, Map<String, String> validationConfiguration, BasicFieldMetadata propertyMetadata, String propertyName, String value) {
Offer offer = (Offer)instance;
if (OfferType.ORDER_ITEM.equals(offer.getType())) {
return new PropertyValidationResult(CollectionUtils.isNotEmpty(offer.getTargetItemCriteria()), RequiredPropertyValidator.ERROR_MESSAGE);
} else {
return new PropertyValidationResult(true);
}
}
} | 0true
| admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_persistence_validation_TargetItemRulesValidator.java |
861 | public class OSecurityProxy extends OProxedResource<OSecurity> implements OSecurity {
public OSecurityProxy(final OSecurity iDelegate, final ODatabaseRecord iDatabase) {
super(iDelegate, iDatabase);
}
@Override
public boolean isAllowed(final Set<OIdentifiable> iAllowAll, final Set<OIdentifiable> iAllowOperation) {
return delegate.isAllowed(iAllowAll, iAllowOperation);
}
public OIdentifiable allowUser(final ODocument iDocument, final String iAllowFieldName, final String iUserName) {
return delegate.allowUser(iDocument, iAllowFieldName, iUserName);
}
public OIdentifiable allowRole(final ODocument iDocument, final String iAllowFieldName, final String iRoleName) {
return delegate.allowRole(iDocument, iAllowFieldName, iRoleName);
}
@Override
public OIdentifiable allowIdentity(ODocument iDocument, String iAllowFieldName, OIdentifiable iId) {
return delegate.allowIdentity(iDocument, iAllowFieldName, iId);
}
public OIdentifiable disallowUser(final ODocument iDocument, final String iAllowFieldName, final String iUserName) {
return delegate.disallowUser(iDocument, iAllowFieldName, iUserName);
}
public OIdentifiable disallowRole(final ODocument iDocument, final String iAllowFieldName, final String iRoleName) {
return delegate.disallowRole(iDocument, iAllowFieldName, iRoleName);
}
@Override
public OIdentifiable disallowIdentity(ODocument iDocument, String iAllowFieldName, OIdentifiable iId) {
return delegate.disallowIdentity(iDocument, iAllowFieldName, iId);
}
public OUser create() {
return delegate.create();
}
public void load() {
delegate.load();
}
public void close() {
if (delegate != null)
delegate.close();
}
public OUser authenticate(final String iUsername, final String iUserPassword) {
return delegate.authenticate(iUsername, iUserPassword);
}
public OUser getUser(final String iUserName) {
return delegate.getUser(iUserName);
}
public OUser createUser(final String iUserName, final String iUserPassword, final String... iRoles) {
return delegate.createUser(iUserName, iUserPassword, iRoles);
}
public OUser createUser(final String iUserName, final String iUserPassword, final ORole... iRoles) {
return delegate.createUser(iUserName, iUserPassword, iRoles);
}
public ORole getRole(final String iRoleName) {
return delegate.getRole(iRoleName);
}
public ORole getRole(final OIdentifiable iRole) {
return delegate.getRole(iRole);
}
public ORole createRole(final String iRoleName, final ALLOW_MODES iAllowMode) {
return delegate.createRole(iRoleName, iAllowMode);
}
public ORole createRole(final String iRoleName, final ORole iParent, final ALLOW_MODES iAllowMode) {
return delegate.createRole(iRoleName, iParent, iAllowMode);
}
public List<ODocument> getAllUsers() {
return delegate.getAllUsers();
}
public List<ODocument> getAllRoles() {
return delegate.getAllRoles();
}
public String toString() {
return delegate.toString();
}
public OUser repair() {
return delegate.repair();
}
public boolean dropUser(final String iUserName) {
return delegate.dropUser(iUserName);
}
public boolean dropRole(final String iRoleName) {
return delegate.dropRole(iRoleName);
}
public void createClassTrigger() {
delegate.createClassTrigger();
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_metadata_security_OSecurityProxy.java |
565 | public class HibernateToolTask extends Task {
public HibernateToolTask() {
super();
}
@SuppressWarnings("rawtypes")
protected List configurationTasks = new ArrayList();
protected File destDir;
@SuppressWarnings("rawtypes")
protected List generators = new ArrayList();
protected List<Task> appContexts = new ArrayList<Task>();
protected Path classPath;
protected boolean combinePersistenceUnits = true;
protected boolean refineFileNames = true;
public ExporterTask createHbm2DDL() {
ExporterTask generator = new Hbm2DDLExporterTask(this);
addGenerator( generator );
return generator;
}
public ClassPathApplicationContextTask createClassPathApplicationContext() {
ClassPathApplicationContextTask task = new ClassPathApplicationContextTask();
appContexts.add(task);
return task;
}
public FileSystemApplicationContextTask createFileSystemApplicationContext() {
FileSystemApplicationContextTask task = new FileSystemApplicationContextTask();
appContexts.add(task);
return task;
}
public JPAConfigurationTask createJPAConfiguration() {
JPAConfigurationTask task = new JPAConfigurationTask();
addConfiguration(task);
return task;
}
@SuppressWarnings("unchecked")
protected boolean addConfiguration(ConfigurationTask config) {
return configurationTasks.add(config);
}
@SuppressWarnings("unchecked")
protected boolean addGenerator(ExporterTask generator) {
return generators.add(generator);
}
/**
* Set the classpath to be used when running the Java class
*
* @param s an Ant Path object containing the classpath.
*/
public void setClasspath(Path s) {
classPath = s;
}
/**
* Adds a path to the classpath.
*
* @return created classpath
*/
public Path createClasspath() {
classPath = new Path(getProject() );
return classPath;
}
@Override
public void execute() {
MergeFileSystemAndClassPathXMLApplicationContext mergeContext;
try {
ConfigurationOnlyState state = new ConfigurationOnlyState();
state.setConfigurationOnly(true);
ConfigurationOnlyState.setState(state);
Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader());
// launch the service merge application context to get the entity configuration for the entire framework
String[] contexts = StandardConfigLocations.retrieveAll(StandardConfigLocations.TESTCONTEXTTYPE);
LinkedHashMap<String, MergeFileSystemAndClassPathXMLApplicationContext.ResourceType> locations = new LinkedHashMap<String, MergeFileSystemAndClassPathXMLApplicationContext.ResourceType>();
for (String context : contexts) {
locations.put(context, MergeFileSystemAndClassPathXMLApplicationContext.ResourceType.CLASSPATH);
}
for (Task task : appContexts) {
if (task instanceof ClassPathApplicationContextTask) {
locations.put(((ClassPathApplicationContextTask) task).getPath(), MergeFileSystemAndClassPathXMLApplicationContext.ResourceType.CLASSPATH);
} else if (task instanceof FileSystemApplicationContextTask) {
locations.put(((FileSystemApplicationContextTask) task).getPath(), MergeFileSystemAndClassPathXMLApplicationContext.ResourceType.FILESYSTEM);
}
}
mergeContext = new MergeFileSystemAndClassPathXMLApplicationContext(locations, null);
} catch (Exception e) {
throw new BuildException(e, getLocation());
} finally {
ConfigurationOnlyState.setState(null);
}
int count = 1;
ExporterTask generatorTask = null;
try {
for (Object configuration : configurationTasks) {
JPAConfigurationTask configurationTask = (JPAConfigurationTask) configuration;
log("Executing Hibernate Tool with a " + configurationTask.getDescription());
@SuppressWarnings("rawtypes")
Iterator iterator = generators.iterator();
while (iterator.hasNext()) {
generatorTask = (ExporterTask) iterator.next();
log(count++ + ". task: " + generatorTask.getName());
generatorTask.setOutputFileName(configurationTask.getDialect() + "_" + configurationTask.getPersistenceUnit() + ".sql");
generatorTask.setDestdir(destDir);
generatorTask.setConfiguration(configurationTask.createConfiguration(mergeContext));
generatorTask.execute();
}
}
} catch (RuntimeException re) {
reportException(re, count, generatorTask);
}
try {
if (combinePersistenceUnits) {
ArrayList<File> combine = new ArrayList<File>();
for (Object configuration : configurationTasks) {
JPAConfigurationTask configurationTask = (JPAConfigurationTask) configuration;
File[] sqlFiles = destDir.listFiles(new SqlFileFilter());
for (File file : sqlFiles) {
if (file.getName().startsWith(configurationTask.getDialect())){
combine.add(file);
}
}
combineFiles(combine);
combine.clear();
}
}
if (refineFileNames) {
File[] sqlFiles = destDir.listFiles(new SqlFileFilter());
for (File file : sqlFiles) {
String filename = file.getName();
String[] starters = {"org.hibernate.dialect.", "org.broadleafcommerce.profile.util.sql."};
for (String starter : starters) {
if (filename.startsWith(starter)) {
String newFileName = filename.substring(starter.length(), filename.length());
file.renameTo(new File(destDir, newFileName));
}
}
}
}
} catch (Exception e) {
throw new BuildException(e, getLocation());
}
}
private void combineFiles(ArrayList<File> combine) throws Exception {
Iterator<File> itr = combine.iterator();
File startFile = itr.next();
while(itr.hasNext()) {
File nextFile = itr.next();
BufferedWriter writer = null;
BufferedReader reader = null;
try{
writer = new BufferedWriter(new FileWriter(startFile, true));
reader = new BufferedReader(new FileReader(nextFile));
boolean eof = false;
String temp = null;
while (!eof) {
temp = reader.readLine();
if (temp == null) {
eof = true;
} else {
writer.write(temp);
writer.write("\n");
}
}
} finally {
if (writer != null) {
try{ writer.close(); } catch (Throwable e) {};
}
if (reader != null) {
try{ reader.close(); } catch (Throwable e) {};
}
}
try{
nextFile.delete();
} catch (Throwable e) {}
}
}
private void reportException(Throwable re, int count, ExporterTask generatorTask) {
log("An exception occurred while running exporter #" + count + ":" + generatorTask.getName(), Project.MSG_ERR);
log("To get the full stack trace run ant with -verbose", Project.MSG_ERR);
log(re.toString(), Project.MSG_ERR);
String ex = new String();
Throwable cause = re.getCause();
while(cause!=null) {
ex += cause.toString() + "\n";
if(cause==cause.getCause()) {
break; // we reached the top.
} else {
cause=cause.getCause();
}
}
if(StringUtils.isNotEmpty(ex)) {
log(ex, Project.MSG_ERR);
}
String newbieMessage = getProbableSolutionOrCause(re);
if(newbieMessage!=null) {
log(newbieMessage);
}
if(re instanceof BuildException) {
throw (BuildException)re;
} else {
throw new BuildException(re, getLocation());
}
}
private String getProbableSolutionOrCause(Throwable re) {
if(re==null) return null;
if(re instanceof MappingNotFoundException) {
MappingNotFoundException mnf = (MappingNotFoundException)re;
if("resource".equals(mnf.getType())) {
return "A " + mnf.getType() + " located at " + mnf.getPath() + " was not found.\n" +
"Check the following:\n" +
"\n" +
"1) Is the spelling/casing correct ?\n" +
"2) Is " + mnf.getPath() + " available via the classpath ?\n" +
"3) Does it actually exist ?\n";
} else {
return "A " + mnf.getType() + " located at " + mnf.getPath() + " was not found.\n" +
"Check the following:\n" +
"\n" +
"1) Is the spelling/casing correct ?\n" +
"2) Do you permission to access " + mnf.getPath() + " ?\n" +
"3) Does it actually exist ?\n";
}
}
if(re instanceof ClassNotFoundException || re instanceof NoClassDefFoundError) {
return "A class were not found in the classpath of the Ant task.\n" +
"Ensure that the classpath contains the classes needed for Hibernate and your code are in the classpath.\n";
}
if(re instanceof UnsupportedClassVersionError) {
return "You are most likely running the ant task with a JRE that is older than the JRE required to use the classes.\n" +
"e.g. running with JRE 1.3 or 1.4 when using JDK 1.5 annotations is not possible.\n" +
"Ensure that you are using a correct JRE.";
}
if(re.getCause()!=re) {
return getProbableSolutionOrCause( re.getCause() );
}
return null;
}
public File getDestdir() {
return destDir;
}
public void setDestdir(File destDir) {
this.destDir = destDir;
}
public boolean isCombinePersistenceUnits() {
return combinePersistenceUnits;
}
public void setCombinePersistenceUnits(boolean combinePersistenceUnits) {
this.combinePersistenceUnits = combinePersistenceUnits;
}
public boolean isRefineFileNames() {
return refineFileNames;
}
public void setRefineFileNames(boolean refineFileNames) {
this.refineFileNames = refineFileNames;
}
private class SqlFileFilter implements FilenameFilter {
@Override
public boolean accept(File dir, String name) {
return name.endsWith(".sql");
}
}
} | 0true
| common_src_main_java_org_broadleafcommerce_common_util_sql_HibernateToolTask.java |
3,867 | public class IdsQueryParser implements QueryParser {
public static final String NAME = "ids";
@Inject
public IdsQueryParser() {
}
@Override
public String[] names() {
return new String[]{NAME};
}
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
List<BytesRef> ids = new ArrayList<BytesRef>();
Collection<String> types = null;
String currentFieldName = null;
float boost = 1.0f;
String queryName = null;
XContentParser.Token token;
boolean idsProvided = false;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("values".equals(currentFieldName)) {
idsProvided = true;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
BytesRef value = parser.bytesOrNull();
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No value specified for term filter");
}
ids.add(value);
}
} else if ("types".equals(currentFieldName) || "type".equals(currentFieldName)) {
types = new ArrayList<String>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
String value = parser.textOrNull();
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No type specified for term filter");
}
types.add(value);
}
} else {
throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) {
types = ImmutableList.of(parser.text());
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]");
}
}
}
if (!idsProvided) {
throw new QueryParsingException(parseContext.index(), "[ids] query, no ids values provided");
}
if (ids.isEmpty()) {
return Queries.newMatchNoDocsQuery();
}
if (types == null || types.isEmpty()) {
types = parseContext.queryTypes();
} else if (types.size() == 1 && Iterables.getFirst(types, null).equals("_all")) {
types = parseContext.mapperService().types();
}
TermsFilter filter = new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
// no need for constant score filter, since we don't cache the filter, and it always takes deletes into account
ConstantScoreQuery query = new ConstantScoreQuery(filter);
query.setBoost(boost);
if (queryName != null) {
parseContext.addNamedQuery(queryName, query);
}
return query;
}
} | 1no label
| src_main_java_org_elasticsearch_index_query_IdsQueryParser.java |
1,393 | public interface Custom {
interface Factory<T extends Custom> {
String type();
T readFrom(StreamInput in) throws IOException;
void writeTo(T customIndexMetaData, StreamOutput out) throws IOException;
T fromXContent(XContentParser parser) throws IOException;
void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException;
/**
* Returns true if this custom metadata should be persisted as part of global cluster state
*/
boolean isPersistent();
}
} | 0true
| src_main_java_org_elasticsearch_cluster_metadata_MetaData.java |
1,255 | new OProfilerHookValue() {
public Object getValue() {
return metricUsedChannel;
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_fs_OMMapManagerOld.java |
1,415 | public class DummyEntity {
private long id;
private int version;
private String name;
private double value;
private Date date;
private Set<DummyProperty> properties;
public DummyEntity() {
super();
}
public DummyEntity(long id, String name, double value, Date date) {
super();
this.id = id;
this.name = name;
this.value = value;
this.date = date;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public int getVersion() {
return version;
}
public void setVersion(int version) {
this.version = version;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public double getValue() {
return value;
}
public void setValue(double value) {
this.value = value;
}
public Date getDate() {
return date;
}
public void setDate(Date date) {
this.date = date;
}
public void setProperties(Set<DummyProperty> properties) {
this.properties = properties;
}
public Set<DummyProperty> getProperties() {
return properties;
}
} | 0true
| hazelcast-hibernate_hazelcast-hibernate3_src_test_java_com_hazelcast_hibernate_entity_DummyEntity.java |
608 | @Component("blRequestProcessor")
public class BroadleafRequestProcessor extends AbstractBroadleafWebRequestProcessor {
protected final Log LOG = LogFactory.getLog(getClass());
private static String REQUEST_DTO_PARAM_NAME = BroadleafRequestFilter.REQUEST_DTO_PARAM_NAME;
@Resource(name = "blSiteResolver")
protected BroadleafSiteResolver siteResolver;
@Resource(name = "blLocaleResolver")
protected BroadleafLocaleResolver localeResolver;
@Resource(name = "blCurrencyResolver")
protected BroadleafCurrencyResolver currencyResolver;
@Resource(name = "blSandBoxResolver")
protected BroadleafSandBoxResolver sandboxResolver;
@Resource(name = "blThemeResolver")
protected BroadleafThemeResolver themeResolver;
@Resource(name = "messageSource")
protected MessageSource messageSource;
@Resource(name = "blTimeZoneResolver")
protected BroadleafTimeZoneResolver broadleafTimeZoneResolver;
@Override
public void process(WebRequest request) {
Site site = siteResolver.resolveSite(request);
BroadleafRequestContext brc = new BroadleafRequestContext();
brc.setSite(site);
brc.setWebRequest(request);
if (site == null) {
brc.setIgnoreSite(true);
}
BroadleafRequestContext.setBroadleafRequestContext(brc);
Locale locale = localeResolver.resolveLocale(request);
TimeZone timeZone = broadleafTimeZoneResolver.resolveTimeZone(request);
BroadleafCurrency currency = currencyResolver.resolveCurrency(request);
// Assumes BroadleafProcess
RequestDTO requestDTO = (RequestDTO) request.getAttribute(REQUEST_DTO_PARAM_NAME, WebRequest.SCOPE_REQUEST);
if (requestDTO == null) {
requestDTO = new RequestDTOImpl(request);
}
SandBox currentSandbox = sandboxResolver.resolveSandBox(request, site);
if (currentSandbox != null) {
SandBoxContext previewSandBoxContext = new SandBoxContext();
previewSandBoxContext.setSandBoxId(currentSandbox.getId());
previewSandBoxContext.setPreviewMode(true);
SandBoxContext.setSandBoxContext(previewSandBoxContext);
}
// Note that this must happen after the request context is set up as resolving a theme is dependent on site
Theme theme = themeResolver.resolveTheme(request);
brc.setLocale(locale);
brc.setBroadleafCurrency(currency);
brc.setSandbox(currentSandbox);
brc.setTheme(theme);
brc.setMessageSource(messageSource);
brc.setTimeZone(timeZone);
brc.setRequestDTO(requestDTO);
Map<String, Object> ruleMap = (Map<String, Object>) request.getAttribute("blRuleMap", WebRequest.SCOPE_REQUEST);
if (ruleMap == null) {
LOG.trace("Creating ruleMap and adding in Locale.");
ruleMap = new HashMap<String, Object>();
request.setAttribute("blRuleMap", ruleMap, WebRequest.SCOPE_REQUEST);
} else {
LOG.trace("Using pre-existing ruleMap - added by non standard BLC process.");
}
ruleMap.put("locale", locale);
}
@Override
public void postProcess(WebRequest request) {
ThreadLocalManager.remove();
//temporary workaround for Thymeleaf issue #18 (resolved in version 2.1)
//https://github.com/thymeleaf/thymeleaf-spring3/issues/18
try {
Field currentProcessLocale = TemplateEngine.class.getDeclaredField("currentProcessLocale");
currentProcessLocale.setAccessible(true);
((ThreadLocal) currentProcessLocale.get(null)).remove();
Field currentProcessTemplateEngine = TemplateEngine.class.getDeclaredField("currentProcessTemplateEngine");
currentProcessTemplateEngine.setAccessible(true);
((ThreadLocal) currentProcessTemplateEngine.get(null)).remove();
Field currentProcessTemplateName = TemplateEngine.class.getDeclaredField("currentProcessTemplateName");
currentProcessTemplateName.setAccessible(true);
((ThreadLocal) currentProcessTemplateName.get(null)).remove();
} catch (Throwable e) {
LOG.warn("Unable to remove Thymeleaf threadlocal variables from request thread", e);
}
}
} | 1no label
| common_src_main_java_org_broadleafcommerce_common_web_BroadleafRequestProcessor.java |
1,025 | public static class Order {
public static final int General = 1000;
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_OrderImpl.java |
90 | public class ODFACommandStreamTest {
@Test
public void testNextCommand() throws Exception {
test("one;two", "one", "two");
}
@Test
public void testNextCommandQuotes() throws Exception {
test("Select 'one;'; Select \"t;w;o\"", "Select 'one;'", "Select \"t;w;o\"");
}
@Test
public void testNextCommandSeparatorAtTheEnd() throws Exception {
test("one;two;", "one", "two");
}
@Test
public void testNextCommandWhitespaces() throws Exception {
test("\tone ; two ", "one", "two");
}
private void test(String source, String... expectedResults) {
final ODFACommandStream stream = new ODFACommandStream(source);
for (String expectedResult : expectedResults) {
Assert.assertTrue(stream.hasNext());
String result = stream.nextCommand();
Assert.assertEquals(result, expectedResult);
}
Assert.assertFalse(stream.hasNext());
}
} | 0true
| commons_src_test_java_com_orientechnologies_common_console_ODFACommandStreamTest.java |
1,583 | rn.shards(new Predicate<MutableShardRouting>() {
@Override
public boolean apply(org.elasticsearch.cluster.routing.MutableShardRouting input) {
return true;
}
}).size(), | 0true
| src_test_java_org_elasticsearch_cluster_routing_allocation_decider_DiskThresholdDeciderTests.java |
211 | public abstract class LogEntry
{
/* version 1 as of 2011-02-22
* version 2 as of 2011-10-17
* version 3 as of 2013-02-09: neo4j 2.0 Labels & Indexing
*/
static final byte CURRENT_VERSION = (byte) 3;
// empty record due to memory mapped file
public static final byte EMPTY = (byte) 0;
public static final byte TX_START = (byte) 1;
public static final byte TX_PREPARE = (byte) 2;
public static final byte COMMAND = (byte) 3;
public static final byte DONE = (byte) 4;
public static final byte TX_1P_COMMIT = (byte) 5;
public static final byte TX_2P_COMMIT = (byte) 6;
private int identifier;
LogEntry( int identifier )
{
this.identifier = identifier;
}
public int getIdentifier()
{
return identifier;
}
public String toString( TimeZone timeZone )
{
return toString();
}
public static class Start extends LogEntry
{
private final Xid xid;
private final int masterId;
private final int myId;
private final long timeWritten;
private final long lastCommittedTxWhenTransactionStarted;
private long startPosition;
Start( Xid xid, int identifier, int masterId, int myId, long startPosition, long timeWritten,
long lastCommittedTxWhenTransactionStarted )
{
super( identifier );
this.xid = xid;
this.masterId = masterId;
this.myId = myId;
this.startPosition = startPosition;
this.timeWritten = timeWritten;
this.lastCommittedTxWhenTransactionStarted = lastCommittedTxWhenTransactionStarted;
}
public Xid getXid()
{
return xid;
}
public int getMasterId()
{
return masterId;
}
public int getLocalId()
{
return myId;
}
public long getStartPosition()
{
return startPosition;
}
void setStartPosition( long position )
{
this.startPosition = position;
}
public long getTimeWritten()
{
return timeWritten;
}
public long getLastCommittedTxWhenTransactionStarted()
{
return lastCommittedTxWhenTransactionStarted;
}
/**
* @return a checksum that combines enough state to identify this transaction uniquely.
*/
public long getChecksum()
{
// [32 bits combined masterId/myId][32 bits xid hashcode, which combines time/randomness]
long lowBits = xid.hashCode();
long highBits = masterId*37 + myId;
return (highBits << 32) | (lowBits & 0xFFFFFFFFL);
}
@Override
public String toString()
{
return toString( Format.DEFAULT_TIME_ZONE );
}
@Override
public String toString( TimeZone timeZone )
{
return "Start[" + getIdentifier() + ",xid=" + xid + ",master=" + masterId + ",me=" + myId + ",time=" +
timestamp( timeWritten, timeZone ) + ",lastCommittedTxWhenTransactionStarted="+
lastCommittedTxWhenTransactionStarted+"]";
}
}
public static class Prepare extends LogEntry
{
private final long timeWritten;
Prepare( int identifier, long timeWritten )
{
super( identifier );
this.timeWritten = timeWritten;
}
public long getTimeWritten()
{
return timeWritten;
}
@Override
public String toString()
{
return toString( Format.DEFAULT_TIME_ZONE );
}
@Override
public String toString( TimeZone timeZone )
{
return "Prepare[" + getIdentifier() + ", " + timestamp( timeWritten, timeZone ) + "]";
}
}
public static abstract class Commit extends LogEntry
{
private final long txId;
private final long timeWritten;
protected final String name;
Commit( int identifier, long txId, long timeWritten, String name )
{
super( identifier );
this.txId = txId;
this.timeWritten = timeWritten;
this.name = name;
}
public long getTxId()
{
return txId;
}
public long getTimeWritten()
{
return timeWritten;
}
@Override
public String toString()
{
return toString( Format.DEFAULT_TIME_ZONE );
}
@Override
public String toString( TimeZone timeZone )
{
return name + "[" + getIdentifier() + ", txId=" + getTxId() + ", " + timestamp( getTimeWritten(), timeZone ) + "]";
}
}
public static class OnePhaseCommit extends Commit
{
OnePhaseCommit( int identifier, long txId, long timeWritten )
{
super( identifier, txId, timeWritten, "1PC" );
}
}
public static class TwoPhaseCommit extends Commit
{
TwoPhaseCommit( int identifier, long txId, long timeWritten )
{
super( identifier, txId, timeWritten, "2PC" );
}
}
public static class Done extends LogEntry
{
Done( int identifier )
{
super( identifier );
}
@Override
public String toString()
{
return "Done[" + getIdentifier() + "]";
}
}
public static class Command extends LogEntry
{
private final XaCommand command;
Command( int identifier, XaCommand command )
{
super( identifier );
this.command = command;
}
public XaCommand getXaCommand()
{
return command;
}
@Override
public String toString()
{
return "Command[" + getIdentifier() + ", " + command + "]";
}
}
public void setIdentifier( int newXidIdentifier )
{
identifier = newXidIdentifier;
}
public String timestamp( long timeWritten, TimeZone timeZone )
{
return Format.date( timeWritten, timeZone ) + "/" + timeWritten;
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogEntry.java |
2,037 | public final class InjectionPoint {
private final boolean optional;
private final Member member;
private final ImmutableList<Dependency<?>> dependencies;
private InjectionPoint(Member member,
ImmutableList<Dependency<?>> dependencies, boolean optional) {
this.member = member;
this.dependencies = dependencies;
this.optional = optional;
}
InjectionPoint(TypeLiteral<?> type, Method method) {
this.member = method;
Inject inject = method.getAnnotation(Inject.class);
this.optional = inject.optional();
this.dependencies = forMember(method, type, method.getParameterAnnotations());
}
InjectionPoint(TypeLiteral<?> type, Constructor<?> constructor) {
this.member = constructor;
this.optional = false;
this.dependencies = forMember(constructor, type, constructor.getParameterAnnotations());
}
InjectionPoint(TypeLiteral<?> type, Field field) {
this.member = field;
Inject inject = field.getAnnotation(Inject.class);
this.optional = inject.optional();
Annotation[] annotations = field.getAnnotations();
Errors errors = new Errors(field);
Key<?> key = null;
try {
key = Annotations.getKey(type.getFieldType(field), field, annotations, errors);
} catch (ErrorsException e) {
errors.merge(e.getErrors());
}
errors.throwConfigurationExceptionIfErrorsExist();
this.dependencies = ImmutableList.<Dependency<?>>of(
newDependency(key, Nullability.allowsNull(annotations), -1));
}
private ImmutableList<Dependency<?>> forMember(Member member, TypeLiteral<?> type,
Annotation[][] paramterAnnotations) {
Errors errors = new Errors(member);
Iterator<Annotation[]> annotationsIterator = Arrays.asList(paramterAnnotations).iterator();
List<Dependency<?>> dependencies = Lists.newArrayList();
int index = 0;
for (TypeLiteral<?> parameterType : type.getParameterTypes(member)) {
try {
Annotation[] parameterAnnotations = annotationsIterator.next();
Key<?> key = Annotations.getKey(parameterType, member, parameterAnnotations, errors);
dependencies.add(newDependency(key, Nullability.allowsNull(parameterAnnotations), index));
index++;
} catch (ErrorsException e) {
errors.merge(e.getErrors());
}
}
errors.throwConfigurationExceptionIfErrorsExist();
return ImmutableList.copyOf(dependencies);
}
// This method is necessary to create a Dependency<T> with proper generic type information
private <T> Dependency<T> newDependency(Key<T> key, boolean allowsNull, int parameterIndex) {
return new Dependency<T>(this, key, allowsNull, parameterIndex);
}
/**
* Returns the injected constructor, field, or method.
*/
public Member getMember() {
return member;
}
/**
* Returns the dependencies for this injection point. If the injection point is for a method or
* constructor, the dependencies will correspond to that member's parameters. Field injection
* points always have a single dependency for the field itself.
*
* @return a possibly-empty list
*/
public List<Dependency<?>> getDependencies() {
return dependencies;
}
/**
* Returns true if this injection point shall be skipped if the injector cannot resolve bindings
* for all required dependencies. Both explicit bindings (as specified in a module), and implicit
* bindings ({@literal @}{@link org.elasticsearch.common.inject.ImplementedBy ImplementedBy}, default
* constructors etc.) may be used to satisfy optional injection points.
*/
public boolean isOptional() {
return optional;
}
@Override
public boolean equals(Object o) {
return o instanceof InjectionPoint
&& member.equals(((InjectionPoint) o).member);
}
@Override
public int hashCode() {
return member.hashCode();
}
@Override
public String toString() {
return MoreTypes.toString(member);
}
/**
* Returns a new injection point for the injectable constructor of {@code type}.
*
* @param type a concrete type with exactly one constructor annotated {@literal @}{@link Inject},
* or a no-arguments constructor that is not private.
* @throws ConfigurationException if there is no injectable constructor, more than one injectable
* constructor, or if parameters of the injectable constructor are malformed, such as a
* parameter with multiple binding annotations.
*/
public static InjectionPoint forConstructorOf(TypeLiteral<?> type) {
Class<?> rawType = getRawType(type.getType());
Errors errors = new Errors(rawType);
Constructor<?> injectableConstructor = null;
for (Constructor<?> constructor : rawType.getDeclaredConstructors()) {
Inject inject = constructor.getAnnotation(Inject.class);
if (inject != null) {
if (inject.optional()) {
errors.optionalConstructor(constructor);
}
if (injectableConstructor != null) {
errors.tooManyConstructors(rawType);
}
injectableConstructor = constructor;
checkForMisplacedBindingAnnotations(injectableConstructor, errors);
}
}
errors.throwConfigurationExceptionIfErrorsExist();
if (injectableConstructor != null) {
return new InjectionPoint(type, injectableConstructor);
}
// If no annotated constructor is found, look for a no-arg constructor instead.
try {
Constructor<?> noArgConstructor = rawType.getDeclaredConstructor();
// Disallow private constructors on non-private classes (unless they have @Inject)
if (Modifier.isPrivate(noArgConstructor.getModifiers())
&& !Modifier.isPrivate(rawType.getModifiers())) {
errors.missingConstructor(rawType);
throw new ConfigurationException(errors.getMessages());
}
checkForMisplacedBindingAnnotations(noArgConstructor, errors);
return new InjectionPoint(type, noArgConstructor);
} catch (NoSuchMethodException e) {
errors.missingConstructor(rawType);
throw new ConfigurationException(errors.getMessages());
}
}
/**
* Returns a new injection point for the injectable constructor of {@code type}.
*
* @param type a concrete type with exactly one constructor annotated {@literal @}{@link Inject},
* or a no-arguments constructor that is not private.
* @throws ConfigurationException if there is no injectable constructor, more than one injectable
* constructor, or if parameters of the injectable constructor are malformed, such as a
* parameter with multiple binding annotations.
*/
public static InjectionPoint forConstructorOf(Class<?> type) {
return forConstructorOf(TypeLiteral.get(type));
}
/**
* Returns all static method and field injection points on {@code type}.
*
* @return a possibly empty set of injection points. The set has a specified iteration order. All
* fields are returned and then all methods. Within the fields, supertype fields are returned
* before subtype fields. Similarly, supertype methods are returned before subtype methods.
* @throws ConfigurationException if there is a malformed injection point on {@code type}, such as
* a field with multiple binding annotations. The exception's {@link
* ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
* of the valid injection points.
*/
public static Set<InjectionPoint> forStaticMethodsAndFields(TypeLiteral type) {
List<InjectionPoint> sink = Lists.newArrayList();
Errors errors = new Errors();
addInjectionPoints(type, Factory.FIELDS, true, sink, errors);
addInjectionPoints(type, Factory.METHODS, true, sink, errors);
ImmutableSet<InjectionPoint> result = ImmutableSet.copyOf(sink);
if (errors.hasErrors()) {
throw new ConfigurationException(errors.getMessages()).withPartialValue(result);
}
return result;
}
/**
* Returns all static method and field injection points on {@code type}.
*
* @return a possibly empty set of injection points. The set has a specified iteration order. All
* fields are returned and then all methods. Within the fields, supertype fields are returned
* before subtype fields. Similarly, supertype methods are returned before subtype methods.
* @throws ConfigurationException if there is a malformed injection point on {@code type}, such as
* a field with multiple binding annotations. The exception's {@link
* ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
* of the valid injection points.
*/
public static Set<InjectionPoint> forStaticMethodsAndFields(Class<?> type) {
return forStaticMethodsAndFields(TypeLiteral.get(type));
}
/**
* Returns all instance method and field injection points on {@code type}.
*
* @return a possibly empty set of injection points. The set has a specified iteration order. All
* fields are returned and then all methods. Within the fields, supertype fields are returned
* before subtype fields. Similarly, supertype methods are returned before subtype methods.
* @throws ConfigurationException if there is a malformed injection point on {@code type}, such as
* a field with multiple binding annotations. The exception's {@link
* ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
* of the valid injection points.
*/
public static Set<InjectionPoint> forInstanceMethodsAndFields(TypeLiteral<?> type) {
List<InjectionPoint> sink = Lists.newArrayList();
Errors errors = new Errors();
// TODO (crazybob): Filter out overridden members.
addInjectionPoints(type, Factory.FIELDS, false, sink, errors);
addInjectionPoints(type, Factory.METHODS, false, sink, errors);
ImmutableSet<InjectionPoint> result = ImmutableSet.copyOf(sink);
if (errors.hasErrors()) {
throw new ConfigurationException(errors.getMessages()).withPartialValue(result);
}
return result;
}
/**
* Returns all instance method and field injection points on {@code type}.
*
* @return a possibly empty set of injection points. The set has a specified iteration order. All
* fields are returned and then all methods. Within the fields, supertype fields are returned
* before subtype fields. Similarly, supertype methods are returned before subtype methods.
* @throws ConfigurationException if there is a malformed injection point on {@code type}, such as
* a field with multiple binding annotations. The exception's {@link
* ConfigurationException#getPartialValue() partial value} is a {@code Set<InjectionPoint>}
* of the valid injection points.
*/
public static Set<InjectionPoint> forInstanceMethodsAndFields(Class<?> type) {
return forInstanceMethodsAndFields(TypeLiteral.get(type));
}
private static void checkForMisplacedBindingAnnotations(Member member, Errors errors) {
Annotation misplacedBindingAnnotation = Annotations.findBindingAnnotation(
errors, member, ((AnnotatedElement) member).getAnnotations());
if (misplacedBindingAnnotation == null) {
return;
}
// don't warn about misplaced binding annotations on methods when there's a field with the same
// name. In Scala, fields always get accessor methods (that we need to ignore). See bug 242.
if (member instanceof Method) {
try {
if (member.getDeclaringClass().getDeclaredField(member.getName()) != null) {
return;
}
} catch (NoSuchFieldException ignore) {
}
}
errors.misplacedBindingAnnotation(member, misplacedBindingAnnotation);
}
private static <M extends Member & AnnotatedElement> void addInjectionPoints(TypeLiteral<?> type,
Factory<M> factory, boolean statics, Collection<InjectionPoint> injectionPoints,
Errors errors) {
if (type.getType() == Object.class) {
return;
}
// Add injectors for superclass first.
TypeLiteral<?> superType = type.getSupertype(type.getRawType().getSuperclass());
addInjectionPoints(superType, factory, statics, injectionPoints, errors);
// Add injectors for all members next
addInjectorsForMembers(type, factory, statics, injectionPoints, errors);
}
private static <M extends Member & AnnotatedElement> void addInjectorsForMembers(
TypeLiteral<?> typeLiteral, Factory<M> factory, boolean statics,
Collection<InjectionPoint> injectionPoints, Errors errors) {
for (M member : factory.getMembers(getRawType(typeLiteral.getType()))) {
if (isStatic(member) != statics) {
continue;
}
Inject inject = member.getAnnotation(Inject.class);
if (inject == null) {
continue;
}
try {
injectionPoints.add(factory.create(typeLiteral, member, errors));
} catch (ConfigurationException ignorable) {
if (!inject.optional()) {
errors.merge(ignorable.getErrorMessages());
}
}
}
}
private static boolean isStatic(Member member) {
return Modifier.isStatic(member.getModifiers());
}
private interface Factory<M extends Member & AnnotatedElement> {
Factory<Field> FIELDS = new Factory<Field>() {
public Field[] getMembers(Class<?> type) {
return type.getDeclaredFields();
}
public InjectionPoint create(TypeLiteral<?> typeLiteral, Field member, Errors errors) {
return new InjectionPoint(typeLiteral, member);
}
};
Factory<Method> METHODS = new Factory<Method>() {
public Method[] getMembers(Class<?> type) {
return type.getDeclaredMethods();
}
public InjectionPoint create(TypeLiteral<?> typeLiteral, Method member, Errors errors) {
checkForMisplacedBindingAnnotations(member, errors);
return new InjectionPoint(typeLiteral, member);
}
};
M[] getMembers(Class<?> type);
InjectionPoint create(TypeLiteral<?> typeLiteral, M member, Errors errors);
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_spi_InjectionPoint.java |
1,797 | abstract class AbstractProcessor implements ElementVisitor<Boolean> {
protected Errors errors;
protected InjectorImpl injector;
protected AbstractProcessor(Errors errors) {
this.errors = errors;
}
public void process(Iterable<InjectorShell> isolatedInjectorBuilders) {
for (InjectorShell injectorShell : isolatedInjectorBuilders) {
process(injectorShell.getInjector(), injectorShell.getElements());
}
}
public void process(InjectorImpl injector, List<Element> elements) {
Errors errorsAnyElement = this.errors;
this.injector = injector;
try {
for (Iterator<Element> i = elements.iterator(); i.hasNext(); ) {
Element element = i.next();
this.errors = errorsAnyElement.withSource(element.getSource());
Boolean allDone = element.acceptVisitor(this);
if (allDone) {
i.remove();
}
}
} finally {
this.errors = errorsAnyElement;
this.injector = null;
}
}
public Boolean visit(Message message) {
return false;
}
public Boolean visit(ScopeBinding scopeBinding) {
return false;
}
public Boolean visit(InjectionRequest injectionRequest) {
return false;
}
public Boolean visit(StaticInjectionRequest staticInjectionRequest) {
return false;
}
public Boolean visit(TypeConverterBinding typeConverterBinding) {
return false;
}
public <T> Boolean visit(Binding<T> binding) {
return false;
}
public <T> Boolean visit(ProviderLookup<T> providerLookup) {
return false;
}
public Boolean visit(PrivateElements privateElements) {
return false;
}
public <T> Boolean visit(MembersInjectorLookup<T> lookup) {
return false;
}
public Boolean visit(TypeListenerBinding binding) {
return false;
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_AbstractProcessor.java |
914 | public class AutoCreateIndex {
private final boolean needToCheck;
private final boolean globallyDisabled;
private final String[] matches;
private final String[] matches2;
public AutoCreateIndex(Settings settings) {
String value = settings.get("action.auto_create_index");
if (value == null || Booleans.isExplicitTrue(value)) {
needToCheck = true;
globallyDisabled = false;
matches = null;
matches2 = null;
} else if (Booleans.isExplicitFalse(value)) {
needToCheck = false;
globallyDisabled = true;
matches = null;
matches2 = null;
} else {
needToCheck = true;
globallyDisabled = false;
matches = Strings.commaDelimitedListToStringArray(value);
matches2 = new String[matches.length];
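// matches2 holds each pattern with its first character stripped; it is only consulted when that character is a '+' or '-' prefix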
for (int i = 0; i < matches.length; i++) {
matches2[i] = matches[i].substring(1);
}
}
}
/**
* Do we really need to check if an index should be auto created?
*/
public boolean needToCheck() {
return this.needToCheck;
}
/**
* Should the index be auto created?
*/
public boolean shouldAutoCreate(String index, ClusterState state) {
if (!needToCheck) {
return false;
}
if (state.metaData().hasConcreteIndex(index)) {
return false;
}
if (globallyDisabled) {
return false;
}
// matches not set, default value of "true"
if (matches == null) {
return true;
}
for (int i = 0; i < matches.length; i++) {
char c = matches[i].charAt(0);
if (c == '-') {
if (Regex.simpleMatch(matches2[i], index)) {
return false;
}
} else if (c == '+') {
if (Regex.simpleMatch(matches2[i], index)) {
return true;
}
} else {
if (Regex.simpleMatch(matches[i], index)) {
return true;
}
}
}
return false;
}
} | 0true
| src_main_java_org_elasticsearch_action_support_AutoCreateIndex.java |
690 | public static class Failure {
private final String index;
private final String type;
private final String id;
private final String message;
private final RestStatus status;
public Failure(String index, String type, String id, Throwable t) {
this.index = index;
this.type = type;
this.id = id;
this.message = ExceptionsHelper.detailedMessage(t);
this.status = ExceptionsHelper.status(t);
}
public Failure(String index, String type, String id, String message, RestStatus status) {
this.index = index;
this.type = type;
this.id = id;
this.message = message;
this.status = status;
}
/**
* The index name of the action.
*/
public String getIndex() {
return this.index;
}
/**
* The type of the action.
*/
public String getType() {
return type;
}
/**
* The id of the action.
*/
public String getId() {
return id;
}
/**
* The failure message.
*/
public String getMessage() {
return this.message;
}
/**
* The rest status.
*/
public RestStatus getStatus() {
return this.status;
}
} | 0true
| src_main_java_org_elasticsearch_action_bulk_BulkItemResponse.java |
633 | public class IndicesStatusAction extends IndicesAction<IndicesStatusRequest, IndicesStatusResponse, IndicesStatusRequestBuilder> {
public static final IndicesStatusAction INSTANCE = new IndicesStatusAction();
public static final String NAME = "indices/status";
private IndicesStatusAction() {
super(NAME);
}
@Override
public IndicesStatusResponse newResponse() {
return new IndicesStatusResponse();
}
@Override
public IndicesStatusRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new IndicesStatusRequestBuilder(client);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_status_IndicesStatusAction.java |
618 | public class IndicesStatsRequest extends BroadcastOperationRequest<IndicesStatsRequest> {
private CommonStatsFlags flags = new CommonStatsFlags();
/**
* Sets all flags to return all stats.
*/
public IndicesStatsRequest all() {
flags.all();
return this;
}
/**
* Clears all stats.
*/
public IndicesStatsRequest clear() {
flags.clear();
return this;
}
/**
* Document types to return stats for. Mainly affects {@link #indexing(boolean)} when
* enabled, returning specific indexing stats for those types.
*/
public IndicesStatsRequest types(String... types) {
flags.types(types);
return this;
}
/**
* Document types to return stats for. Mainly affects {@link #indexing(boolean)} when
* enabled, returning specific indexing stats for those types.
*/
public String[] types() {
return this.flags.types();
}
/**
* Sets specific search group stats to retrieve the stats for. Mainly affects search
* when enabled.
*/
public IndicesStatsRequest groups(String... groups) {
flags.groups(groups);
return this;
}
public String[] groups() {
return this.flags.groups();
}
public IndicesStatsRequest docs(boolean docs) {
flags.set(Flag.Docs, docs);
return this;
}
public boolean docs() {
return flags.isSet(Flag.Docs);
}
public IndicesStatsRequest store(boolean store) {
flags.set(Flag.Store, store);
return this;
}
public boolean store() {
return flags.isSet(Flag.Store);
}
public IndicesStatsRequest indexing(boolean indexing) {
flags.set(Flag.Indexing, indexing);
return this;
}
public boolean indexing() {
return flags.isSet(Flag.Indexing);
}
public IndicesStatsRequest get(boolean get) {
flags.set(Flag.Get, get);
return this;
}
public boolean get() {
return flags.isSet(Flag.Get);
}
public IndicesStatsRequest search(boolean search) {
flags.set(Flag.Search, search);
return this;
}
public boolean search() {
return flags.isSet(Flag.Search);
}
public IndicesStatsRequest merge(boolean merge) {
flags.set(Flag.Merge, merge);
return this;
}
public boolean merge() {
return flags.isSet(Flag.Merge);
}
public IndicesStatsRequest refresh(boolean refresh) {
flags.set(Flag.Refresh, refresh);
return this;
}
public boolean refresh() {
return flags.isSet(Flag.Refresh);
}
public IndicesStatsRequest flush(boolean flush) {
flags.set(Flag.Flush, flush);
return this;
}
public boolean flush() {
return flags.isSet(Flag.Flush);
}
public IndicesStatsRequest warmer(boolean warmer) {
flags.set(Flag.Warmer, warmer);
return this;
}
public boolean warmer() {
return flags.isSet(Flag.Warmer);
}
public IndicesStatsRequest filterCache(boolean filterCache) {
flags.set(Flag.FilterCache, filterCache);
return this;
}
public boolean filterCache() {
return flags.isSet(Flag.FilterCache);
}
public IndicesStatsRequest idCache(boolean idCache) {
flags.set(Flag.IdCache, idCache);
return this;
}
public boolean idCache() {
return flags.isSet(Flag.IdCache);
}
public IndicesStatsRequest fieldData(boolean fieldData) {
flags.set(Flag.FieldData, fieldData);
return this;
}
public boolean fieldData() {
return flags.isSet(Flag.FieldData);
}
public IndicesStatsRequest percolate(boolean percolate) {
flags.set(Flag.Percolate, percolate);
return this;
}
public boolean percolate() {
return flags.isSet(Flag.Percolate);
}
public IndicesStatsRequest segments(boolean segments) {
flags.set(Flag.Segments, segments);
return this;
}
public boolean segments() {
return flags.isSet(Flag.Segments);
}
public IndicesStatsRequest fieldDataFields(String... fieldDataFields) {
flags.fieldDataFields(fieldDataFields);
return this;
}
public String[] fieldDataFields() {
return flags.fieldDataFields();
}
public IndicesStatsRequest completion(boolean completion) {
flags.set(Flag.Completion, completion);
return this;
}
public boolean completion() {
return flags.isSet(Flag.Completion);
}
public IndicesStatsRequest completionFields(String... completionDataFields) {
flags.completionDataFields(completionDataFields);
return this;
}
public String[] completionFields() {
return flags.completionDataFields();
}
public IndicesStatsRequest translog(boolean translog) {
flags.set(Flag.Translog, translog);
return this;
}
public boolean translog() {
return flags.isSet(Flag.Translog);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
flags.writeTo(out);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
flags = CommonStatsFlags.readCommonStatsFlags(in);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_stats_IndicesStatsRequest.java |
2,388 | abstract class AbstractBigArray extends AbstractArray {
private Recycler.V<?>[] cache;
private final int pageShift;
private final int pageMask;
protected long size;
protected AbstractBigArray(int pageSize, PageCacheRecycler recycler, boolean clearOnResize) {
super(recycler, clearOnResize);
Preconditions.checkArgument(pageSize >= 128, "pageSize must be >= 128");
Preconditions.checkArgument((pageSize & (pageSize - 1)) == 0, "pageSize must be a power of two");
this.pageShift = Integer.numberOfTrailingZeros(pageSize);
this.pageMask = pageSize - 1;
size = 0;
if (this.recycler != null) {
cache = new Recycler.V<?>[16];
} else {
cache = null;
}
}
final int numPages(long capacity) {
final long numPages = (capacity + pageMask) >>> pageShift;
Preconditions.checkArgument(numPages <= Integer.MAX_VALUE, "pageSize=" + (pageMask + 1) + " is too small for such as capacity: " + capacity);
return (int) numPages;
}
final int pageSize() {
return pageMask + 1;
}
final int pageIndex(long index) {
return (int) (index >>> pageShift);
}
final int indexInPage(long index) {
return (int) (index & pageMask);
}
public final long size() {
return size;
}
protected abstract int numBytesPerElement();
public final long sizeInBytes() {
// rough approximate, we only take into account the size of the values, not the overhead of the array objects
return ((long) pageIndex(size - 1) + 1) * pageSize() * numBytesPerElement();
}
private static <T> T[] grow(T[] array, int minSize) {
if (array.length < minSize) {
final int newLen = ArrayUtil.oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
array = Arrays.copyOf(array, newLen);
}
return array;
}
private <T> T registerNewPage(Recycler.V<T> v, int page, int expectedSize) {
cache = grow(cache, page + 1);
assert cache[page] == null;
cache[page] = v;
assert Array.getLength(v.v()) == expectedSize;
return v.v();
}
protected final byte[] newBytePage(int page) {
if (recycler != null) {
final Recycler.V<byte[]> v = recycler.bytePage(clearOnResize);
return registerNewPage(v, page, BigArrays.BYTE_PAGE_SIZE);
} else {
return new byte[BigArrays.BYTE_PAGE_SIZE];
}
}
protected final int[] newIntPage(int page) {
if (recycler != null) {
final Recycler.V<int[]> v = recycler.intPage(clearOnResize);
return registerNewPage(v, page, BigArrays.INT_PAGE_SIZE);
} else {
return new int[BigArrays.INT_PAGE_SIZE];
}
}
protected final long[] newLongPage(int page) {
if (recycler != null) {
final Recycler.V<long[]> v = recycler.longPage(clearOnResize);
return registerNewPage(v, page, BigArrays.LONG_PAGE_SIZE);
} else {
return new long[BigArrays.LONG_PAGE_SIZE];
}
}
protected final double[] newDoublePage(int page) {
if (recycler != null) {
final Recycler.V<double[]> v = recycler.doublePage(clearOnResize);
return registerNewPage(v, page, BigArrays.DOUBLE_PAGE_SIZE);
} else {
return new double[BigArrays.DOUBLE_PAGE_SIZE];
}
}
protected final Object[] newObjectPage(int page) {
if (recycler != null) {
final Recycler.V<Object[]> v = recycler.objectPage();
return registerNewPage(v, page, BigArrays.OBJECT_PAGE_SIZE);
} else {
return new Object[BigArrays.OBJECT_PAGE_SIZE];
}
}
protected final void releasePage(int page) {
if (recycler != null) {
cache[page].release();
cache[page] = null;
}
}
@Override
public final boolean release() {
super.release();
if (recycler != null) {
Releasables.release(cache);
cache = null;
}
return true;
}
} | 0true
| src_main_java_org_elasticsearch_common_util_AbstractBigArray.java |
1,297 | public final class SimpleQueueTest {
private static final int VALUE_SIZE = 1000;
private static final int STATS_SECONDS = 10;
private SimpleQueueTest() {
}
/**
* Creates a cluster and exercises a queue until stopped
* @param args none
*/
public static void main(String[] args) {
int threadCount = 5;
final HazelcastInstance hz1 = Hazelcast.newHazelcastInstance(null);
final Stats stats = new Stats();
ExecutorService es = Executors.newFixedThreadPool(threadCount);
for (int i = 0; i < threadCount; i++) {
es.submit(new Runnable() {
public void run() {
Random random = new Random();
while (true) {
int ran = random.nextInt(100);
Queue<byte[]> queue = hz1.getQueue("default" + ran);
for (int j = 0; j < 1000; j++) {
queue.offer(new byte[VALUE_SIZE]);
stats.offers.incrementAndGet();
}
for (int j = 0; j < 1000; j++) {
queue.poll();
stats.polls.incrementAndGet();
}
}
}
});
}
Executors.newSingleThreadExecutor().submit(new Runnable() {
@SuppressWarnings("BusyWait")
public void run() {
while (true) {
try {
Thread.sleep(STATS_SECONDS * 1000);
System.out.println("cluster size:"
+ hz1.getCluster().getMembers().size());
Stats currentStats = stats.getAndReset();
System.out.println(currentStats);
System.out.println("Operations per Second : " + currentStats.total()
/ STATS_SECONDS);
} catch (Exception e) {
e.printStackTrace();
}
}
}
});
}
/**
* A basic statistics class
*/
public static class Stats {
private AtomicLong offers = new AtomicLong();
private AtomicLong polls = new AtomicLong();
public Stats getAndReset() {
long offersNow = offers.getAndSet(0);
long pollsNow = polls.getAndSet(0);
Stats newOne = new Stats();
newOne.offers.set(offersNow);
newOne.polls.set(pollsNow);
return newOne;
}
public long total() {
return offers.get() + polls.get();
}
public String toString() {
return "total= " + total() + ", offers:" + offers.get() + ", polls:" + polls.get();
}
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_examples_SimpleQueueTest.java |
962 | public abstract class NodesOperationRequest<T extends NodesOperationRequest> extends ActionRequest<T> {
public static String[] ALL_NODES = Strings.EMPTY_ARRAY;
private String[] nodesIds;
private TimeValue timeout;
protected NodesOperationRequest() {
}
protected NodesOperationRequest(String... nodesIds) {
this.nodesIds = nodesIds;
}
public final String[] nodesIds() {
return nodesIds;
}
@SuppressWarnings("unchecked")
public final T nodesIds(String... nodesIds) {
this.nodesIds = nodesIds;
return (T) this;
}
public TimeValue timeout() {
return this.timeout;
}
@SuppressWarnings("unchecked")
public final T timeout(TimeValue timeout) {
this.timeout = timeout;
return (T) this;
}
@SuppressWarnings("unchecked")
public final T timeout(String timeout) {
this.timeout = TimeValue.parseTimeValue(timeout, null);
return (T) this;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodesIds = in.readStringArray();
if (in.readBoolean()) {
timeout = TimeValue.readTimeValue(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(nodesIds);
if (timeout == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
timeout.writeTo(out);
}
}
} | 0true
| src_main_java_org_elasticsearch_action_support_nodes_NodesOperationRequest.java |
566 | trackedList.addChangeListener(new OMultiValueChangeListener<Integer, String>() {
public void onAfterRecordChanged(final OMultiValueChangeEvent<Integer, String> event) {
firedEvents.add(event);
}
}); | 0true
| core_src_test_java_com_orientechnologies_orient_core_index_OCompositeIndexDefinitionTest.java |
1,276 | public class DummyStore implements MapLoader, MapStore {
public Set loadAllKeys() {
System.out.println("Loader.loadAllKeys ");
Set keys = new HashSet();
keys.add("key");
return keys;
}
public Object load(Object key) {
System.out.println("Loader.load " + key);
return "loadedvalue";
}
public Map loadAll(Collection keys) {
System.out.println("Loader.loadAll keys " + keys);
return null;
}
public void store(Object key, Object value) {
System.out.println("Store.store key=" + key + ", value=" + value);
}
public void storeAll(Map map) {
System.out.println("Store.storeAll " + map.size());
}
public void delete(Object key) {
System.out.println("Store.delete " + key);
}
public void deleteAll(Collection keys) {
System.out.println("Store.deleteAll " + keys);
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_examples_DummyStore.java |
780 | searchAction.execute(searchRequest, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse response) {
listener.onResponse(response);
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
}); | 0true
| src_main_java_org_elasticsearch_action_mlt_TransportMoreLikeThisAction.java |
1,042 | public interface OrderItemPriceDetail extends Serializable {
/**
* The unique identifier of this OrderItem
* @return
*/
Long getId();
/**
* Sets the unique id of the OrderItem. Typically left blank for new items and Broadleaf will
* set using the next sequence number.
* @param id
*/
void setId(Long id);
/**
* Reference back to the containing orderItem.
* @return
*/
OrderItem getOrderItem();
/**
* Sets the orderItem for this itemPriceDetail.
* @param order
*/
void setOrderItem(OrderItem order);
/**
* Returns a List of the adjustments that effected this priceDetail.
* @return a List of OrderItemPriceDetailAdjustment
*/
List<OrderItemPriceDetailAdjustment> getOrderItemPriceDetailAdjustments();
/**
* Sets the list of OrderItemPriceDetailAdjustment
* @param orderItemPriceDetailAdjustments
*/
void setOrderItemAdjustments(List<OrderItemPriceDetailAdjustment> orderItemPriceDetailAdjustments);
/**
* The quantity of this {@link OrderItemPriceDetail}.
*
* @return
*/
int getQuantity();
/**
* Returns the quantity
* @param quantity
*/
void setQuantity(int quantity);
/**
* Returns the value of all adjustments for a single quantity of the item.
*
* Use {@link #getTotalAdjustmentValue()} to get the total for all quantities of this item.
*
* @return
*/
Money getAdjustmentValue();
/**
* Returns getAdjustmentValue() * the quantity.
*
* @return
*/
Money getTotalAdjustmentValue();
/**
* Returns the total adjustedPrice.
*
* @return
*/
Money getTotalAdjustedPrice();
/**
* Indicates that the adjustments were based off of the item's sale price.
* @return
*/
boolean getUseSalePrice();
/**
* Set that the adjustments should be taken off of the item's sale price.
* @param useSalePrice
*/
void setUseSalePrice(boolean useSalePrice);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_OrderItemPriceDetail.java |
3,703 | public static class Defaults extends AbstractFieldMapper.Defaults {
public static final String NAME = UidFieldMapper.NAME;
public static final String INDEX_NAME = UidFieldMapper.NAME;
public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
public static final FieldType NESTED_FIELD_TYPE;
static {
FIELD_TYPE.setIndexed(true);
FIELD_TYPE.setTokenized(false);
FIELD_TYPE.setStored(true);
FIELD_TYPE.setOmitNorms(true);
FIELD_TYPE.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);
FIELD_TYPE.freeze();
NESTED_FIELD_TYPE = new FieldType(FIELD_TYPE);
NESTED_FIELD_TYPE.setStored(false);
NESTED_FIELD_TYPE.freeze();
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_internal_UidFieldMapper.java |
1,559 | @XmlRootElement(name = "handler")
@XmlType(propOrder = { "parameters", "clazz" })
public class OServerHandlerConfiguration {
@XmlAttribute(name = "class", required = true)
public String clazz;
@XmlElementWrapper
@XmlElementRef(type = OServerParameterConfiguration.class)
public OServerParameterConfiguration[] parameters;
} | 0true
| server_src_main_java_com_orientechnologies_orient_server_config_OServerHandlerConfiguration.java |
1,997 | assertTrueEventually(new AssertTask() {
@Override
public void run() throws Exception {
assertEquals(TestEventBasedMapStore.STORE_EVENTS.LOAD_ALL_KEYS, testMapStore.getEvents().poll());
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_mapstore_MapStoreTest.java |
158 | public class ONullSerializer implements OBinarySerializer<Object> {
public static ONullSerializer INSTANCE = new ONullSerializer();
public static final byte ID = 11;
public int getObjectSize(final Object object, Object... hints) {
return 0;
}
public void serialize(final Object object, final byte[] stream, final int startPosition, Object... hints) {
// nothing to serialize
}
public Object deserialize(final byte[] stream, final int startPosition) {
// nothing to deserialize
return null;
}
public int getObjectSize(byte[] stream, int startPosition) {
return 0;
}
public byte getId() {
return ID;
}
public int getObjectSizeNative(byte[] stream, int startPosition) {
return 0;
}
public void serializeNative(Object object, byte[] stream, int startPosition, Object... hints) {
}
public Object deserializeNative(byte[] stream, int startPosition) {
return null;
}
@Override
public void serializeInDirectMemory(Object object, ODirectMemoryPointer pointer, long offset, Object... hints) {
}
@Override
public Object deserializeFromDirectMemory(ODirectMemoryPointer pointer, long offset) {
return null;
}
@Override
public int getObjectSizeInDirectMemory(ODirectMemoryPointer pointer, long offset) {
return 0;
}
public boolean isFixedLength() {
return true;
}
public int getFixedLength() {
return 0;
}
@Override
public Object preprocess(Object value, Object... hints) {
return null;
}
} | 0true
| commons_src_main_java_com_orientechnologies_common_serialization_types_ONullSerializer.java |
32 | @Service("blOrderItemFieldService")
public class OrderItemFieldServiceImpl extends AbstractRuleBuilderFieldService {
//TODO: extensibility mechanism, support i18N
@Override
public void init() {
fields.add(new FieldData.Builder()
.label("rule_orderItemName")
.name("name")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemPrice")
.name("price")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.MONEY)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemQuantity")
.name("quantity")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.INTEGER)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemRetailPrice")
.name("retailPrice")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.MONEY)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemSalePrice")
.name("salePrice")
.operators("blcOperators_Numeric")
.options("[]")
.type(SupportedFieldType.MONEY)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemCategoryId")
.name("category.id")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.ID)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemCategoryName")
.name("category.name")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemCategoryFulfillmentType")
.name("category.fulfillmentType")
.operators("blcOperators_Enumeration")
.options("blcOptions_FulfillmentType")
.type(SupportedFieldType.BROADLEAF_ENUMERATION)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemInventoryType")
.name("category.inventoryType")
.operators("blcOperators_Enumeration")
.options("blcOptions_InventoryType")
.type(SupportedFieldType.BROADLEAF_ENUMERATION)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemCategoryUrl")
.name("category.url")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemCategoryUrlKey")
.name("category.urlKey")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemCategoryDescription")
.name("category.description")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemCategoryLongDescription")
.name("category.longDescription")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemProductCanSellWithoutOptions")
.name("product.canSellWithoutOptions")
.operators("blcOperators_Boolean")
.options("[]")
.type(SupportedFieldType.BOOLEAN)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemProductUrl")
.name("product.url")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemProductUrlKey")
.name("product.urlKey")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemProductIsFeatured")
.name("product.isFeaturedProduct")
.operators("blcOperators_Boolean")
.options("[]")
.type(SupportedFieldType.BOOLEAN)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemProductManufacturer")
.name("product.manufacturer")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemProductModel")
.name("product.model")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemSkuFulfillmentType")
.name("sku.fulfillmentType")
.operators("blcOperators_Enumeration")
.options("blcOptions_FulfillmentType")
.type(SupportedFieldType.BROADLEAF_ENUMERATION)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemSkuInventoryType")
.name("sku.inventoryType")
.operators("blcOperators_Enumeration")
.options("blcOptions_InventoryType")
.type(SupportedFieldType.BROADLEAF_ENUMERATION)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemSkuDescription")
.name("sku.description")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemSkuLongDescription")
.name("sku.longDescription")
.operators("blcOperators_Text")
.options("[]")
.type(SupportedFieldType.STRING)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemSkuTaxable")
.name("sku.taxable")
.operators("blcOperators_Boolean")
.options("[]")
.type(SupportedFieldType.BOOLEAN)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemSkuAvailable")
.name("sku.available")
.operators("blcOperators_Boolean")
.options("[]")
.type(SupportedFieldType.BOOLEAN)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemSkuStartDate")
.name("sku.activeStartDate")
.operators("blcOperators_Date")
.options("[]")
.type(SupportedFieldType.DATE)
.build());
fields.add(new FieldData.Builder()
.label("rule_orderItemSkuEndDate")
.name("sku.activeEndDate")
.operators("blcOperators_Date")
.options("[]")
.type(SupportedFieldType.DATE)
.build());
}
@Override
public String getName() {
return RuleIdentifier.ORDERITEM;
}
@Override
public String getDtoClassName() {
return "org.broadleafcommerce.core.order.domain.OrderItemImpl";
}
} | 0true
| admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_OrderItemFieldServiceImpl.java |
1,439 | public class RepositoryMetaData {
private final String name;
private final String type;
private final Settings settings;
/**
* Constructs new repository metadata
*
* @param name repository name
* @param type repository type
* @param settings repository settings
*/
public RepositoryMetaData(String name, String type, Settings settings) {
this.name = name;
this.type = type;
this.settings = settings;
}
/**
* Returns repository name
*
* @return repository name
*/
public String name() {
return this.name;
}
/**
* Returns repository type
*
* @return repository type
*/
public String type() {
return this.type;
}
/**
* Returns repository settings
*
* @return repository settings
*/
public Settings settings() {
return this.settings;
}
/**
* Reads repository metadata from stream input
*
* @param in stream input
* @return repository metadata
* @throws IOException
*/
public static RepositoryMetaData readFrom(StreamInput in) throws IOException {
String name = in.readString();
String type = in.readString();
Settings settings = ImmutableSettings.readSettingsFromStream(in);
return new RepositoryMetaData(name, type, settings);
}
/**
* Writes repository metadata to stream output
*
* @param out stream output
* @throws IOException
*/
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeString(type);
ImmutableSettings.writeSettingsToStream(settings, out);
}
} | 0true
| src_main_java_org_elasticsearch_cluster_metadata_RepositoryMetaData.java |
797 | public class LongWrapper {
private long value;
public long get() {
return value;
}
public long addAndGet(long delta) {
value += delta;
return value;
}
public void set(long value) {
this.value = value;
}
public boolean compareAndSet(long expect, long value) {
if (this.value != expect) {
return false;
}
this.value = value;
return true;
}
public long getAndAdd(long delta) {
long tempValue = value;
value += delta;
return tempValue;
}
public long getAndSet(long value) {
long tempValue = this.value;
this.value = value;
return tempValue;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_LongWrapper.java |
3,387 | public class EventServiceImpl implements EventService {
private static final EventRegistration[] EMPTY_REGISTRATIONS = new EventRegistration[0];
private final ILogger logger;
private final NodeEngineImpl nodeEngine;
private final ConcurrentMap<String, EventServiceSegment> segments;
private final StripedExecutor eventExecutor;
private final int eventQueueTimeoutMs;
private final int eventThreadCount;
private final int eventQueueCapacity;
EventServiceImpl(NodeEngineImpl nodeEngine) {
this.nodeEngine = nodeEngine;
this.logger = nodeEngine.getLogger(EventService.class.getName());
final Node node = nodeEngine.getNode();
GroupProperties groupProperties = node.getGroupProperties();
this.eventThreadCount = groupProperties.EVENT_THREAD_COUNT.getInteger();
this.eventQueueCapacity = groupProperties.EVENT_QUEUE_CAPACITY.getInteger();
this.eventQueueTimeoutMs = groupProperties.EVENT_QUEUE_TIMEOUT_MILLIS.getInteger();
this.eventExecutor = new StripedExecutor(
node.getLogger(EventServiceImpl.class),
node.getThreadNamePrefix("event"),
node.threadGroup,
eventThreadCount,
eventQueueCapacity);
this.segments = new ConcurrentHashMap<String, EventServiceSegment>();
}
@Override
public int getEventThreadCount() {
return eventThreadCount;
}
@Override
public int getEventQueueCapacity() {
return eventQueueCapacity;
}
@Override
public int getEventQueueSize() {
return eventExecutor.getWorkQueueSize();
}
@Override
public EventRegistration registerLocalListener(String serviceName, String topic, Object listener) {
return registerListenerInternal(serviceName, topic, new EmptyFilter(), listener, true);
}
@Override
public EventRegistration registerLocalListener(String serviceName, String topic, EventFilter filter, Object listener) {
return registerListenerInternal(serviceName, topic, filter, listener, true);
}
@Override
public EventRegistration registerListener(String serviceName, String topic, Object listener) {
return registerListenerInternal(serviceName, topic, new EmptyFilter(), listener, false);
}
@Override
public EventRegistration registerListener(String serviceName, String topic, EventFilter filter, Object listener) {
return registerListenerInternal(serviceName, topic, filter, listener, false);
}
private EventRegistration registerListenerInternal(String serviceName, String topic, EventFilter filter,
Object listener, boolean localOnly) {
if (listener == null) {
throw new IllegalArgumentException("Listener required!");
}
if (filter == null) {
throw new IllegalArgumentException("EventFilter required!");
}
EventServiceSegment segment = getSegment(serviceName, true);
Registration reg = new Registration(UUID.randomUUID().toString(), serviceName, topic, filter,
nodeEngine.getThisAddress(), listener, localOnly);
if (segment.addRegistration(topic, reg)) {
if (!localOnly) {
invokeRegistrationOnOtherNodes(serviceName, reg);
}
return reg;
} else {
return null;
}
}
private boolean handleRegistration(Registration reg) {
if (nodeEngine.getThisAddress().equals(reg.getSubscriber())) {
return false;
}
EventServiceSegment segment = getSegment(reg.serviceName, true);
return segment.addRegistration(reg.topic, reg);
}
@Override
public boolean deregisterListener(String serviceName, String topic, Object id) {
final EventServiceSegment segment = getSegment(serviceName, false);
if (segment != null) {
final Registration reg = segment.removeRegistration(topic, String.valueOf(id));
if (reg != null && !reg.isLocalOnly()) {
invokeDeregistrationOnOtherNodes(serviceName, topic, String.valueOf(id));
}
return reg != null;
}
return false;
}
@Override
public void deregisterAllListeners(String serviceName, String topic) {
final EventServiceSegment segment = getSegment(serviceName, false);
if (segment != null) {
segment.removeRegistrations(topic);
}
}
private void deregisterSubscriber(String serviceName, String topic, String id) {
final EventServiceSegment segment = getSegment(serviceName, false);
if (segment != null) {
segment.removeRegistration(topic, id);
}
}
private void invokeRegistrationOnOtherNodes(String serviceName, Registration reg) {
Collection<MemberImpl> members = nodeEngine.getClusterService().getMemberList();
Collection<Future> calls = new ArrayList<Future>(members.size());
for (MemberImpl member : members) {
if (!member.localMember()) {
Future f = nodeEngine.getOperationService().invokeOnTarget(serviceName,
new RegistrationOperation(reg), member.getAddress());
calls.add(f);
}
}
for (Future f : calls) {
try {
f.get(5, TimeUnit.SECONDS);
} catch (InterruptedException ignored) {
} catch (TimeoutException ignored) {
} catch (MemberLeftException e) {
logger.finest("Member left while registering listener...", e);
} catch (ExecutionException e) {
throw new HazelcastException(e);
}
}
}
private void invokeDeregistrationOnOtherNodes(String serviceName, String topic, String id) {
Collection<MemberImpl> members = nodeEngine.getClusterService().getMemberList();
Collection<Future> calls = new ArrayList<Future>(members.size());
for (MemberImpl member : members) {
if (!member.localMember()) {
Future f = nodeEngine.getOperationService().invokeOnTarget(serviceName,
new DeregistrationOperation(topic, id), member.getAddress());
calls.add(f);
}
}
for (Future f : calls) {
try {
f.get(5, TimeUnit.SECONDS);
} catch (InterruptedException ignored) {
} catch (TimeoutException ignored) {
} catch (MemberLeftException e) {
logger.finest("Member left while de-registering listener...", e);
} catch (ExecutionException e) {
throw new HazelcastException(e);
}
}
}
@Override
public EventRegistration[] getRegistrationsAsArray(String serviceName, String topic) {
final EventServiceSegment segment = getSegment(serviceName, false);
if (segment != null) {
final Collection<Registration> registrations = segment.getRegistrations(topic, false);
return registrations != null && !registrations.isEmpty()
? registrations.toArray(new Registration[registrations.size()])
: EMPTY_REGISTRATIONS;
}
return EMPTY_REGISTRATIONS;
}
@Override
public Collection<EventRegistration> getRegistrations(String serviceName, String topic) {
final EventServiceSegment segment = getSegment(serviceName, false);
if (segment != null) {
final Collection<Registration> registrations = segment.getRegistrations(topic, false);
return registrations != null && !registrations.isEmpty()
? Collections.<EventRegistration>unmodifiableCollection(registrations)
: Collections.<EventRegistration>emptySet();
}
return Collections.emptySet();
}
@Override
public void publishEvent(String serviceName, EventRegistration registration, Object event, int orderKey) {
if (!(registration instanceof Registration)) {
throw new IllegalArgumentException();
}
final Registration reg = (Registration) registration;
if (isLocal(reg)) {
executeLocal(serviceName, event, reg, orderKey);
} else {
final Address subscriber = registration.getSubscriber();
sendEventPacket(subscriber, new EventPacket(registration.getId(), serviceName, event), orderKey);
}
}
@Override
public void publishEvent(String serviceName, Collection<EventRegistration> registrations, Object event, int orderKey) {
final Iterator<EventRegistration> iter = registrations.iterator();
Data eventData = null;
while (iter.hasNext()) {
EventRegistration registration = iter.next();
if (!(registration instanceof Registration)) {
throw new IllegalArgumentException();
}
final Registration reg = (Registration) registration;
if (isLocal(reg)) {
executeLocal(serviceName, event, reg, orderKey);
} else {
if (eventData == null) {
eventData = nodeEngine.toData(event);
}
final Address subscriber = registration.getSubscriber();
sendEventPacket(subscriber, new EventPacket(registration.getId(), serviceName, eventData), orderKey);
}
}
}
private void executeLocal(String serviceName, Object event, Registration reg, int orderKey) {
if (nodeEngine.isActive()) {
try {
if (reg.listener != null) {
eventExecutor.execute(new LocalEventDispatcher(serviceName, event, reg.listener, orderKey, eventQueueTimeoutMs));
} else {
logger.warning("Something seems wrong! Listener instance is null! -> " + reg);
}
} catch (RejectedExecutionException e) {
if (eventExecutor.isLive()) {
logger.warning("EventQueue overloaded! " + event + " failed to publish to " + reg.serviceName + ":" + reg.topic);
}
}
}
}
private void sendEventPacket(Address subscriber, EventPacket eventPacket, int orderKey) {
final String serviceName = eventPacket.serviceName;
final EventServiceSegment segment = getSegment(serviceName, true);
boolean sync = segment.incrementPublish() % 100000 == 0;
if (sync) {
Future f = nodeEngine.getOperationService().createInvocationBuilder(serviceName,
new SendEventOperation(eventPacket, orderKey), subscriber).setTryCount(50).invoke();
try {
f.get(3, TimeUnit.SECONDS);
} catch (Exception ignored) {
}
} else {
final Packet packet = new Packet(nodeEngine.toData(eventPacket), orderKey, nodeEngine.getSerializationContext());
packet.setHeader(Packet.HEADER_EVENT);
nodeEngine.send(packet, subscriber);
}
}
private EventServiceSegment getSegment(String service, boolean forceCreate) {
EventServiceSegment segment = segments.get(service);
if (segment == null && forceCreate) {
return ConcurrencyUtil.getOrPutIfAbsent(segments, service, new ConstructorFunction<String, EventServiceSegment>() {
public EventServiceSegment createNew(String key) {
return new EventServiceSegment(key);
}
});
}
return segment;
}
private boolean isLocal(Registration reg) {
return nodeEngine.getThisAddress().equals(reg.getSubscriber());
}
@PrivateApi
void executeEvent(Runnable eventRunnable) {
if (nodeEngine.isActive()) {
try {
eventExecutor.execute(eventRunnable);
} catch (RejectedExecutionException e) {
if (eventExecutor.isLive()) {
logger.warning("EventQueue overloaded! Failed to execute event process: " + eventRunnable);
}
}
}
}
@PrivateApi
void handleEvent(Packet packet) {
try {
eventExecutor.execute(new RemoteEventPacketProcessor(packet));
} catch (RejectedExecutionException e) {
if (eventExecutor.isLive()) {
final Connection conn = packet.getConn();
String endpoint = conn.getEndPoint() != null ? conn.getEndPoint().toString() : conn.toString();
logger.warning("EventQueue overloaded! Failed to process event packet sent from: " + endpoint);
}
}
}
public PostJoinRegistrationOperation getPostJoinOperation() {
final Collection<Registration> registrations = new LinkedList<Registration>();
for (EventServiceSegment segment : segments.values()) {
for (Registration reg : segment.registrationIdMap.values()) {
if (!reg.isLocalOnly()) {
registrations.add(reg);
}
}
}
return registrations.isEmpty() ? null : new PostJoinRegistrationOperation(registrations);
}
void shutdown() {
logger.finest("Stopping event executor...");
eventExecutor.shutdown();
for (EventServiceSegment segment : segments.values()) {
segment.clear();
}
segments.clear();
}
void onMemberLeft(MemberImpl member) {
final Address address = member.getAddress();
for (EventServiceSegment segment : segments.values()) {
segment.onMemberLeft(address);
}
}
private static class EventServiceSegment {
final String serviceName;
final ConcurrentMap<String, Collection<Registration>> registrations
= new ConcurrentHashMap<String, Collection<Registration>>();
final ConcurrentMap<String, Registration> registrationIdMap = new ConcurrentHashMap<String, Registration>();
final AtomicInteger totalPublishes = new AtomicInteger();
EventServiceSegment(String serviceName) {
this.serviceName = serviceName;
}
private Collection<Registration> getRegistrations(String topic, boolean forceCreate) {
Collection<Registration> listenerList = registrations.get(topic);
if (listenerList == null && forceCreate) {
return ConcurrencyUtil.getOrPutIfAbsent(registrations, topic, new ConstructorFunction<String, Collection<Registration>>() {
public Collection<Registration> createNew(String key) {
return Collections.newSetFromMap(new ConcurrentHashMap<Registration, Boolean>());
}
});
}
return listenerList;
}
private boolean addRegistration(String topic, Registration registration) {
final Collection<Registration> registrations = getRegistrations(topic, true);
if (registrations.add(registration)) {
registrationIdMap.put(registration.id, registration);
return true;
}
return false;
}
private Registration removeRegistration(String topic, String id) {
final Registration registration = registrationIdMap.remove(id);
if (registration != null) {
final Collection<Registration> all = registrations.get(topic);
if (all != null) {
all.remove(registration);
}
}
return registration;
}
void removeRegistrations(String topic) {
final Collection<Registration> all = registrations.remove(topic);
if (all != null) {
for (Registration reg : all) {
registrationIdMap.remove(reg.getId());
}
}
}
void clear() {
registrations.clear();
registrationIdMap.clear();
}
void onMemberLeft(Address address) {
for (Collection<Registration> all : registrations.values()) {
Iterator<Registration> iter = all.iterator();
while (iter.hasNext()) {
Registration reg = iter.next();
if (address.equals(reg.getSubscriber())) {
iter.remove();
registrationIdMap.remove(reg.id);
}
}
}
}
int incrementPublish() {
return totalPublishes.incrementAndGet();
}
}
private class EventPacketProcessor implements StripedRunnable {
private EventPacket eventPacket;
int orderKey;
private EventPacketProcessor() {
}
public EventPacketProcessor(EventPacket packet, int orderKey) {
this.eventPacket = packet;
this.orderKey = orderKey;
}
@Override
public void run() {
process(eventPacket);
}
void process(EventPacket eventPacket) {
Object eventObject = eventPacket.event;
if (eventObject instanceof Data) {
eventObject = nodeEngine.toObject(eventObject);
}
final String serviceName = eventPacket.serviceName;
EventPublishingService<Object, Object> service = nodeEngine.getService(serviceName);
if (service == null) {
if (nodeEngine.isActive()) {
logger.warning("There is no service named: " + serviceName);
}
return;
}
EventServiceSegment segment = getSegment(serviceName, false);
if (segment == null) {
if (nodeEngine.isActive()) {
logger.warning("No service registration found for " + serviceName);
}
return;
}
Registration registration = segment.registrationIdMap.get(eventPacket.id);
if (registration == null) {
if (nodeEngine.isActive()) {
if (logger.isFinestEnabled()) {
logger.finest("No registration found for " + serviceName + " / " + eventPacket.id);
}
}
return;
}
if (!isLocal(registration)) {
logger.severe("Invalid target for " + registration);
return;
}
if (registration.listener == null) {
logger.warning("Something seems wrong! Subscriber is local but listener instance is null! -> " + registration);
return;
}
service.dispatchEvent(eventObject, registration.listener);
}
@Override
public int getKey() {
return orderKey;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("EventPacketProcessor{");
sb.append("eventPacket=").append(eventPacket);
sb.append('}');
return sb.toString();
}
}
private class RemoteEventPacketProcessor extends EventPacketProcessor implements StripedRunnable {
private Packet packet;
public RemoteEventPacketProcessor(Packet packet) {
this.packet = packet;
this.orderKey = packet.getPartitionId();
}
@Override
public void run() {
Data data = packet.getData();
EventPacket eventPacket = (EventPacket) nodeEngine.toObject(data);
process(eventPacket);
}
}
private class LocalEventDispatcher implements StripedRunnable, TimeoutRunnable {
final String serviceName;
final Object event;
final Object listener;
final int orderKey;
final long timeoutMs;
private LocalEventDispatcher(String serviceName, Object event, Object listener, int orderKey, long timeoutMs) {
this.serviceName = serviceName;
this.event = event;
this.listener = listener;
this.orderKey = orderKey;
this.timeoutMs = timeoutMs;
}
@Override
public long getTimeout() {
return timeoutMs;
}
@Override
public TimeUnit getTimeUnit() {
return TimeUnit.MILLISECONDS;
}
@Override
public final void run() {
final EventPublishingService<Object, Object> service = nodeEngine.getService(serviceName);
if (service != null) {
service.dispatchEvent(event, listener);
} else {
if (nodeEngine.isActive()) {
throw new IllegalArgumentException("Service[" + serviceName + "] could not be found!");
}
}
}
@Override
public int getKey() {
return orderKey;
}
}
public static class Registration implements EventRegistration {
private String id;
private String serviceName;
private String topic;
private EventFilter filter;
private Address subscriber;
private transient boolean localOnly;
private transient Object listener;
public Registration() {
}
public Registration(String id, String serviceName, String topic,
EventFilter filter, Address subscriber, Object listener, boolean localOnly) {
this.filter = filter;
this.id = id;
this.listener = listener;
this.serviceName = serviceName;
this.topic = topic;
this.subscriber = subscriber;
this.localOnly = localOnly;
}
@Override
public EventFilter getFilter() {
return filter;
}
@Override
public String getId() {
return id;
}
@Override
public Address getSubscriber() {
return subscriber;
}
@Override
public boolean isLocalOnly() {
return localOnly;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Registration that = (Registration) o;
if (id != null ? !id.equals(that.id) : that.id != null) return false;
if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
if (topic != null ? !topic.equals(that.topic) : that.topic != null) return false;
if (filter != null ? !filter.equals(that.filter) : that.filter != null) return false;
if (subscriber != null ? !subscriber.equals(that.subscriber) : that.subscriber != null) return false;
return true;
}
@Override
public int hashCode() {
int result = id != null ? id.hashCode() : 0;
result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
result = 31 * result + (topic != null ? topic.hashCode() : 0);
result = 31 * result + (filter != null ? filter.hashCode() : 0);
result = 31 * result + (subscriber != null ? subscriber.hashCode() : 0);
return result;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(id);
out.writeUTF(serviceName);
out.writeUTF(topic);
subscriber.writeData(out);
out.writeObject(filter);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
id = in.readUTF();
serviceName = in.readUTF();
topic = in.readUTF();
subscriber = new Address();
subscriber.readData(in);
filter = in.readObject();
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("Registration");
sb.append("{filter=").append(filter);
sb.append(", id='").append(id).append('\'');
sb.append(", serviceName='").append(serviceName).append('\'');
sb.append(", subscriber=").append(subscriber);
sb.append(", listener=").append(listener);
sb.append('}');
return sb.toString();
}
}
public final static class EventPacket implements IdentifiedDataSerializable {
private String id;
private String serviceName;
private Object event;
public EventPacket() {
}
EventPacket(String id, String serviceName, Object event) {
this.event = event;
this.id = id;
this.serviceName = serviceName;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(id);
out.writeUTF(serviceName);
out.writeObject(event);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
id = in.readUTF();
serviceName = in.readUTF();
event = in.readObject();
}
@Override
public int getFactoryId() {
return SpiDataSerializerHook.F_ID;
}
@Override
public int getId() {
return SpiDataSerializerHook.EVENT_PACKET;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("EventPacket{");
sb.append("id='").append(id).append('\'');
sb.append(", serviceName='").append(serviceName).append('\'');
sb.append(", event=").append(event);
sb.append('}');
return sb.toString();
}
}
public static final class EmptyFilter implements EventFilter, DataSerializable {
public boolean eval(Object arg) {
return true;
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
}
@Override
public void readData(ObjectDataInput in) throws IOException {
}
@Override
public boolean equals(Object obj) {
return obj instanceof EmptyFilter;
}
@Override
public int hashCode() {
return 0;
}
}
public static class SendEventOperation extends AbstractOperation {
private EventPacket eventPacket;
private int orderKey;
public SendEventOperation() {
}
public SendEventOperation(EventPacket eventPacket, int orderKey) {
this.eventPacket = eventPacket;
this.orderKey = orderKey;
}
@Override
public void run() throws Exception {
EventServiceImpl eventService = (EventServiceImpl) getNodeEngine().getEventService();
eventService.executeEvent(eventService.new EventPacketProcessor(eventPacket, orderKey));
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
eventPacket.writeData(out);
out.writeInt(orderKey);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
eventPacket = new EventPacket();
eventPacket.readData(in);
orderKey = in.readInt();
}
}
public static class RegistrationOperation extends AbstractOperation {
private Registration registration;
private boolean response = false;
public RegistrationOperation() {
}
private RegistrationOperation(Registration registration) {
this.registration = registration;
}
@Override
public void run() throws Exception {
EventServiceImpl eventService = (EventServiceImpl) getNodeEngine().getEventService();
response = eventService.handleRegistration(registration);
}
@Override
public Object getResponse() {
return response;
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
registration.writeData(out);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
registration = new Registration();
registration.readData(in);
}
}
public static class DeregistrationOperation extends AbstractOperation {
private String topic;
private String id;
DeregistrationOperation() {
}
private DeregistrationOperation(String topic, String id) {
this.topic = topic;
this.id = id;
}
@Override
public void run() throws Exception {
EventServiceImpl eventService = (EventServiceImpl) getNodeEngine().getEventService();
eventService.deregisterSubscriber(getServiceName(), topic, id);
}
@Override
public Object getResponse() {
return true;
}
@Override
public boolean returnsResponse() {
return true;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
out.writeUTF(topic);
out.writeUTF(id);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
topic = in.readUTF();
id = in.readUTF();
}
}
public static class PostJoinRegistrationOperation extends AbstractOperation {
private Collection<Registration> registrations;
public PostJoinRegistrationOperation() {
}
public PostJoinRegistrationOperation(Collection<Registration> registrations) {
this.registrations = registrations;
}
@Override
public void run() throws Exception {
if (registrations != null && registrations.size() > 0) {
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
EventServiceImpl eventService = nodeEngine.eventService;
for (Registration reg : registrations) {
eventService.handleRegistration(reg);
}
}
}
@Override
public boolean returnsResponse() {
return false;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
int len = registrations != null ? registrations.size() : 0;
out.writeInt(len);
if (len > 0) {
for (Registration reg : registrations) {
reg.writeData(out);
}
}
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
int len = in.readInt();
if (len > 0) {
registrations = new ArrayList<Registration>(len);
for (int i = 0; i < len; i++) {
Registration reg = new Registration();
registrations.add(reg);
reg.readData(in);
}
}
}
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_spi_impl_EventServiceImpl.java |
1,346 | public interface StoreDao {
public Store readStoreByStoreCode(final String storeCode);
public List<Store> readAllStores();
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_store_dao_StoreDao.java |
1,946 | private static abstract class Converter<T> {
final Class<T> type;
Converter(Class<T> type) {
this.type = type;
}
boolean appliesTo(Object o) {
return type.isAssignableFrom(o.getClass());
}
String convert(Object o) {
return toString(type.cast(o));
}
abstract String toString(T t);
} | 0true
| src_main_java_org_elasticsearch_common_inject_internal_Errors.java |
1,423 | @XmlRootElement(name = "taxDetail")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class TaxDetailWrapper extends BaseWrapper implements APIWrapper<TaxDetail> {
@XmlElement
protected Long id;
@XmlElement
protected BroadleafEnumerationTypeWrapper taxType;
@XmlElement
protected Money amount;
@XmlElement
protected BigDecimal rate;
@XmlElement
protected String currency;
@XmlElement
protected String jurisdictionName;
@XmlElement
protected String taxName;
@XmlElement
protected String region;
@XmlElement
protected String country;
@Override
public void wrapDetails(TaxDetail model, HttpServletRequest request) {
this.id = model.getId();
if (model.getType() != null) {
this.taxType = (BroadleafEnumerationTypeWrapper) context.getBean(BroadleafEnumerationTypeWrapper.class.getName());
this.taxType.wrapDetails(model.getType(), request);
}
this.amount = model.getAmount();
this.rate = model.getRate();
if (model.getCurrency() != null) {
this.currency = model.getCurrency().getCurrencyCode();
}
this.jurisdictionName = model.getJurisdictionName();
this.taxName = model.getTaxName();
this.region = model.getRegion();
this.country = model.getCountry();
}
@Override
public void wrapSummary(TaxDetail model, HttpServletRequest request) {
wrapDetails(model, request);
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_TaxDetailWrapper.java |
3,036 | public abstract class AbstractPostingsFormatProvider implements PostingsFormatProvider {
private final String name;
protected AbstractPostingsFormatProvider(String name) {
this.name = name;
}
public String name() {
return name;
}
} | 0true
| src_main_java_org_elasticsearch_index_codec_postingsformat_AbstractPostingsFormatProvider.java |
497 | public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder<CloseIndexRequest, CloseIndexResponse, CloseIndexRequestBuilder> {
public CloseIndexRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new CloseIndexRequest());
}
public CloseIndexRequestBuilder(IndicesAdminClient indicesClient, String... indices) {
super((InternalIndicesAdminClient) indicesClient, new CloseIndexRequest(indices));
}
/**
* Sets the indices to be closed
* @param indices the indices to be closed
* @return the request itself
*/
public CloseIndexRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* Specifies what type of requested indices to ignore and wildcard indices expressions
* For example indices that don't exist.
*
* @param indicesOptions the desired behaviour regarding indices to ignore and indices wildcard expressions
* @return the request itself
*/
public CloseIndexRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request.indicesOptions(indicesOptions);
return this;
}
@Override
protected void doExecute(ActionListener<CloseIndexResponse> listener) {
((IndicesAdminClient) client).close(request, listener);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_close_CloseIndexRequestBuilder.java |
1,251 | new OProfilerHookValue() {
public Object getValue() {
return blockSize;
}
}); | 0true
| core_src_main_java_com_orientechnologies_orient_core_storage_fs_OMMapManagerOld.java |
448 | final Thread t2 = new Thread() {
public void run() {
try {
Thread.sleep(2 * 1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
q.offer("item1");
}
}; | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_queue_ClientQueueTest.java |