Unnamed: 0
int64 0
6.45k
| func
stringlengths 37
161k
| target
class label 2
classes | project
stringlengths 33
167
|
---|---|---|---|
47 |
@Component("blSandBoxItemCustomPersistenceHandler")
public class SandBoxItemCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {
private final Log LOG = LogFactory.getLog(SandBoxItemCustomPersistenceHandler.class);
@Resource(name="blSandBoxService")
protected SandBoxService sandBoxService;
@Resource(name="blAdminSecurityService")
protected AdminSecurityService adminSecurityService;
@Resource(name="blAdminSecurityRemoteService")
protected SecurityVerifier adminRemoteSecurityService;
@Override
public Boolean willHandleSecurity(PersistencePackage persistencePackage) {
return true;
}
@Override
public Boolean canHandleFetch(PersistencePackage persistencePackage) {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
boolean isSandboxItem = SandBoxItem.class.getName().equals(ceilingEntityFullyQualifiedClassname);
if (isSandboxItem) {
return persistencePackage.getCustomCriteria()[4].equals("standard");
}
return false;
}
@Override
public Boolean canHandleAdd(PersistencePackage persistencePackage) {
return canHandleFetch(persistencePackage);
}
@Override
public Boolean canHandleRemove(PersistencePackage persistencePackage) {
return canHandleFetch(persistencePackage);
}
@Override
public Boolean canHandleUpdate(PersistencePackage persistencePackage) {
return canHandleFetch(persistencePackage);
}
protected List<SandBoxItem> retrieveSandBoxItems(List<Long> ids, DynamicEntityDao dynamicEntityDao, SandBox mySandBox) {
if (CollectionUtils.isEmpty(ids)) {
throw new IllegalArgumentException("The passed in ids parameter is empty");
}
//declare SandBoxItemImpl explicitly, as we do not want to retrieve other polymorphic types (e.g. WorkflowSandBoxItemImpl)
Criteria criteria = dynamicEntityDao.createCriteria(SandBoxItemImpl.class);
criteria.add(Restrictions.in("id", ids));
criteria.add(Restrictions.or(Restrictions.eq("originalSandBoxId", mySandBox.getId()), Restrictions.eq("sandBoxId", mySandBox.getId())));
return criteria.list();
}
@Override
public DynamicResultSet fetch(PersistencePackage persistencePackage, CriteriaTransferObject cto, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
String ceilingEntityFullyQualifiedClassname = persistencePackage.getCeilingEntityFullyQualifiedClassname();
String[] customCriteria = persistencePackage.getCustomCriteria();
if (ArrayUtils.isEmpty(customCriteria) || customCriteria.length != 5) {
ServiceException e = new ServiceException("Invalid request for entity: " + ceilingEntityFullyQualifiedClassname);
LOG.error("Invalid request for entity: " + ceilingEntityFullyQualifiedClassname, e);
throw e;
}
AdminUser adminUser = adminRemoteSecurityService.getPersistentAdminUser();
if (adminUser == null) {
ServiceException e = new ServiceException("Unable to determine current user logged in status");
throw e;
}
try {
String moduleKey = customCriteria[0];
String operation = customCriteria[1];
List<Long> targets = new ArrayList<Long>();
if (!StringUtils.isEmpty(customCriteria[2])) {
String[] parts = customCriteria[2].split(",");
for (String part : parts) {
try {
targets.add(Long.valueOf(part));
} catch (NumberFormatException e) {
//do nothing
}
}
}
String comment = customCriteria[3];
String requiredPermission;
if (moduleKey.equals("userSandBox")) {
requiredPermission = "PERMISSION_ALL_USER_SANDBOX";
} else {
requiredPermission = "PERMISSION_ALL_APPROVER_SANDBOX";
}
boolean allowOperation = false;
for (AdminRole role : adminUser.getAllRoles()) {
for (AdminPermission permission : role.getAllPermissions()) {
if (permission.getName().equals(requiredPermission)) {
allowOperation = true;
break;
}
}
}
if (!allowOperation) {
ServiceException e = new ServiceException("Current user does not have permission to perform operation");
LOG.error("Current user does not have permission to perform operation", e);
throw e;
}
SandBox originalSandBox;
SandBox currentSandBox;
if (moduleKey.equals("userSandBox")) {
currentSandBox = sandBoxService.retrieveUserSandBox(null, adminUser);
originalSandBox = currentSandBox;
} else {
originalSandBox = sandBoxService.retrieveUserSandBox(null, adminUser);
currentSandBox = sandBoxService.retrieveApprovalSandBox(originalSandBox);
}
if (operation.equals("promoteAll")) {
sandBoxService.promoteAllSandBoxItems(currentSandBox, comment);
} else if (operation.equals("promoteSelected")) {
List<SandBoxItem> items = retrieveSandBoxItems(targets, dynamicEntityDao, currentSandBox);
sandBoxService.promoteSelectedItems(currentSandBox, comment, items);
} else if (operation.equals("revertRejectAll")) {
if (moduleKey.equals("userSandBox")) {
sandBoxService.revertAllSandBoxItems(originalSandBox, currentSandBox);
} else {
sandBoxService.rejectAllSandBoxItems(originalSandBox, currentSandBox, comment);
}
} else if (operation.equals("revertRejectSelected")) {
List<SandBoxItem> items = retrieveSandBoxItems(targets, dynamicEntityDao, currentSandBox);
if (moduleKey.equals("userSandBox")) {
sandBoxService.revertSelectedSandBoxItems(currentSandBox, items);
} else {
sandBoxService.rejectSelectedSandBoxItems(currentSandBox, comment, items);
}
}
PersistencePerspective persistencePerspective = persistencePackage.getPersistencePerspective();
Map<String, FieldMetadata> originalProps = helper.getSimpleMergedProperties(SandBoxItem.class.getName(), persistencePerspective);
cto.get("sandBoxId").setFilterValue(currentSandBox.getId().toString());
cto.get("archivedFlag").setFilterValue(Boolean.FALSE.toString());
List<FilterMapping> filterMappings = helper.getFilterMappings(persistencePerspective, cto, SandBoxItem.class.getName(), originalProps);
//declare SandBoxItemImpl explicitly, as we do not want to retrieve other polymorphic types (e.g. WorkflowSandBoxItemImpl)
List<Serializable> records = helper.getPersistentRecords(SandBoxItem.class.getName(), filterMappings, cto.getFirstResult(), cto.getMaxResults());
Entity[] results = helper.getRecords(originalProps, records);
int totalRecords = helper.getTotalRecords(SandBoxItem.class.getName(), filterMappings);
DynamicResultSet response = new DynamicResultSet(results, totalRecords);
return response;
} catch (Exception e) {
throw new ServiceException("Unable to execute persistence activity for entity: "+ceilingEntityFullyQualifiedClassname, e);
}
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_admin_server_handler_SandBoxItemCustomPersistenceHandler.java
|
863 |
/**
 * Eclipse LTK move-refactoring participant that rewrites Ceylon import statements
 * when a .ceylon or .java source file is moved between packages/folders.
 *
 * NOTE(review): {@code fileChanges} and {@code movingFiles} are static mutable
 * collections — presumably shared deliberately across the several participant
 * instances the LTK framework creates for a multi-file move (they are cleared in
 * the CompositeChange's perform()); confirm there is no leak when a refactoring
 * is cancelled before perform() runs.
 */
public class MoveFileRefactoringParticipant extends MoveParticipant {

    // The single file this participant instance is responsible for.
    private IFile file;

    // Accumulated text changes, keyed by project-relative path, shared across
    // participant instances so edits to the same file are merged into one change.
    private static Map<String,TextFileChange> fileChanges =
            new HashMap<String,TextFileChange>();
    // All resources being moved in this refactoring (shared across instances);
    // references between two files that are both moving need no import update.
    private static List<IResource> movingFiles =
            new ArrayList<IResource>();

    @Override
    protected boolean initialize(Object element) {
        file = (IFile) element;
        if (getProcessor() instanceof MoveProcessor) {
            MoveProcessor moveProcessor = (MoveProcessor) getProcessor();
            movingFiles.addAll(Arrays.asList((IResource[]) moveProcessor.getElements()));
            // Only participate for Ceylon-checked projects and .ceylon/.java files.
            return getProjectTypeChecker(file.getProject())!=null &&
                    file.getFileExtension()!=null &&
                    (file.getFileExtension().equals("ceylon") ||
                            file.getFileExtension().equals("java"));
        }
        else {
            return false;
        }
    }

    @Override
    public String getName() {
        return "Move file participant for Ceylon source";
    }

    @Override
    public RefactoringStatus checkConditions(IProgressMonitor pm,
            CheckConditionsContext context)
                    throws OperationCanceledException {
        // No additional preconditions; always OK.
        return new RefactoringStatus();
    }

    @Override
    public Change createChange(IProgressMonitor pm)
            throws CoreException, OperationCanceledException {
        // All work happens in createPreChange() so edits apply before the move.
        return null;
    }

    @Override
    public Change createPreChange(IProgressMonitor pm)
            throws CoreException, OperationCanceledException {
        try {
            IProject project = file.getProject();
            IFolder folder = (IFolder) getArguments().getDestination();
            // Destination package name: folder path minus the source-folder segment,
            // with '/' turned into '.'.
            String newName = folder.getProjectRelativePath()
                    .removeFirstSegments(1)
                    .toPortableString()
                    .replace('/', '.');
            String movedRelFilePath = file.getProjectRelativePath()
                    .removeFirstSegments(1)
                    .toPortableString();
            String movedRelPath = file.getParent()
                    .getProjectRelativePath()
                    .removeFirstSegments(1)
                    .toPortableString();
            // Original package name of the moved file.
            String oldName = movedRelPath.replace('/', '.');
            List<Change> changes = new ArrayList<Change>();
            if (file.getFileExtension().equals("java")) {
                updateRefsToMovedJavaFile(project, newName, oldName, changes);
            }
            else {
                PhasedUnit movedPhasedUnit =
                        getProjectTypeChecker(project)
                        .getPhasedUnitFromRelativePath(movedRelFilePath);
                if (movedPhasedUnit==null) {
                    return null;
                }
                List<Declaration> declarations =
                        movedPhasedUnit.getDeclarations();
                // Moving within the same package requires no import rewriting.
                if (newName.equals(oldName)) return null;
                updateRefsFromMovedCeylonFile(project, newName, oldName, changes,
                        movedPhasedUnit, declarations);
                updateRefsToMovedCeylonFile(project, newName, oldName, changes,
                        movedPhasedUnit, declarations);
            }
            if (changes.isEmpty())
                return null;
            // The composite clears the shared static state once it actually runs,
            // so the next refactoring starts fresh.
            CompositeChange result =
                    new CompositeChange("Ceylon source changes") {
                @Override
                public Change perform(IProgressMonitor pm)
                        throws CoreException {
                    fileChanges.clear();
                    movingFiles.clear();
                    return super.perform(pm);
                }
            };
            for (Change change: changes) {
                result.add(change);
            }
            return result;
        }
        catch (Exception e) {
            // Best-effort: a failure here aborts the participant but not the move itself.
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Collects edits needed INSIDE the moved Ceylon file: declarations that used to be
     * in the same package now need explicit imports (and a stale self-import of the
     * destination package, if any, is removed).
     */
    protected void updateRefsFromMovedCeylonFile(final IProject project,
            final String newName, final String oldName,
            final List<Change> changes, final PhasedUnit movedPhasedUnit,
            final List<Declaration> declarations) {
        final Map<Declaration,String> imports =
                new HashMap<Declaration,String>();
        movedPhasedUnit.getCompilationUnit().visit(new Visitor() {
            @Override
            public void visit(ImportMemberOrType that) {
                super.visit(that);
                visitIt(that.getIdentifier(),
                        that.getDeclarationModel());
            }
            //        @Override
            //        public void visit(QualifiedMemberOrTypeExpression that) {
            //            super.visit(that);
            //            visitIt(that.getIdentifier(), that.getDeclaration());
            //        }
            @Override
            public void visit(BaseMemberOrTypeExpression that) {
                super.visit(that);
                visitIt(that.getIdentifier(),
                        that.getDeclaration());
            }
            @Override
            public void visit(BaseType that) {
                super.visit(that);
                visitIt(that.getIdentifier(),
                        that.getDeclarationModel());
            }
            //        @Override
            //        public void visit(QualifiedType that) {
            //            super.visit(that);
            //            visitIt(that.getIdentifier(), that.getDeclarationModel());
            //        }
            // Record references to declarations defined OUTSIDE the moved file but in
            // its old package — those need an import once the file is elsewhere.
            protected void visitIt(Tree.Identifier id, Declaration dec) {
                if (dec!=null && !declarations.contains(dec)) {
                    Unit unit = dec.getUnit();
                    if (unit instanceof ProjectSourceFile &&
                            movingFiles.contains(((ProjectSourceFile) unit).getFileResource())) {
                        //also moving
                    }
                    else if (unit.getPackage().equals(movedPhasedUnit.getPackage())) {
                        imports.put(dec, id.getText());
                    }
                }
            }
            //TODO: DocLinks!!
        });
        collectEditsToMovedFile(newName, oldName, changes,
                movedPhasedUnit, imports);
    }

    /**
     * Collects edits in all OTHER compilation units of the project that reference
     * declarations from the moved Ceylon file: their imports must be retargeted from
     * the old package to the new one.
     */
    protected void updateRefsToMovedCeylonFile(final IProject project,
            final String newName, final String oldName,
            final List<Change> changes, PhasedUnit movedPhasedUnit,
            final List<Declaration> declarations) {
        if (!getArguments().getUpdateReferences()) return;
        for (PhasedUnit phasedUnit: getProjectTypeChecker(project)
                .getPhasedUnits().getPhasedUnits()) {
            // Skip the moved unit itself and any unit that is also being moved.
            if (phasedUnit==movedPhasedUnit ||
                    phasedUnit.getUnit() instanceof ProjectSourceFile &&
                    movingFiles.contains(((ProjectSourceFile) phasedUnit.getUnit()).getFileResource())) {
                continue;
            }
            final Map<Declaration,String> imports =
                    new HashMap<Declaration,String>();
            phasedUnit.getCompilationUnit().visit(new Visitor() {
                @Override
                public void visit(ImportMemberOrType that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclarationModel());
                }
                //            @Override
                //            public void visit(QualifiedMemberOrTypeExpression that) {
                //                super.visit(that);
                //                visitIt(that.getIdentifier(), that.getDeclaration());
                //            }
                @Override
                public void visit(BaseMemberOrTypeExpression that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclaration());
                }
                @Override
                public void visit(BaseType that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclarationModel());
                }
                //            @Override
                //            public void visit(QualifiedType that) {
                //                super.visit(that);
                //                visitIt(that.getIdentifier(), that.getDeclarationModel());
                //            }
                // Record references to any declaration that lives in the moved file.
                protected void visitIt(Tree.Identifier id, Declaration dec) {
                    if (dec!=null && declarations.contains(dec)) {
                        imports.put(dec, id.getText());
                    }
                }
                //TODO: DocLinks!!
            });
            collectEdits(newName, oldName, changes, phasedUnit, imports);
        }
    }

    /**
     * Same as {@link #updateRefsToMovedCeylonFile} but for a moved JAVA file: finds
     * Ceylon units that import the Java file's top-level types (matched by comparing
     * Ceylon qualified names of the form "package::Type").
     */
    protected void updateRefsToMovedJavaFile(final IProject project,
            final String newName, final String oldName,
            final List<Change> changes) throws JavaModelException {
        if (!getArguments().getUpdateReferences()) return;
        ICompilationUnit jcu = (ICompilationUnit) JavaCore.create(file);
        final IType[] types = jcu.getTypes();
        TypeChecker tc = getProjectTypeChecker(project);
        if (tc==null) return;
        for (PhasedUnit phasedUnit: tc.getPhasedUnits().getPhasedUnits()) {
            final Map<Declaration,String> imports =
                    new HashMap<Declaration,String>();
            phasedUnit.getCompilationUnit().visit(new Visitor() {
                @Override
                public void visit(ImportMemberOrType that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclarationModel());
                }
                //            @Override
                //            public void visit(QualifiedMemberOrTypeExpression that) {
                //                super.visit(that);
                //                visitIt(that.getIdentifier(), that.getDeclaration());
                //            }
                @Override
                public void visit(BaseMemberOrTypeExpression that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclaration());
                }
                @Override
                public void visit(BaseType that) {
                    super.visit(that);
                    visitIt(that.getIdentifier(),
                            that.getDeclarationModel());
                }
                //            @Override
                //            public void visit(QualifiedType that) {
                //                super.visit(that);
                //                visitIt(that.getIdentifier(), that.getDeclarationModel());
                //            }
                protected void visitIt(Tree.Identifier id, Declaration dec) {
                    for (IType type: types) {
                        if (dec!=null && dec.getQualifiedNameString()
                                .equals(getQualifiedName(type))) {
                            imports.put(dec, id.getText());
                        }
                    }
                }
                // Builds a Ceylon-style qualified name for a Java member;
                // "@" is a sentinel that never matches a real qualified name.
                protected String getQualifiedName(IMember dec) {
                    IJavaElement parent = dec.getParent();
                    if (parent instanceof ICompilationUnit) {
                        return parent.getParent().getElementName() + "::" +
                                dec.getElementName();
                    }
                    else if (dec.getDeclaringType()!=null) {
                        return getQualifiedName(dec.getDeclaringType()) + "." +
                                dec.getElementName();
                    }
                    else {
                        return "@";
                    }
                }
            });
            collectEdits(newName, oldName, changes, phasedUnit, imports);
        }
    }

    /**
     * Turns the collected declaration references into text edits on the MOVED file:
     * adds imports for the recorded declarations and deletes any now-redundant
     * import of the destination package.
     */
    private void collectEditsToMovedFile(String newName,
            String oldName, List<Change> changes,
            PhasedUnit movedPhasedUnit,
            Map<Declaration, String> imports) {
        try {
            IFileVirtualFile virtualFile =
                    (IFileVirtualFile) movedPhasedUnit.getUnitFile();
            IFile file = virtualFile.getFile();
            String path = file.getProjectRelativePath().toPortableString();
            // Reuse an existing change for this file if another participant already made one.
            TextFileChange change = fileChanges.get(path);
            if (change==null) {
                change = new TextFileChange(file.getName(), file);
                change.setEdit(new MultiTextEdit());
                changes.add(change);
                fileChanges.put(path, change);
            }
            Tree.CompilationUnit cu =
                    movedPhasedUnit.getCompilationUnit();
            if (!imports.isEmpty()) {
                List<InsertEdit> edits = importEdits(cu,
                        imports.keySet(), imports.values(), null,
                        EditorUtil.getDocument(change));
                for (TextEdit edit: edits) {
                    change.addEdit(edit);
                }
            }
            // An import of the destination package becomes a self-import after the move.
            Tree.Import toDelete = findImportNode(cu, newName);
            if (toDelete!=null) {
                change.addEdit(new DeleteEdit(toDelete.getStartIndex(),
                        toDelete.getStopIndex()-toDelete.getStartIndex()+1));
            }
        }
        catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Turns the collected declaration references into import-retargeting edits on a
     * REFERENCING file (old package name replaced by the new one).
     */
    private void collectEdits(String newName,
            String oldName, List<Change> changes,
            PhasedUnit phasedUnit,
            Map<Declaration, String> imports) {
        try {
            Tree.CompilationUnit cu =
                    phasedUnit.getCompilationUnit();
            if (!imports.isEmpty()) {
                IFileVirtualFile virtualFile =
                        (IFileVirtualFile) phasedUnit.getUnitFile();
                IFile file = virtualFile.getFile();
                String path = file.getProjectRelativePath().toPortableString();
                TextFileChange change = fileChanges.get(path);
                if (change==null) {
                    change = new TextFileChange(file.getName(), file);
                    change.setEdit(new MultiTextEdit());
                    changes.add(change);
                    fileChanges.put(path, change);
                }
                List<TextEdit> edits =
                        importEditForMove(cu,
                                imports.keySet(), imports.values(),
                                newName, oldName,
                                EditorUtil.getDocument(change));
                if (!edits.isEmpty()) {
                    for (TextEdit edit: edits) {
                        change.addEdit(edit);
                    }
                }
            }
        }
        catch (Exception e) {
            e.printStackTrace();
        }
    }
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_refactor_MoveFileRefactoringParticipant.java
|
355 |
future.andThen(new ExecutionCallback<Map<String, List<Integer>>>() {
@Override
public void onResponse(Map<String, List<Integer>> response) {
listenerResults.putAll(response);
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
351 |
@SuppressWarnings("unchecked")
public abstract class ODatabaseWrapperAbstract<DB extends ODatabase> implements ODatabase {
  /**
   * Base decorator that wraps an underlying {@link ODatabase} instance and delegates
   * every operation to it, while tracking the outermost wrapper ("database owner")
   * so callbacks reach the top of the wrapper chain.
   */

  protected DB                  underlying;
  protected ODatabaseComplex<?> databaseOwner;

  public ODatabaseWrapperAbstract(final DB iDatabase) {
    underlying = iDatabase;
    // Until setDatabaseOwner() is called, this wrapper is its own owner.
    databaseOwner = (ODatabaseComplex<?>) this;
  }

  @Override
  public void finalize() {
    // close();
  }

  public <THISDB extends ODatabase> THISDB open(final String iUserName, final String iUserPassword) {
    underlying.open(iUserName, iUserPassword);
    Orient.instance().getDatabaseFactory().register(databaseOwner);
    return (THISDB) this;
  }

  public <THISDB extends ODatabase> THISDB create() {
    underlying.create();
    Orient.instance().getDatabaseFactory().register(databaseOwner);
    return (THISDB) this;
  }

  public boolean exists() {
    return underlying.exists();
  }

  public void reload() {
    underlying.reload();
  }

  @Override
  public void backup(OutputStream out, Map<String, Object> options, Callable<Object> callable) throws IOException {
    underlying.backup(out, options, callable);
  }

  @Override
  public void restore(InputStream in, Map<String, Object> options, Callable<Object> callable) throws IOException {
    underlying.restore(in, options, callable);
  }

  public void close() {
    underlying.close();
    Orient.instance().getDatabaseFactory().unregister(databaseOwner);
  }

  public void replaceStorage(OStorage iNewStorage) {
    underlying.replaceStorage(iNewStorage);
  }

  public void drop() {
    underlying.drop();
    Orient.instance().getDatabaseFactory().unregister(databaseOwner);
  }

  public STATUS getStatus() {
    return underlying.getStatus();
  }

  public <THISDB extends ODatabase> THISDB setStatus(final STATUS iStatus) {
    underlying.setStatus(iStatus);
    return (THISDB) this;
  }

  public String getName() {
    return underlying.getName();
  }

  public String getURL() {
    return underlying.getURL();
  }

  public OStorage getStorage() {
    return underlying.getStorage();
  }

  public OLevel1RecordCache getLevel1Cache() {
    return underlying.getLevel1Cache();
  }

  public OLevel2RecordCache getLevel2Cache() {
    // Level-2 cache lives on the storage, not on the database instance.
    return getStorage().getLevel2Cache();
  }

  public boolean isClosed() {
    return underlying.isClosed();
  }

  public long countClusterElements(final int iClusterId) {
    checkOpeness();
    return underlying.countClusterElements(iClusterId);
  }

  public long countClusterElements(final int[] iClusterIds) {
    checkOpeness();
    return underlying.countClusterElements(iClusterIds);
  }

  public long countClusterElements(final String iClusterName) {
    checkOpeness();
    return underlying.countClusterElements(iClusterName);
  }

  @Override
  public long countClusterElements(int iClusterId, boolean countTombstones) {
    checkOpeness();
    return underlying.countClusterElements(iClusterId, countTombstones);
  }

  @Override
  public long countClusterElements(int[] iClusterIds, boolean countTombstones) {
    checkOpeness();
    return underlying.countClusterElements(iClusterIds, countTombstones);
  }

  public int getClusters() {
    checkOpeness();
    return underlying.getClusters();
  }

  public boolean existsCluster(String iClusterName) {
    checkOpeness();
    return underlying.existsCluster(iClusterName);
  }

  public Collection<String> getClusterNames() {
    checkOpeness();
    return underlying.getClusterNames();
  }

  public String getClusterType(final String iClusterName) {
    checkOpeness();
    return underlying.getClusterType(iClusterName);
  }

  public int getDataSegmentIdByName(final String iDataSegmentName) {
    checkOpeness();
    return underlying.getDataSegmentIdByName(iDataSegmentName);
  }

  public String getDataSegmentNameById(final int iDataSegmentId) {
    checkOpeness();
    return underlying.getDataSegmentNameById(iDataSegmentId);
  }

  public int getClusterIdByName(final String iClusterName) {
    checkOpeness();
    return underlying.getClusterIdByName(iClusterName);
  }

  public String getClusterNameById(final int iClusterId) {
    checkOpeness();
    return underlying.getClusterNameById(iClusterId);
  }

  public long getClusterRecordSizeById(int iClusterId) {
    return underlying.getClusterRecordSizeById(iClusterId);
  }

  public long getClusterRecordSizeByName(String iClusterName) {
    return underlying.getClusterRecordSizeByName(iClusterName);
  }

  public int addCluster(final String iType, final String iClusterName, final String iLocation, final String iDataSegmentName,
      final Object... iParameters) {
    checkOpeness();
    return underlying.addCluster(iType, iClusterName, iLocation, iDataSegmentName, iParameters);
  }

  public int addCluster(String iType, String iClusterName, int iRequestedId, String iLocation, String iDataSegmentName,
      Object... iParameters) {
    return underlying.addCluster(iType, iClusterName, iRequestedId, iLocation, iDataSegmentName, iParameters);
  }

  public int addCluster(final String iClusterName, final CLUSTER_TYPE iType, final Object... iParameters) {
    checkOpeness();
    return underlying.addCluster(iType.toString(), iClusterName, null, null, iParameters);
  }

  public int addCluster(String iClusterName, CLUSTER_TYPE iType) {
    checkOpeness();
    return underlying.addCluster(iType.toString(), iClusterName, null, null);
  }

  public boolean dropDataSegment(final String name) {
    return underlying.dropDataSegment(name);
  }

  public boolean dropCluster(final String iClusterName, final boolean iTruncate) {
    // Evict cached records of the cluster before dropping it.
    getLevel1Cache().freeCluster(getClusterIdByName(iClusterName));
    // FIX: forward the caller's truncate flag; the previous code hard-coded 'true',
    // silently ignoring the parameter.
    return underlying.dropCluster(iClusterName, iTruncate);
  }

  public boolean dropCluster(final int iClusterId, final boolean iTruncate) {
    getLevel1Cache().freeCluster(iClusterId);
    // FIX: forward the caller's truncate flag (was hard-coded to 'true').
    return underlying.dropCluster(iClusterId, iTruncate);
  }

  public int addDataSegment(final String iSegmentName, final String iLocation) {
    checkOpeness();
    return underlying.addDataSegment(iSegmentName, iLocation);
  }

  public int getDefaultClusterId() {
    checkOpeness();
    return underlying.getDefaultClusterId();
  }

  public boolean declareIntent(final OIntent iIntent) {
    checkOpeness();
    return underlying.declareIntent(iIntent);
  }

  public <DBTYPE extends ODatabase> DBTYPE getUnderlying() {
    return (DBTYPE) underlying;
  }

  public ODatabaseComplex<?> getDatabaseOwner() {
    return databaseOwner;
  }

  public ODatabaseComplex<?> setDatabaseOwner(final ODatabaseComplex<?> iOwner) {
    databaseOwner = iOwner;
    return (ODatabaseComplex<?>) this;
  }

  @Override
  public boolean equals(final Object iOther) {
    if (!(iOther instanceof ODatabase))
      return false;
    final ODatabase other = (ODatabase) iOther;
    return other.getName().equals(getName());
  }

  @Override
  public int hashCode() {
    // Added to honor the equals/hashCode contract: equality is by database name,
    // so the hash must be derived from the same property.
    final String name = getName();
    return name == null ? 0 : name.hashCode();
  }

  @Override
  public String toString() {
    return underlying.toString();
  }

  public Object setProperty(final String iName, final Object iValue) {
    return underlying.setProperty(iName, iValue);
  }

  public Object getProperty(final String iName) {
    return underlying.getProperty(iName);
  }

  public Iterator<Entry<String, Object>> getProperties() {
    return underlying.getProperties();
  }

  public Object get(final ATTRIBUTES iAttribute) {
    return underlying.get(iAttribute);
  }

  public <THISDB extends ODatabase> THISDB set(final ATTRIBUTES attribute, final Object iValue) {
    // NOTE(review): unlike setStatus(), this returns the UNDERLYING database, not the
    // wrapper — kept as-is since callers may depend on it; verify this is intentional.
    return (THISDB) underlying.set(attribute, iValue);
  }

  public void registerListener(final ODatabaseListener iListener) {
    underlying.registerListener(iListener);
  }

  public void unregisterListener(final ODatabaseListener iListener) {
    underlying.unregisterListener(iListener);
  }

  public <V> V callInLock(final Callable<V> iCallable, final boolean iExclusiveLock) {
    return getStorage().callInLock(iCallable, iExclusiveLock);
  }

  @Override
  public <V> V callInRecordLock(Callable<V> iCallable, ORID rid, boolean iExclusiveLock) {
    return underlying.callInRecordLock(iCallable, rid, iExclusiveLock);
  }

  @Override
  public ORecordMetadata getRecordMetadata(ORID rid) {
    return underlying.getRecordMetadata(rid);
  }

  public long getSize() {
    return underlying.getSize();
  }

  /** Throws {@link ODatabaseException} if the database has already been closed. */
  protected void checkOpeness() {
    if (isClosed())
      throw new ODatabaseException("Database '" + getURL() + "' is closed");
  }

  public void freeze(boolean throwException) {
    underlying.freeze(throwException);
  }

  public void freeze() {
    underlying.freeze();
  }

  public void release() {
    underlying.release();
  }

  @Override
  public void freezeCluster(int iClusterId, boolean throwException) {
    underlying.freezeCluster(iClusterId, throwException);
  }

  @Override
  public void freezeCluster(int iClusterId) {
    underlying.freezeCluster(iClusterId);
  }

  @Override
  public void releaseCluster(int iClusterId) {
    underlying.releaseCluster(iClusterId);
  }
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_ODatabaseWrapperAbstract.java
|
83 |
/**
 * Quick-fix proposal that changes the declared type of a declaration (parameter,
 * member, or local) to a type compatible with its actual usage.
 */
class ChangeTypeProposal extends CorrectionProposal {

    // NOTE(review): the 'problem' parameter is accepted but unused here —
    // presumably kept for signature parity with sibling proposals; confirm.
    ChangeTypeProposal(ProblemLocation problem,
            String name, String type, int offset,
            TextFileChange change) {
        super("Change type of "+ name + " to '" + type + "'",
                change, new Region(offset, type.length()));
    }

    /**
     * Builds a single "change type" proposal: replaces the type node's source range
     * with the new type name and adds any imports the new type needs.
     */
    static void addChangeTypeProposal(Node node, ProblemLocation problem,
            Collection<ICompletionProposal> proposals, Declaration dec,
            ProducedType newType, IFile file, Tree.CompilationUnit cu) {
        // Nodes without source offsets cannot be edited.
        if (node.getStartIndex() == null || node.getStopIndex() == null) {
            return;
        }
        // Changing a type to Nothing is never useful.
        if (newType.isNothing()) {
            return;
        }
        TextFileChange change = new TextFileChange("Change Type", file);
        change.setEdit(new MultiTextEdit());
        IDocument doc = EditorUtil.getDocument(change);
        String typeName = newType.getProducedTypeName(cu.getUnit());
        int offset = node.getStartIndex();
        int length = node.getStopIndex()-offset+1;
        HashSet<Declaration> decs = new HashSet<Declaration>();
        importType(decs, newType, cu);
        // il = number of characters inserted by the import edits before 'offset';
        // the proposal's selection region must be shifted by that amount.
        int il=applyImports(change, decs, cu, doc);
        change.addEdit(new ReplaceEdit(offset, length, typeName));
        // Build a human-readable description of what is being retyped.
        String name;
        if (dec.isParameter()) {
            name = "parameter '" + dec.getName() + "' of '" +
                    ((Declaration) dec.getContainer()).getName() + "'";
        }
        else if (dec.isClassOrInterfaceMember()) {
            name = "member '" + dec.getName() + "' of '" +
                    ((ClassOrInterface) dec.getContainer()).getName() + "'";
        }
        else {
            name = "'" + dec.getName() + "'";
        }
        proposals.add(new ChangeTypeProposal(problem, name,
                typeName, offset+il, change));
    }

    /**
     * For a type-parameter reference used as a type argument: proposes narrowing it
     * to the intersection of the corresponding formal type parameter's upper bounds.
     */
    static void addChangeTypeArgProposals(Tree.CompilationUnit cu, Node node,
            ProblemLocation problem, Collection<ICompletionProposal> proposals,
            IProject project) {
        if (node instanceof Tree.SimpleType) {
            TypeDeclaration decl = ((Tree.SimpleType) node).getDeclarationModel();
            if (decl instanceof TypeParameter) {
                Tree.Statement statement = findStatement(cu, node);
                if (statement instanceof Tree.TypedDeclaration) {
                    Tree.TypedDeclaration ad = (Tree.TypedDeclaration) statement;
                    if (ad.getType() instanceof Tree.SimpleType) {
                        Tree.SimpleType st = (Tree.SimpleType) ad.getType();
                        TypeParameter stTypeParam = null;
                        if (st.getTypeArgumentList() != null) {
                            // Match the offending type argument to the declaring
                            // type's formal type parameter at the same position.
                            List<Tree.Type> stTypeArguments =
                                    st.getTypeArgumentList().getTypes();
                            for (int i=0; i<stTypeArguments.size(); i++) {
                                Tree.SimpleType stTypeArgument =
                                        (Tree.SimpleType) stTypeArguments.get(i);
                                if (decl.getName().equals(
                                        stTypeArgument.getDeclarationModel().getName())) {
                                    TypeDeclaration stDecl = st.getDeclarationModel();
                                    if (stDecl != null) {
                                        if (stDecl.getTypeParameters()!=null &&
                                                stDecl.getTypeParameters().size()>i) {
                                            stTypeParam = stDecl.getTypeParameters().get(i);
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                        if (stTypeParam != null &&
                                !stTypeParam.getSatisfiedTypes().isEmpty()) {
                            // Propose the canonicalized intersection of all upper bounds.
                            IntersectionType it = new IntersectionType(cu.getUnit());
                            it.setSatisfiedTypes(stTypeParam.getSatisfiedTypes());
                            addChangeTypeProposals(proposals, problem, project, node,
                                    it.canonicalize().getType(), decl, true);
                        }
                    }
                }
            }
        }
    }

    /**
     * Entry point for type-mismatch problems on an expression: proposes changing the
     * type of the referenced declaration and/or the parameter it is assigned to.
     */
    static void addChangeTypeProposals(Tree.CompilationUnit cu, Node node,
            ProblemLocation problem, Collection<ICompletionProposal> proposals,
            IProject project) {
        // Unwrap specifier/expression nodes down to the underlying term.
        if (node instanceof Tree.SpecifierExpression) {
            Tree.Expression e = ((Tree.SpecifierExpression) node).getExpression();
            if (e!=null) {
                node = e.getTerm();
            }
        }
        if (node instanceof Tree.Expression) {
            node = ((Tree.Expression) node).getTerm();
        }
        if (node instanceof Tree.Term) {
            ProducedType t = ((Tree.Term) node).getTypeModel();
            if (t==null) return;
            ProducedType type = node.getUnit().denotableType(t);
            // Find the parameter (if any) this term is being passed to.
            FindInvocationVisitor fav = new FindInvocationVisitor(node);
            fav.visit(cu);
            TypedDeclaration td = fav.parameter;
            if (td!=null) {
                if (node instanceof Tree.InvocationExpression) {
                    node = ((Tree.InvocationExpression) node).getPrimary();
                }
                // Offer to widen the referenced declaration's type to the parameter's type...
                if (node instanceof Tree.BaseMemberExpression) {
                    TypedDeclaration d = (TypedDeclaration)
                            ((Tree.BaseMemberExpression) node).getDeclaration();
                    addChangeTypeProposals(proposals, problem, project, node,
                            td.getType(), d, true);
                }
                if (node instanceof Tree.QualifiedMemberExpression){
                    TypedDeclaration d = (TypedDeclaration)
                            ((Tree.QualifiedMemberExpression) node).getDeclaration();
                    addChangeTypeProposals(proposals, problem, project, node,
                            td.getType(), d, true);
                }
                // ...and to widen the parameter's type to the argument's type.
                addChangeTypeProposals(proposals, problem, project,
                        node, type, td, false);
            }
        }
    }

    /**
     * Locates the declaration's source unit within the project and emits proposals:
     * the plain new type, and (when the current declared type is known) its
     * intersection or union with the new type.
     */
    private static void addChangeTypeProposals(Collection<ICompletionProposal> proposals,
            ProblemLocation problem, IProject project, Node node, ProducedType type,
            Declaration dec, boolean intersect) {
        if (dec!=null) {
            for (PhasedUnit unit: getUnits(project)) {
                if (dec.getUnit().equals(unit.getUnit())) {
                    ProducedType t = null;
                    Node typeNode = null;
                    if (dec instanceof TypeParameter) {
                        t = ((TypeParameter) dec).getType();
                        typeNode = node;
                    }
                    if (dec instanceof TypedDeclaration) {
                        TypedDeclaration typedDec = (TypedDeclaration) dec;
                        FindDeclarationNodeVisitor fdv =
                                new FindDeclarationNodeVisitor(typedDec);
                        getRootNode(unit).visit(fdv);
                        Tree.TypedDeclaration decNode =
                                (Tree.TypedDeclaration) fdv.getDeclarationNode();
                        if (decNode!=null) {
                            typeNode = decNode.getType();
                            if (typeNode!=null) {
                                t= ((Tree.Type) typeNode).getTypeModel();
                            }
                        }
                    }
                    //TODO: fix this condition to properly distinguish
                    //      between a method reference and an invocation
                    if (dec instanceof Method &&
                            node.getUnit().isCallableType(type)) {
                        type = node.getUnit().getCallableReturnType(type);
                    }
                    if (typeNode != null && !isTypeUnknown(type)) {
                        addChangeTypeProposal(typeNode, problem, proposals, dec,
                                type, getFile(unit), unit.getCompilationUnit());
                        if (t != null) {
                            // Also offer the merged type, unless it equals the current one.
                            ProducedType newType = intersect ?
                                    intersectionType(t, type, unit.getUnit()) :
                                    unionType(t, type, unit.getUnit());
                            if (!newType.isExactly(t)) {
                                addChangeTypeProposal(typeNode, problem,
                                        proposals, dec, newType, getFile(unit),
                                        unit.getCompilationUnit());
                            }
                        }
                    }
                }
            }
        }
    }
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_ChangeTypeProposal.java
|
1,369 |
/**
 * Collects per-key index modifications made inside a transaction.
 * Entries are kept sorted by key using {@link ODefaultComparator}.
 */
public class OTransactionIndexChanges {

  /** Kind of change recorded against an index key. */
  public static enum OPERATION {
    PUT, REMOVE, CLEAR
  }

  // Sorted map from index key to the changes accumulated for that key.
  public NavigableMap<Object, OTransactionIndexChangesPerKey> changesPerKey = new TreeMap<Object, OTransactionIndexChangesPerKey>(
      ODefaultComparator.INSTANCE);

  // True once the whole index has been cleared within this transaction.
  public boolean                                              cleared       = false;

  /**
   * Returns the change container for the given key, lazily creating and
   * registering an empty one on first access.
   */
  public OTransactionIndexChangesPerKey getChangesPerKey(final Object iKey) {
    final OTransactionIndexChangesPerKey existing = changesPerKey.get(iKey);
    if (existing != null)
      return existing;
    final OTransactionIndexChangesPerKey created = new OTransactionIndexChangesPerKey(iKey);
    changesPerKey.put(iKey, created);
    return created;
  }

  /**
   * Returns the change containers for keys in [firstKey, lastKey).
   * Note: TreeMap.subMap is half-open, so lastKey itself is excluded.
   */
  public Collection<OTransactionIndexChangesPerKey> getChangesForKeys(final Object firstKey, final Object lastKey) {
    return changesPerKey.subMap(firstKey, lastKey).values();
  }

  /** Drops all per-key changes and flags the index as cleared. */
  public void setCleared() {
    changesPerKey.clear();
    cleared = true;
  }

  /** Returns whether any change has been recorded for the given key. */
  public boolean containsChangesPerKey(final Object iKey) {
    return changesPerKey.containsKey(iKey);
  }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_tx_OTransactionIndexChanges.java
|
8 |
.setImplementation(new EntryAdapter() {
@Override
public void entryEvicted(EntryEvent event) {
latch.countDown();
}
}));
| 0true
|
hazelcast_src_test_java_com_hazelcast_ascii_RestTest.java
|
618 |
/**
 * Adapts between the engine-level {@code Collection} view and the {@code Set}
 * actually stored per key. Stateless, hence shared via a single instance.
 */
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {

  private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();

  @Override
  public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
    // a Set already is a Collection: expose it unchanged
    return value;
  }

  @Override
  public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
    // callers are expected to hand back the very Set produced above
    return (Set<OIdentifiable>) collection;
  }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexMultiValues.java
|
3,389 |
/**
 * Memory estimator for paged-bytes string field data. It either estimates the
 * whole term set up front (fast path via BlockTree statistics) or falls back to
 * per-term accounting through a RamAccountingTermsEnum, and reconciles the
 * reserved amount against the circuit breaker once loading finishes.
 */
public class PagedBytesEstimator implements PerValueEstimator {

    private final AtomicReaderContext context;
    private final MemoryCircuitBreaker breaker;
    // bytes reserved against the breaker up front; reconciled in afterLoad()
    private long estimatedBytes;

    PagedBytesEstimator(AtomicReaderContext context, MemoryCircuitBreaker breaker) {
        this.breaker = breaker;
        this.context = context;
    }

    /**
     * @return the number of bytes for the term based on the length and ordinal overhead
     */
    public long bytesPerValue(BytesRef term) {
        long bytes = term.length;
        // 64 bytes for miscellaneous overhead
        bytes += 64;
        // Seems to be about a 1.5x compression per term/ord, plus 1 for some wiggle room
        bytes = (long) ((double) bytes / 1.5) + 1;
        return bytes;
    }

    /**
     * @return the estimate for loading the entire term set into field data, or 0 if unavailable
     */
    public long estimateStringFieldData() {
        try {
            AtomicReader reader = context.reader();
            Terms terms = reader.terms(getFieldNames().indexName());
            Fields fields = reader.fields();
            final Terms fieldTerms = fields.terms(getFieldNames().indexName());
            // Only the BlockTree postings format exposes cheap per-field stats;
            // other formats fall through and report 0 ("unavailable").
            if (fieldTerms instanceof BlockTreeTermsReader.FieldReader) {
                final BlockTreeTermsReader.Stats stats = ((BlockTreeTermsReader.FieldReader) fieldTerms).computeStats();
                long totalTermBytes = stats.totalTermBytes;
                if (logger.isTraceEnabled()) {
                    logger.trace("totalTermBytes: {}, terms.size(): {}, terms.getSumDocFreq(): {}",
                            totalTermBytes, terms.size(), terms.getSumDocFreq());
                }
                // raw term bytes plus per-term (2B) and per-posting (4B) overhead
                // NOTE(review): the 2/4 byte constants look empirical — confirm
                long totalBytes = totalTermBytes + (2 * terms.size()) + (4 * terms.getSumDocFreq());
                return totalBytes;
            }
        } catch (Exception e) {
            // best-effort estimation: a failure only disables the fast path
            logger.warn("Unable to estimate memory overhead", e);
        }
        return 0;
    }

    /**
     * Determine whether the BlockTreeTermsReader.FieldReader can be used
     * for estimating the field data, adding the estimate to the circuit
     * breaker if it can, otherwise wrapping the terms in a
     * RamAccountingTermsEnum to be estimated on a per-term basis.
     *
     * @param terms terms to be estimated
     * @return A possibly wrapped TermsEnum for the terms
     * @throws IOException
     */
    public TermsEnum beforeLoad(Terms terms) throws IOException {
        final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat(
                FilterSettingFields.ACCEPTABLE_TRANSIENT_OVERHEAD_RATIO,
                OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);

        AtomicReader reader = context.reader();
        // Check if one of the following is present:
        // - The OrdinalsBuilder overhead has been tweaked away from the default
        // - A field data filter is present
        // - A regex filter is present
        if (acceptableTransientOverheadRatio != OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO ||
                fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MIN, 0d) != 0d ||
                fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MAX, 0d) != 0d ||
                fieldDataType.getSettings().getAsDouble(FilterSettingFields.FREQUENCY_MIN_SEGMENT_SIZE, 0d) != 0d ||
                fieldDataType.getSettings().get(FilterSettingFields.REGEX_PATTERN) != null) {
            if (logger.isTraceEnabled()) {
                logger.trace("Filter exists, can't circuit break normally, using RamAccountingTermsEnum");
            }
            // a filter changes which terms get loaded, so the up-front estimate
            // would overshoot: account term by term instead
            return new RamAccountingTermsEnum(filter(terms, reader), breaker, this);
        } else {
            estimatedBytes = this.estimateStringFieldData();
            // If we weren't able to estimate, wrap in the RamAccountingTermsEnum
            if (estimatedBytes == 0) {
                return new RamAccountingTermsEnum(filter(terms, reader), breaker, this);
            }

            breaker.addEstimateBytesAndMaybeBreak(estimatedBytes);
            return filter(terms, reader);
        }
    }

    /**
     * Adjust the circuit breaker now that terms have been loaded, getting
     * the actual used either from the parameter (if estimation worked for
     * the entire set), or from the TermsEnum if it has been wrapped in a
     * RamAccountingTermsEnum.
     *
     * @param termsEnum  terms that were loaded
     * @param actualUsed actual field data memory usage
     */
    public void afterLoad(TermsEnum termsEnum, long actualUsed) {
        if (termsEnum instanceof RamAccountingTermsEnum) {
            estimatedBytes = ((RamAccountingTermsEnum) termsEnum).getTotalBytes();
        }
        // release over-estimation (or charge under-estimation) without tripping
        breaker.addWithoutBreaking(-(estimatedBytes - actualUsed));
    }

    /**
     * Adjust the breaker when no terms were actually loaded, but the field
     * data takes up space regardless. For instance, when ordinals are
     * used.
     *
     * @param actualUsed bytes actually used
     */
    public void adjustForNoTerms(long actualUsed) {
        breaker.addWithoutBreaking(actualUsed);
    }
}
| 1no label
|
src_main_java_org_elasticsearch_index_fielddata_plain_PagedBytesIndexFieldData.java
|
2,933 |
/**
 * Read-only set view over the union of several per-index result maps.
 * Entries are identified by their index key; {@link #contains(Object)} may
 * lazily build a key index to avoid repeated scans when many result sets
 * are registered.
 * NOTE(review): the lazily built {@code index} is assigned without
 * synchronization — confirm instances are confined to a single thread.
 */
public class MultiResultSet extends AbstractSet<QueryableEntry> {

    // lazily built union of index keys; null until contains() decides it pays off
    private Set<Object> index;
    private final List<ConcurrentMap<Data, QueryableEntry>> resultSets
            = new ArrayList<ConcurrentMap<Data, QueryableEntry>>();

    public MultiResultSet() {
    }

    /** Registers one more underlying result map as part of the union. */
    public void addResultSet(ConcurrentMap<Data, QueryableEntry> resultSet) {
        resultSets.add(resultSet);
    }

    @Override
    public boolean contains(Object o) {
        QueryableEntry entry = (QueryableEntry) o;
        if (index != null) {
            return checkFromIndex(entry);
        }
        // Heuristic: with more than 3 result sets a linear probe per call gets
        // expensive, so pay once to build a key index and answer from it.
        if (resultSets.size() > 3) {
            index = new HashSet<Object>();
            for (ConcurrentMap<Data, QueryableEntry> result : resultSets) {
                for (QueryableEntry queryableEntry : result.values()) {
                    index.add(queryableEntry.getIndexKey());
                }
            }
            return checkFromIndex(entry);
        }
        for (ConcurrentMap<Data, QueryableEntry> resultSet : resultSets) {
            if (resultSet.containsKey(entry.getIndexKey())) {
                return true;
            }
        }
        return false;
    }

    private boolean checkFromIndex(QueryableEntry entry) {
        return index.contains(entry.getIndexKey());
    }

    @Override
    public Iterator<QueryableEntry> iterator() {
        return new It();
    }

    /** Iterates the values of each registered result map in registration order. */
    class It implements Iterator<QueryableEntry> {
        int currentIndex;
        Iterator<QueryableEntry> currentIterator;

        @Override
        public boolean hasNext() {
            if (currentIterator != null && currentIterator.hasNext()) {
                return true;
            }
            // advance to the next non-empty result set, if any
            while (currentIndex < resultSets.size()) {
                currentIterator = resultSets.get(currentIndex++).values().iterator();
                if (currentIterator.hasNext()) {
                    return true;
                }
            }
            return false;
        }

        @Override
        public QueryableEntry next() {
            // BUG FIX: honor the Iterator contract. The previous implementation
            // returned null when no result sets were registered and threw NPE
            // when next() was called before hasNext().
            if (!hasNext()) {
                throw new java.util.NoSuchElementException();
            }
            return currentIterator.next();
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    }

    @Override
    public boolean add(QueryableEntry obj) {
        // this union view is read-only
        throw new UnsupportedOperationException();
    }

    @Override
    public int size() {
        int total = 0;
        for (ConcurrentMap<Data, QueryableEntry> resultSet : resultSets) {
            total += resultSet.size();
        }
        return total;
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_query_impl_MultiResultSet.java
|
115 |
{
@Override
public Object doWork( Void state )
{
try
{
tm.commit();
}
catch ( Exception e )
{
throw new RuntimeException( e );
}
return null;
}
};
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestJtaCompliance.java
|
3,772 |
/**
 * Merge-policy provider wrapping Lucene's {@link TieredMergePolicy} and keeping
 * its tunables in sync with dynamically-updatable index settings.
 */
public class TieredMergePolicyProvider extends AbstractMergePolicyProvider<TieredMergePolicy> {

    private final IndexSettingsService indexSettingsService;

    // Live policies that dynamic setting changes are pushed into.
    // NOTE(review): nothing in this class ever add()s to this set (only
    // CustomTieredMergePolicyProvider#close removes) — confirm policies are
    // registered elsewhere, otherwise dynamic updates never reach live policies.
    private final Set<CustomTieredMergePolicyProvider> policies = new CopyOnWriteArraySet<CustomTieredMergePolicyProvider>();

    private volatile double forceMergeDeletesPctAllowed;
    private volatile ByteSizeValue floorSegment;
    private volatile int maxMergeAtOnce;
    private volatile int maxMergeAtOnceExplicit;
    private volatile ByteSizeValue maxMergedSegment;
    private volatile double segmentsPerTier;
    private volatile double reclaimDeletesWeight;
    // when true, merges triggered by segment flushes are suppressed and run in the background
    private boolean asyncMerge;

    private final ApplySettings applySettings = new ApplySettings();

    @Inject
    public TieredMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) {
        super(store);
        this.indexSettingsService = indexSettingsService;
        this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true);
        this.forceMergeDeletesPctAllowed = componentSettings.getAsDouble("expunge_deletes_allowed", 10d); // percentage
        this.floorSegment = componentSettings.getAsBytesSize("floor_segment", new ByteSizeValue(2, ByteSizeUnit.MB));
        this.maxMergeAtOnce = componentSettings.getAsInt("max_merge_at_once", 10);
        this.maxMergeAtOnceExplicit = componentSettings.getAsInt("max_merge_at_once_explicit", 30);
        // TODO is this really a good default number for max_merge_segment, what happens for large indices, won't they end up with many segments?
        this.maxMergedSegment = componentSettings.getAsBytesSize("max_merged_segment", componentSettings.getAsBytesSize("max_merge_segment", new ByteSizeValue(5, ByteSizeUnit.GB)));
        this.segmentsPerTier = componentSettings.getAsDouble("segments_per_tier", 10.0d);
        this.reclaimDeletesWeight = componentSettings.getAsDouble("reclaim_deletes_weight", 2.0d);

        fixSettingsIfNeeded();

        logger.debug("using [tiered] merge policy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], reclaim_deletes_weight[{}], async_merge[{}]",
                forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, reclaimDeletesWeight, asyncMerge);

        indexSettingsService.addListener(applySettings);
    }

    /**
     * Enforces the TieredMergePolicy invariant segments_per_tier >= max_merge_at_once
     * by lowering max_merge_at_once (never below 2).
     */
    private void fixSettingsIfNeeded() {
        // fixing maxMergeAtOnce, see TieredMergePolicy#setMaxMergeAtOnce
        if (!(segmentsPerTier >= maxMergeAtOnce)) {
            int newMaxMergeAtOnce = (int) segmentsPerTier;
            // max merge at once should be at least 2
            if (newMaxMergeAtOnce <= 1) {
                newMaxMergeAtOnce = 2;
            }
            logger.debug("[tiered] merge policy changing max_merge_at_once from [{}] to [{}] because segments_per_tier [{}] has to be higher or equal to it", maxMergeAtOnce, newMaxMergeAtOnce, segmentsPerTier);
            this.maxMergeAtOnce = newMaxMergeAtOnce;
        }
    }

    @Override
    public TieredMergePolicy newMergePolicy() {
        CustomTieredMergePolicyProvider mergePolicy;
        if (asyncMerge) {
            mergePolicy = new EnableMergeTieredMergePolicyProvider(this);
        } else {
            mergePolicy = new CustomTieredMergePolicyProvider(this);
        }
        // seed the new policy with the currently effective settings
        mergePolicy.setNoCFSRatio(noCFSRatio);
        mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed);
        mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
        mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
        mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
        mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
        mergePolicy.setSegmentsPerTier(segmentsPerTier);
        mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
        return mergePolicy;
    }

    @Override
    public void close() throws ElasticsearchException {
        indexSettingsService.removeListener(applySettings);
    }

    public static final String INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED = "index.merge.policy.expunge_deletes_allowed";
    public static final String INDEX_MERGE_POLICY_FLOOR_SEGMENT = "index.merge.policy.floor_segment";
    public static final String INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE = "index.merge.policy.max_merge_at_once";
    public static final String INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT = "index.merge.policy.max_merge_at_once_explicit";
    public static final String INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT = "index.merge.policy.max_merged_segment";
    public static final String INDEX_MERGE_POLICY_SEGMENTS_PER_TIER = "index.merge.policy.segments_per_tier";
    public static final String INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT = "index.merge.policy.reclaim_deletes_weight";

    /**
     * Listens for dynamic index-settings updates and applies each changed value
     * both to this provider's fields and to every live merge policy.
     */
    class ApplySettings implements IndexSettingsService.Listener {
        @Override
        public void onRefreshSettings(Settings settings) {
            double expungeDeletesPctAllowed = settings.getAsDouble(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed);
            if (expungeDeletesPctAllowed != TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed) {
                logger.info("updating [expunge_deletes_allowed] from [{}] to [{}]", TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed, expungeDeletesPctAllowed);
                TieredMergePolicyProvider.this.forceMergeDeletesPctAllowed = expungeDeletesPctAllowed;
                for (CustomTieredMergePolicyProvider policy : policies) {
                    policy.setForceMergeDeletesPctAllowed(expungeDeletesPctAllowed);
                }
            }

            ByteSizeValue floorSegment = settings.getAsBytesSize(INDEX_MERGE_POLICY_FLOOR_SEGMENT, TieredMergePolicyProvider.this.floorSegment);
            if (!floorSegment.equals(TieredMergePolicyProvider.this.floorSegment)) {
                logger.info("updating [floor_segment] from [{}] to [{}]", TieredMergePolicyProvider.this.floorSegment, floorSegment);
                TieredMergePolicyProvider.this.floorSegment = floorSegment;
                for (CustomTieredMergePolicyProvider policy : policies) {
                    policy.setFloorSegmentMB(floorSegment.mbFrac());
                }
            }

            int maxMergeAtOnce = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, TieredMergePolicyProvider.this.maxMergeAtOnce);
            if (maxMergeAtOnce != TieredMergePolicyProvider.this.maxMergeAtOnce) {
                logger.info("updating [max_merge_at_once] from [{}] to [{}]", TieredMergePolicyProvider.this.maxMergeAtOnce, maxMergeAtOnce);
                TieredMergePolicyProvider.this.maxMergeAtOnce = maxMergeAtOnce;
                for (CustomTieredMergePolicyProvider policy : policies) {
                    policy.setMaxMergeAtOnce(maxMergeAtOnce);
                }
            }

            int maxMergeAtOnceExplicit = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, TieredMergePolicyProvider.this.maxMergeAtOnceExplicit);
            if (maxMergeAtOnceExplicit != TieredMergePolicyProvider.this.maxMergeAtOnceExplicit) {
                logger.info("updating [max_merge_at_once_explicit] from [{}] to [{}]", TieredMergePolicyProvider.this.maxMergeAtOnceExplicit, maxMergeAtOnceExplicit);
                TieredMergePolicyProvider.this.maxMergeAtOnceExplicit = maxMergeAtOnceExplicit;
                for (CustomTieredMergePolicyProvider policy : policies) {
                    policy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
                }
            }

            ByteSizeValue maxMergedSegment = settings.getAsBytesSize(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, TieredMergePolicyProvider.this.maxMergedSegment);
            if (!maxMergedSegment.equals(TieredMergePolicyProvider.this.maxMergedSegment)) {
                logger.info("updating [max_merged_segment] from [{}] to [{}]", TieredMergePolicyProvider.this.maxMergedSegment, maxMergedSegment);
                TieredMergePolicyProvider.this.maxMergedSegment = maxMergedSegment;
                for (CustomTieredMergePolicyProvider policy : policies) {
                    // BUG FIX: previously called setFloorSegmentMB here, so a
                    // dynamic update of max_merged_segment silently changed the
                    // floor segment size instead of the max merged segment size.
                    policy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
                }
            }

            double segmentsPerTier = settings.getAsDouble(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, TieredMergePolicyProvider.this.segmentsPerTier);
            if (segmentsPerTier != TieredMergePolicyProvider.this.segmentsPerTier) {
                logger.info("updating [segments_per_tier] from [{}] to [{}]", TieredMergePolicyProvider.this.segmentsPerTier, segmentsPerTier);
                TieredMergePolicyProvider.this.segmentsPerTier = segmentsPerTier;
                for (CustomTieredMergePolicyProvider policy : policies) {
                    policy.setSegmentsPerTier(segmentsPerTier);
                }
            }

            double reclaimDeletesWeight = settings.getAsDouble(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, TieredMergePolicyProvider.this.reclaimDeletesWeight);
            if (reclaimDeletesWeight != TieredMergePolicyProvider.this.reclaimDeletesWeight) {
                logger.info("updating [reclaim_deletes_weight] from [{}] to [{}]", TieredMergePolicyProvider.this.reclaimDeletesWeight, reclaimDeletesWeight);
                TieredMergePolicyProvider.this.reclaimDeletesWeight = reclaimDeletesWeight;
                for (CustomTieredMergePolicyProvider policy : policies) {
                    policy.setReclaimDeletesWeight(reclaimDeletesWeight);
                }
            }

            final double noCFSRatio = parseNoCFSRatio(settings.get(INDEX_COMPOUND_FORMAT, Double.toString(TieredMergePolicyProvider.this.noCFSRatio)));
            if (noCFSRatio != TieredMergePolicyProvider.this.noCFSRatio) {
                logger.info("updating index.compound_format from [{}] to [{}]", formatNoCFSRatio(TieredMergePolicyProvider.this.noCFSRatio), formatNoCFSRatio(noCFSRatio));
                TieredMergePolicyProvider.this.noCFSRatio = noCFSRatio;
                for (CustomTieredMergePolicyProvider policy : policies) {
                    policy.setNoCFSRatio(noCFSRatio);
                }
            }

            // re-validate the segments_per_tier >= max_merge_at_once invariant
            fixSettingsIfNeeded();
        }
    }

    /**
     * TieredMergePolicy that unregisters itself from the provider on close and
     * deliberately does not clone (IndexWriter's internal clone stays identity).
     */
    public static class CustomTieredMergePolicyProvider extends TieredMergePolicy {

        private final TieredMergePolicyProvider provider;

        public CustomTieredMergePolicyProvider(TieredMergePolicyProvider provider) {
            super();
            this.provider = provider;
        }

        @Override
        public void close() {
            super.close();
            provider.policies.remove(this);
        }

        @Override
        public MergePolicy clone() {
            // Lucene IW makes a clone internally but since we hold on to this instance
            // the clone will just be the identity.
            return this;
        }
    }

    /**
     * Variant that suppresses merges triggered by segment flushes so they can
     * run asynchronously in the background instead of stalling indexing.
     */
    public static class EnableMergeTieredMergePolicyProvider extends CustomTieredMergePolicyProvider {

        public EnableMergeTieredMergePolicyProvider(TieredMergePolicyProvider provider) {
            super(provider);
        }

        @Override
        public MergePolicy.MergeSpecification findMerges(MergeTrigger trigger, SegmentInfos infos) throws IOException {
            // we don't enable merges while indexing documents, we do them in the background
            if (trigger == MergeTrigger.SEGMENT_FLUSH) {
                return null;
            }
            return super.findMerges(trigger, infos);
        }
    }
}
| 1no label
|
src_main_java_org_elasticsearch_index_merge_policy_TieredMergePolicyProvider.java
|
256 |
/** Supplies the {@code StoreRateLimiting} instance that store operations should honor. */
public static interface Provider {

    /** @return the rate limiting configuration/state to apply; never expected to be stale */
    StoreRateLimiting rateLimiting();
}
| 0true
|
src_main_java_org_apache_lucene_store_StoreRateLimiting.java
|
256 |
/**
 * Factory for {@code OCollate} implementations; each factory declares the set
 * of collate names it is able to build.
 */
public interface OCollateFactory {
  /**
   * @return Set of supported collate names of this factory
   */
  Set<String> getNames();

  /**
   * Returns the requested collate
   *
   * @param name name of the collate to build, expected to be one of {@link #getNames()}
   * @return the collate instance for the given name
   */
  OCollate getCollate(String name);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_collate_OCollateFactory.java
|
681 |
/**
 * Factory building the hash-based index flavours (unique, not-unique,
 * full-text and dictionary) on top of the storage-specific hash engines.
 */
public class OHashIndexFactory implements OIndexFactory {
  private static final Set<String> TYPES;
  static {
    final Set<String> types = new HashSet<String>();
    types.add(OClass.INDEX_TYPE.UNIQUE_HASH_INDEX.toString());
    types.add(OClass.INDEX_TYPE.NOTUNIQUE_HASH_INDEX.toString());
    types.add(OClass.INDEX_TYPE.FULLTEXT_HASH_INDEX.toString());
    types.add(OClass.INDEX_TYPE.DICTIONARY_HASH_INDEX.toString());
    TYPES = Collections.unmodifiableSet(types);
  }

  /**
   * Index types :
   * <ul>
   * <li>UNIQUE_HASH_INDEX</li>
   * <li>NOTUNIQUE_HASH_INDEX</li>
   * <li>FULLTEXT_HASH_INDEX</li>
   * <li>DICTIONARY_HASH_INDEX</li>
   * </ul>
   */
  public Set<String> getTypes() {
    return TYPES;
  }

  /**
   * Builds the index matching {@code indexType}, choosing the index engine by
   * the underlying storage type and defaulting the value-container algorithm
   * when none was supplied.
   */
  public OIndexInternal<?> createIndex(ODatabaseRecord database, String indexType, String algorithm, String valueContainerAlgorithm)
      throws OConfigurationException {
    if (valueContainerAlgorithm == null) {
      // non-unique/full-text flavours historically defaulted to the MVRB-tree container
      if (OClass.INDEX_TYPE.NOTUNIQUE.toString().equals(indexType)
          || OClass.INDEX_TYPE.NOTUNIQUE_HASH_INDEX.toString().equals(indexType)
          || OClass.INDEX_TYPE.FULLTEXT_HASH_INDEX.toString().equals(indexType)
          || OClass.INDEX_TYPE.FULLTEXT.toString().equals(indexType))
        valueContainerAlgorithm = ODefaultIndexFactory.MVRBTREE_VALUE_CONTAINER;
      else
        valueContainerAlgorithm = ODefaultIndexFactory.NONE_VALUE_CONTAINER;
    }

    // warn (but still proceed) when a deprecated container is used on (p)local storage
    if ((database.getStorage().getType().equals(OEngineLocalPaginated.NAME) || database.getStorage().getType()
        .equals(OEngineLocal.NAME))
        && valueContainerAlgorithm.equals(ODefaultIndexFactory.MVRBTREE_VALUE_CONTAINER)
        && OGlobalConfiguration.INDEX_NOTUNIQUE_USE_SBTREE_CONTAINER_BY_DEFAULT.getValueAsBoolean()) {
      OLogManager
          .instance()
          .warn(
              this,
              "Index was created using %s as values container. "
                  + "This container is deprecated and is not supported any more. To avoid this message please drop and recreate indexes or perform DB export/import.",
              valueContainerAlgorithm);
    }

    OStorage storage = database.getStorage();
    OIndexEngine indexEngine;

    // pick the hash engine implementation matching the storage backend
    final String storageType = storage.getType();
    if (storageType.equals("memory"))
      indexEngine = new OMemoryHashMapIndexEngine();
    else if (storageType.equals("local") || storageType.equals("plocal"))
      indexEngine = new OLocalHashTableIndexEngine();
    else if (storageType.equals("distributed"))
      // DISTRIBUTED CASE: HANDLE IT AS FOR LOCAL
      indexEngine = new OLocalHashTableIndexEngine();
    else if (storageType.equals("remote"))
      indexEngine = new ORemoteIndexEngine();
    else
      throw new OIndexException("Unsupported storage type : " + storageType);

    if (OClass.INDEX_TYPE.UNIQUE_HASH_INDEX.toString().equals(indexType))
      return new OIndexUnique(indexType, algorithm, indexEngine, valueContainerAlgorithm);
    else if (OClass.INDEX_TYPE.NOTUNIQUE_HASH_INDEX.toString().equals(indexType))
      return new OIndexNotUnique(indexType, algorithm, indexEngine, valueContainerAlgorithm);
    else if (OClass.INDEX_TYPE.FULLTEXT_HASH_INDEX.toString().equals(indexType))
      return new OIndexFullText(indexType, algorithm, indexEngine, valueContainerAlgorithm);
    else if (OClass.INDEX_TYPE.DICTIONARY_HASH_INDEX.toString().equals(indexType))
      return new OIndexDictionary(indexType, algorithm, indexEngine, valueContainerAlgorithm);

    throw new OConfigurationException("Unsupported type : " + indexType);
  }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_hashindex_local_OHashIndexFactory.java
|
390 |
// Two-phase cluster-settings update: first apply the settings (acknowledged by
// all nodes), then trigger a reroute, since changed settings (e.g. number of
// replicas) may require shard reallocation.
clusterService.submitStateUpdateTask("cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask() {

    // set to true by execute() when at least one setting was actually applied
    private volatile boolean changed = false;

    @Override
    public boolean mustAck(DiscoveryNode discoveryNode) {
        return true;
    }

    @Override
    public void onAllNodesAcked(@Nullable Throwable t) {
        if (changed) {
            // settings applied everywhere: follow up with an acked reroute
            reroute(true);
        } else {
            listener.onResponse(new ClusterUpdateSettingsResponse(true, transientUpdates.build(), persistentUpdates.build()));
        }
    }

    @Override
    public void onAckTimeout() {
        if (changed) {
            // still reroute, but report the settings update as not acknowledged
            reroute(false);
        } else {
            listener.onResponse(new ClusterUpdateSettingsResponse(false, transientUpdates.build(), persistentUpdates.build()));
        }
    }

    private void reroute(final boolean updateSettingsAcked) {
        clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask() {
            @Override
            public boolean mustAck(DiscoveryNode discoveryNode) {
                //we wait for the reroute ack only if the update settings was acknowledged
                return updateSettingsAcked;
            }

            @Override
            public void onAllNodesAcked(@Nullable Throwable t) {
                //we return when the cluster reroute is acked (the acknowledged flag depends on whether the update settings was acknowledged)
                listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
            }

            @Override
            public void onAckTimeout() {
                //we return when the cluster reroute ack times out (acknowledged false)
                listener.onResponse(new ClusterUpdateSettingsResponse(false, transientUpdates.build(), persistentUpdates.build()));
            }

            @Override
            public TimeValue ackTimeout() {
                return request.timeout();
            }

            @Override
            public TimeValue timeout() {
                return request.masterNodeTimeout();
            }

            @Override
            public void onFailure(String source, Throwable t) {
                //if the reroute fails we only log
                logger.debug("failed to perform [{}]", t, source);
            }

            @Override
            public ClusterState execute(final ClusterState currentState) {
                // now, reroute in case things that require it changed (e.g. number of replicas)
                RoutingAllocation.Result routingResult = allocationService.reroute(currentState);
                if (!routingResult.changed()) {
                    return currentState;
                }
                return ClusterState.builder(currentState).routingResult(routingResult).build();
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            }
        });
    }

    @Override
    public TimeValue ackTimeout() {
        return request.timeout();
    }

    @Override
    public TimeValue timeout() {
        return request.masterNodeTimeout();
    }

    @Override
    public void onFailure(String source, Throwable t) {
        logger.debug("failed to perform [{}]", t, source);
        listener.onFailure(t);
    }

    @Override
    public ClusterState execute(final ClusterState currentState) {
        // apply only settings that are registered as dynamically updatable
        // (or logger.* levels); everything else is logged and ignored
        ImmutableSettings.Builder transientSettings = ImmutableSettings.settingsBuilder();
        transientSettings.put(currentState.metaData().transientSettings());
        for (Map.Entry<String, String> entry : request.transientSettings().getAsMap().entrySet()) {
            if (dynamicSettings.hasDynamicSetting(entry.getKey()) || entry.getKey().startsWith("logger.")) {
                String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue());
                if (error == null) {
                    transientSettings.put(entry.getKey(), entry.getValue());
                    transientUpdates.put(entry.getKey(), entry.getValue());
                    changed = true;
                } else {
                    logger.warn("ignoring transient setting [{}], [{}]", entry.getKey(), error);
                }
            } else {
                logger.warn("ignoring transient setting [{}], not dynamically updateable", entry.getKey());
            }
        }

        ImmutableSettings.Builder persistentSettings = ImmutableSettings.settingsBuilder();
        persistentSettings.put(currentState.metaData().persistentSettings());
        for (Map.Entry<String, String> entry : request.persistentSettings().getAsMap().entrySet()) {
            if (dynamicSettings.hasDynamicSetting(entry.getKey()) || entry.getKey().startsWith("logger.")) {
                String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue());
                if (error == null) {
                    persistentSettings.put(entry.getKey(), entry.getValue());
                    persistentUpdates.put(entry.getKey(), entry.getValue());
                    changed = true;
                } else {
                    logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error);
                }
            } else {
                logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey());
            }
        }

        if (!changed) {
            return currentState;
        }

        MetaData.Builder metaData = MetaData.builder(currentState.metaData())
                .persistentSettings(persistentSettings.build())
                .transientSettings(transientSettings.build());

        // keep the cluster-wide read-only block in sync with the updated settings
        ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
        boolean updatedReadOnly = metaData.persistentSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || metaData.transientSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false);
        if (updatedReadOnly) {
            blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
        } else {
            blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
        }

        return builder(currentState).metaData(metaData).blocks(blocks).build();
    }

    @Override
    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
    }
});
| 1no label
|
src_main_java_org_elasticsearch_action_admin_cluster_settings_TransportClusterUpdateSettingsAction.java
|
598 |
/**
 * No-op convenience base class for {@code BroadleafWebRequestProcessor}
 * implementations that only need to supply the pre-process phase.
 */
public abstract class AbstractBroadleafWebRequestProcessor implements BroadleafWebRequestProcessor {

    public void postProcess(WebRequest request) {
        // intentionally empty: subclasses override only when post-processing is required
    }
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_web_AbstractBroadleafWebRequestProcessor.java
|
497 |
/**
 * Rewrites every element of a list through the matching {@code FieldRewriter}.
 * Returns {@code null} when no element changed, signalling callers to keep the
 * original list instance.
 */
private static class ListRewriter implements FieldRewriter<List<?>> {
  @Override
  public List<?> rewriteValue(List<?> listValue) {
    final List<Object> rewritten = new ArrayList<Object>(listValue.size());
    boolean changed = false;

    for (Object item : listValue) {
      FieldRewriter<Object> itemRewriter = RewritersFactory.INSTANCE.findRewriter(null, null, item);
      Object newItem = itemRewriter.rewriteValue(item);
      if (newItem == null) {
        // null from a rewriter means "unchanged": keep the original element
        rewritten.add(item);
      } else {
        changed = true;
        rewritten.add(newItem);
      }
    }

    return changed ? rewritten : null;
  }
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_tool_ODatabaseImport.java
|
177 |
/**
 * Adapts a result-less {@code Runnable} into a {@code ForkJoinTask<Void>} that
 * also implements {@code RunnableFuture}, so plain runnables can be submitted
 * and joined like any other fork/join task.
 */
static final class AdaptedRunnableAction extends ForkJoinTask<Void>
    implements RunnableFuture<Void> {
    final Runnable runnable;

    AdaptedRunnableAction(Runnable runnable) {
        if (runnable == null)
            throw new NullPointerException();
        this.runnable = runnable;
    }

    public final Void getRawResult() {
        // actions carry no result
        return null;
    }

    public final void setRawResult(Void v) {
        // nothing to record for a Void result
    }

    public final boolean exec() {
        runnable.run();
        // always completes normally in a single step
        return true;
    }

    public final void run() {
        invoke();
    }

    private static final long serialVersionUID = 5232453952276885070L;
}
| 0true
|
src_main_java_jsr166y_ForkJoinTask.java
|
30 |
// Iterates tree values in reverse order, starting from the entry supplied at
// construction time and walking backwards via the superclass cursor.
final class ValueInverseIterator extends AbstractEntryIterator<K, V, V> {
  ValueInverseIterator(final OMVRBTreeEntry<K, V> last) {
    super(last);
    // we have to set ourselves after current index to make iterator work
    // (positions the cursor one past the start so the first prevValue()
    // lands exactly on `last`)
    if (last != null) {
      pageIndex = last.getTree().getPageIndex() + 1;
    }
  }

  @Override
  public boolean hasNext() {
    // reversed traversal: "next" really asks whether a preceding element exists
    return hasPrevious();
  }

  @Override
  public V next() {
    return prevValue();
  }
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_collection_OMVRBTree.java
|
2,193 |
public class MultiPhrasePrefixQuery extends Query {
    // Field all phrase terms must belong to; set by the first add() call.
    private String field;
    // One Term[] per phrase position; any of the terms at a position may match.
    private ArrayList<Term[]> termArrays = new ArrayList<Term[]>();
    // Relative position of each Term[] entry within the phrase.
    private ArrayList<Integer> positions = new ArrayList<Integer>();
    // Cap on how many terms the trailing prefix may expand to at rewrite time.
    private int maxExpansions = Integer.MAX_VALUE;
    // Phrase slop, as in PhraseQuery#setSlop(int).
    private int slop = 0;
    /**
     * Sets the phrase slop for this query.
     *
     * @param s maximum allowed positional distance between matched phrase terms
     * @see org.apache.lucene.search.PhraseQuery#setSlop(int)
     */
    public void setSlop(int s) {
        slop = s;
    }
    /**
     * Sets the maximum number of terms the trailing prefix may expand to
     * during {@link #rewrite}; defaults to {@code Integer.MAX_VALUE}.
     *
     * @param maxExpansions cap on prefix expansions
     */
    public void setMaxExpansions(int maxExpansions) {
        this.maxExpansions = maxExpansions;
    }
    /**
     * Returns the phrase slop for this query.
     * (Javadoc previously said "Sets", which was a copy/paste error.)
     *
     * @see org.apache.lucene.search.PhraseQuery#getSlop()
     */
    public int getSlop() {
        return slop;
    }
    /**
     * Add a single term at the next position in the phrase.
     *
     * @param term the term to add at the next free position
     * @see org.apache.lucene.search.PhraseQuery#add(Term)
     */
    public void add(Term term) {
        add(new Term[]{term});
    }
/**
* Add multiple terms at the next position in the phrase. Any of the terms
* may match.
*
* @see org.apache.lucene.search.PhraseQuery#add(Term)
*/
public void add(Term[] terms) {
int position = 0;
if (positions.size() > 0)
position = positions.get(positions.size() - 1).intValue() + 1;
add(terms, position);
}
/**
* Allows to specify the relative position of terms within the phrase.
*
* @param terms
* @param position
* @see org.apache.lucene.search.PhraseQuery#add(Term, int)
*/
public void add(Term[] terms, int position) {
if (termArrays.size() == 0)
field = terms[0].field();
for (int i = 0; i < terms.length; i++) {
if (terms[i].field() != field) {
throw new IllegalArgumentException(
"All phrase terms must be in the same field (" + field + "): "
+ terms[i]);
}
}
termArrays.add(terms);
positions.add(Integer.valueOf(position));
}
    /**
     * Returns a List of the terms in the multiphrase.
     * Do not modify the List or its contents.
     *
     * @return unmodifiable view over the per-position term arrays
     */
    public List<Term[]> getTermArrays() {
        return Collections.unmodifiableList(termArrays);
    }
/**
* Returns the relative positions of terms in this phrase.
*/
public int[] getPositions() {
int[] result = new int[positions.size()];
for (int i = 0; i < positions.size(); i++)
result[i] = positions.get(i).intValue();
return result;
}
    @Override
    public Query rewrite(IndexReader reader) throws IOException {
        // nothing was added: match nothing rather than everything
        if (termArrays.isEmpty()) {
            return new MatchNoDocsQuery();
        }
        // copy all but the last position verbatim into a MultiPhraseQuery...
        MultiPhraseQuery query = new MultiPhraseQuery();
        query.setSlop(slop);
        int sizeMinus1 = termArrays.size() - 1;
        for (int i = 0; i < sizeMinus1; i++) {
            query.add(termArrays.get(i), positions.get(i));
        }
        // ...and expand the trailing terms as prefixes, capped at maxExpansions
        Term[] suffixTerms = termArrays.get(sizeMinus1);
        int position = positions.get(sizeMinus1);
        ObjectOpenHashSet<Term> terms = new ObjectOpenHashSet<Term>();
        for (Term term : suffixTerms) {
            getPrefixTerms(terms, term, reader);
            if (terms.size() > maxExpansions) {
                break;
            }
        }
        // no indexed term starts with any of the prefixes: the phrase cannot match
        if (terms.isEmpty()) {
            return Queries.newMatchNoDocsQuery();
        }
        query.add(terms.toArray(Term.class), position);
        return query.rewrite(reader);
    }
    /**
     * Collects, walking each index segment, all terms of {@code field} that
     * start with {@code prefix}, stopping once {@code maxExpansions} terms
     * have been gathered overall.
     */
    private void getPrefixTerms(ObjectOpenHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
        // SlowCompositeReaderWrapper could be used... but this would merge all terms from each segment into one terms
        // instance, which is very expensive. Therefore I think it is better to iterate over each leaf individually.
        TermsEnum termsEnum = null;
        List<AtomicReaderContext> leaves = reader.leaves();
        for (AtomicReaderContext leaf : leaves) {
            Terms _terms = leaf.reader().terms(field);
            if (_terms == null) {
                continue;
            }

            termsEnum = _terms.iterator(termsEnum);
            TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(prefix.bytes());
            if (TermsEnum.SeekStatus.END == seekStatus) {
                continue;
            }

            for (BytesRef term = termsEnum.term(); term != null; term = termsEnum.next()) {
                if (!StringHelper.startsWith(term, prefix.bytes())) {
                    // left the prefix range for this segment
                    break;
                }

                // deep-copy: the enum reuses its BytesRef buffer across next() calls
                terms.add(new Term(field, BytesRef.deepCopyOf(term)));
                if (terms.size() >= maxExpansions) {
                    return;
                }
            }
        }
    }
/**
 * Renders this query in the usual phrase syntax, e.g. {@code field:"(a b) c"~2}.
 */
@Override
public final String toString(String f) {
    StringBuilder sb = new StringBuilder();
    if (field == null || !field.equals(f)) {
        sb.append(field).append(":");
    }
    sb.append("\"");
    for (Iterator<Term[]> it = termArrays.iterator(); it.hasNext(); ) {
        Term[] terms = it.next();
        if (terms.length > 1) {
            sb.append("(");
            for (int j = 0; j < terms.length; j++) {
                sb.append(terms[j].text());
                if (j < terms.length - 1) {
                    sb.append(" ");
                }
            }
            sb.append(")");
        } else {
            sb.append(terms[0].text());
        }
        if (it.hasNext()) {
            sb.append(" ");
        }
    }
    sb.append("\"");
    if (slop != 0) {
        sb.append("~").append(slop);
    }
    sb.append(ToStringUtils.boost(getBoost()));
    return sb.toString();
}
/**
 * Returns true if <code>o</code> is equal to this.
 */
@Override
public boolean equals(Object o) {
    if (!(o instanceof MultiPhrasePrefixQuery)) {
        return false;
    }
    MultiPhrasePrefixQuery that = (MultiPhrasePrefixQuery) o;
    return getBoost() == that.getBoost()
            && slop == that.slop
            && termArraysEquals(termArrays, that.termArrays)
            && positions.equals(that.positions);
}
/**
 * Returns a hash code value for this object.
 */
@Override
public int hashCode() {
    int h = Float.floatToIntBits(getBoost());
    h ^= slop;
    h ^= termArraysHashCode();
    h ^= positions.hashCode();
    h ^= 0x4AC65113;
    return h;
}
// Breakout calculation of the termArrays hashcode; mirrors List.hashCode() over
// Arrays.hashCode of each element (null elements contribute 0).
private int termArraysHashCode() {
    int result = 1;
    for (int i = 0; i < termArrays.size(); i++) {
        final Term[] arr = termArrays.get(i);
        result = 31 * result + (arr == null ? 0 : Arrays.hashCode(arr));
    }
    return result;
}
// Breakout calculation of the termArrays equals. Arrays.equals already implements the
// null handling the original spelled out: equals(null, null) is true, equals(null, x) false.
private boolean termArraysEquals(List<Term[]> left, List<Term[]> right) {
    if (left.size() != right.size()) {
        return false;
    }
    final Iterator<Term[]> li = left.iterator();
    final Iterator<Term[]> ri = right.iterator();
    while (li.hasNext()) {
        if (!Arrays.equals(li.next(), ri.next())) {
            return false;
        }
    }
    return true;
}
/**
 * Returns the field name this prefix query applies to.
 */
public String getField() {
    return field;
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_search_MultiPhrasePrefixQuery.java
|
2,773 |
/**
 * A {@link DefaultSocketChannelWrapper} that secures all traffic with TLS/SSL by routing
 * reads and writes through an {@link SSLEngine}. The handshake is performed lazily on the
 * first read or write and is guarded so it runs at most once.
 *
 * Fix: both InterruptedException handlers previously swallowed the thread's interrupt
 * status before rethrowing as IOException; they now re-interrupt the current thread.
 */
public class SSLSocketChannelWrapper extends DefaultSocketChannelWrapper {
    private static final boolean DEBUG = false;

    /** Plaintext produced by the most recent unwrap; drained into caller buffers by read(). */
    private final ByteBuffer in;
    /** Zero-capacity buffer used to drive wrap-only handshake steps. */
    private final ByteBuffer emptyBuffer;
    /** "reliable" write transport: encrypted bytes pending write to the channel. */
    private final ByteBuffer netOutBuffer;
    /** "reliable" read transport: encrypted bytes read from the channel, pending unwrap. */
    private final ByteBuffer netInBuffer;
    private final SSLEngine sslEngine;
    private volatile boolean handshakeCompleted;
    /** Result of the most recent wrap/unwrap; the handshake loop inspects its status. */
    private SSLEngineResult sslEngineResult;

    public SSLSocketChannelWrapper(SSLContext sslContext, SocketChannel sc, boolean client) throws Exception {
        super(sc);
        sslEngine = sslContext.createSSLEngine();
        sslEngine.setUseClientMode(client);
        sslEngine.setEnableSessionCreation(true);
        SSLSession session = sslEngine.getSession();
        in = ByteBuffer.allocate(64 * 1024);
        emptyBuffer = ByteBuffer.allocate(0);
        int netBufferMax = session.getPacketBufferSize();
        netOutBuffer = ByteBuffer.allocate(netBufferMax);
        netInBuffer = ByteBuffer.allocate(netBufferMax);
    }

    /**
     * Drives the SSL handshake to completion if it has not finished yet. Bounded to 250
     * iterations; throws {@link SSLHandshakeException} if FINISHED is never reached.
     */
    private void handshake() throws IOException {
        if (handshakeCompleted) {
            return;
        }
        if (DEBUG) {
            log("Starting handshake...");
        }
        synchronized (this) {
            // Double-checked under the lock: another thread may have completed it.
            if (handshakeCompleted) {
                if (DEBUG) {
                    log("Handshake already completed...");
                }
                return;
            }
            int counter = 0;
            if (DEBUG) {
                log("Begin handshake");
            }
            sslEngine.beginHandshake();
            writeInternal(emptyBuffer);
            while (counter++ < 250 && sslEngineResult.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.FINISHED) {
                if (DEBUG) {
                    log("Handshake status: " + sslEngineResult.getHandshakeStatus());
                }
                if (sslEngineResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_UNWRAP) {
                    if (DEBUG) {
                        log("Begin UNWRAP");
                    }
                    netInBuffer.clear();
                    // Spin until at least one encrypted byte arrives from the peer.
                    while (socketChannel.read(netInBuffer) < 1) {
                        try {
                            if (DEBUG) {
                                log("Spinning on channel read...");
                            }
                            Thread.sleep(50);
                        } catch (InterruptedException e) {
                            // Preserve the interrupt status for callers further up the stack.
                            Thread.currentThread().interrupt();
                            throw new IOException(e);
                        }
                    }
                    netInBuffer.flip();
                    unwrap(netInBuffer);
                    if (DEBUG) {
                        log("Done UNWRAP: " + sslEngineResult.getHandshakeStatus());
                    }
                    if (sslEngineResult.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.FINISHED) {
                        emptyBuffer.clear();
                        writeInternal(emptyBuffer);
                        if (DEBUG) {
                            log("Done WRAP after UNWRAP: " + sslEngineResult.getHandshakeStatus());
                        }
                    }
                } else if (sslEngineResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_WRAP) {
                    if (DEBUG) {
                        log("Begin WRAP");
                    }
                    emptyBuffer.clear();
                    writeInternal(emptyBuffer);
                    if (DEBUG) {
                        log("Done WRAP: " + sslEngineResult.getHandshakeStatus());
                    }
                } else {
                    try {
                        if (DEBUG) {
                            log("Sleeping... Status: " + sslEngineResult.getHandshakeStatus());
                        }
                        Thread.sleep(500);
                    } catch (InterruptedException e) {
                        // Preserve the interrupt status for callers further up the stack.
                        Thread.currentThread().interrupt();
                        throw new IOException(e);
                    }
                }
            }
            if (sslEngineResult.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.FINISHED) {
                throw new SSLHandshakeException("SSL handshake failed after " + counter
                        + " trials! -> " + sslEngineResult.getHandshakeStatus());
            }
            if (DEBUG) {
                log("Handshake completed!");
            }
            // Leave the plaintext buffer empty and in "drained" (flipped) state for read().
            in.clear();
            in.flip();
            handshakeCompleted = true;
        }
    }

    private void log(String log) {
        if (DEBUG) {
            System.err.println(getClass().getSimpleName() + "[" + socketChannel.socket().getLocalSocketAddress() + "]: " + log);
        }
    }

    /**
     * Decrypts as much of {@code b} as possible into {@code in}, running any delegated
     * tasks the engine requests. Returns {@code in} (not yet flipped).
     */
    private ByteBuffer unwrap(ByteBuffer b) throws SSLException {
        in.clear();
        while (b.hasRemaining()) {
            sslEngineResult = sslEngine.unwrap(b, in);
            if (sslEngineResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_TASK) {
                if (DEBUG) {
                    log("Handshake NEED TASK");
                }
                Runnable task;
                while ((task = sslEngine.getDelegatedTask()) != null) {
                    if (DEBUG) {
                        log("Running task: " + task);
                    }
                    task.run();
                }
            } else if (sslEngineResult.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.FINISHED
                    || sslEngineResult.getStatus() == SSLEngineResult.Status.BUFFER_UNDERFLOW) {
                // FINISHED: handshake done; BUFFER_UNDERFLOW: need more encrypted input.
                return in;
            }
        }
        return in;
    }

    public int write(ByteBuffer input) throws IOException {
        if (!handshakeCompleted) {
            handshake();
        }
        return writeInternal(input);
    }

    /** Encrypts {@code input} and writes it to the channel; returns bytes written to the wire. */
    private int writeInternal(ByteBuffer input) throws IOException {
        sslEngineResult = sslEngine.wrap(input, netOutBuffer);
        netOutBuffer.flip();
        int written = socketChannel.write(netOutBuffer);
        if (netOutBuffer.hasRemaining()) {
            // Short write: keep unwritten encrypted bytes for the next attempt.
            netOutBuffer.compact();
        } else {
            netOutBuffer.clear();
        }
        return written;
    }

    public int read(ByteBuffer output) throws IOException {
        if (!handshakeCompleted) {
            handshake();
        }
        int readBytesCount = 0;
        int limit;
        // 1) Serve any plaintext left over from a previous unwrap.
        if (in.hasRemaining()) {
            limit = Math.min(in.remaining(), output.remaining());
            for (int i = 0; i < limit; i++) {
                output.put(in.get());
                readBytesCount++;
            }
            return readBytesCount;
        }
        // 2) Decrypt any encrypted bytes already buffered.
        if (netInBuffer.hasRemaining()) {
            unwrap(netInBuffer);
            in.flip();
            limit = Math.min(in.remaining(), output.remaining());
            for (int i = 0; i < limit; i++) {
                output.put(in.get());
                readBytesCount++;
            }
            if (sslEngineResult.getStatus() != SSLEngineResult.Status.BUFFER_UNDERFLOW) {
                netInBuffer.clear();
                netInBuffer.flip();
                return readBytesCount;
            }
        }
        // 3) BUFFER_UNDERFLOW (or nothing buffered): read more from the socket.
        if (netInBuffer.hasRemaining()) {
            netInBuffer.compact();
        } else {
            netInBuffer.clear();
        }
        if (socketChannel.read(netInBuffer) == -1) {
            netInBuffer.clear();
            netInBuffer.flip();
            return -1;
        }
        netInBuffer.flip();
        unwrap(netInBuffer);
        in.flip();
        limit = Math.min(in.remaining(), output.remaining());
        for (int i = 0; i < limit; i++) {
            output.put(in.get());
            readBytesCount++;
        }
        return readBytesCount;
    }

    public void close() throws IOException {
        sslEngine.closeOutbound();
        try {
            // Best effort: flush the close_notify alert; failures are irrelevant at this point.
            writeInternal(emptyBuffer);
        } catch (Exception ignored) {
        }
        socketChannel.close();
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("SSLSocketChannelWrapper{");
        sb.append("socketChannel=").append(socketChannel);
        sb.append('}');
        return sb.toString();
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_ssl_SSLSocketChannelWrapper.java
|
150 |
/**
 * Client-side {@link LifecycleService}: tracks the running state of a
 * {@link HazelcastClient} and broadcasts lifecycle state changes to registered listeners.
 */
public final class LifecycleServiceImpl implements LifecycleService {
    private final HazelcastClient client;
    private final ConcurrentMap<String, LifecycleListener> lifecycleListeners
            = new ConcurrentHashMap<String, LifecycleListener>();
    private final AtomicBoolean active = new AtomicBoolean(false);
    private final BuildInfo buildInfo;

    public LifecycleServiceImpl(HazelcastClient client) {
        this.client = client;
        // Register any LifecycleListener instances supplied through configuration.
        final List<ListenerConfig> configuredListeners = client.getClientConfig().getListenerConfigs();
        if (configuredListeners != null) {
            for (ListenerConfig listenerConfig : configuredListeners) {
                final Object impl = listenerConfig.getImplementation();
                if (impl instanceof LifecycleListener) {
                    addLifecycleListener((LifecycleListener) impl);
                }
            }
        }
        buildInfo = BuildInfoProvider.getBuildInfo();
        fireLifecycleEvent(STARTING);
    }

    private ILogger getLogger() {
        return Logger.getLogger(LifecycleService.class);
    }

    /** Registers a listener and returns the id under which it can later be removed. */
    public String addLifecycleListener(LifecycleListener lifecycleListener) {
        final String registrationId = UuidUtil.buildRandomUuidString();
        lifecycleListeners.put(registrationId, lifecycleListener);
        return registrationId;
    }

    public boolean removeLifecycleListener(String registrationId) {
        return lifecycleListeners.remove(registrationId) != null;
    }

    /** Logs the transition and notifies every registered listener of the new state. */
    public void fireLifecycleEvent(LifecycleEvent.LifecycleState lifecycleState) {
        final LifecycleEvent event = new LifecycleEvent(lifecycleState);
        getLogger().info("HazelcastClient[" + client.getName() + "]" + "["
                + buildInfo.getVersion() + "] is " + event.getState());
        for (LifecycleListener listener : lifecycleListeners.values()) {
            listener.stateChanged(event);
        }
    }

    void setStarted() {
        active.set(true);
        fireLifecycleEvent(STARTED);
    }

    public boolean isRunning() {
        return active.get();
    }

    public void shutdown() {
        // Only the first caller wins the CAS and performs the actual shutdown sequence.
        if (!active.compareAndSet(true, false)) {
            return;
        }
        fireLifecycleEvent(SHUTTING_DOWN);
        client.doShutdown();
        fireLifecycleEvent(SHUTDOWN);
    }

    public void terminate() {
        shutdown();
    }
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_LifecycleServiceImpl.java
|
1,486 |
/**
 * Hadoop map-only step that drops vertices or edges whose property value for a configured
 * key falls outside the half-open interval [startValue, endValue). Filtered elements have
 * their traversal paths cleared rather than being removed from the stream.
 */
public class IntervalFilterMap {
    public static final String CLASS = Tokens.makeNamespace(IntervalFilterMap.class) + ".class";
    public static final String KEY = Tokens.makeNamespace(IntervalFilterMap.class) + ".key";
    public static final String START_VALUE = Tokens.makeNamespace(IntervalFilterMap.class) + ".startValue";
    public static final String END_VALUE = Tokens.makeNamespace(IntervalFilterMap.class) + ".endValue";
    public static final String VALUE_CLASS = Tokens.makeNamespace(IntervalFilterMap.class) + ".valueClass";

    public enum Counters {
        VERTICES_FILTERED,
        EDGES_FILTERED
    }

    /**
     * Builds the job configuration for this step. Numbers are normalized to Float.
     *
     * NOTE(review): Boolean start/end values are accepted here, but {@code Map.setup}
     * below only handles String and Number value classes and throws an IOException for
     * anything else — confirm whether boolean intervals are actually supported end to end.
     */
    public static Configuration createConfiguration(final Class<? extends Element> klass, final String key, final Object startValue, final Object endValue) {
        final Configuration configuration = new EmptyConfiguration();
        configuration.setClass(CLASS, klass, Element.class);
        configuration.set(KEY, key);
        if (startValue instanceof String) {
            configuration.set(VALUE_CLASS, String.class.getName());
            configuration.set(START_VALUE, (String) startValue);
            configuration.set(END_VALUE, (String) endValue);
        } else if (startValue instanceof Number) {
            configuration.set(VALUE_CLASS, Float.class.getName());
            configuration.setFloat(START_VALUE, ((Number) startValue).floatValue());
            configuration.setFloat(END_VALUE, ((Number) endValue).floatValue());
        } else if (startValue instanceof Boolean) {
            configuration.set(VALUE_CLASS, Boolean.class.getName());
            configuration.setBoolean(START_VALUE, (Boolean) startValue);
            configuration.setBoolean(END_VALUE, (Boolean) endValue);
        } else {
            throw new RuntimeException("Unknown value class: " + startValue.getClass().getName());
        }
        return configuration;
    }

    public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
        // True when filtering vertices; false when filtering edges.
        private boolean isVertex;
        // Lower bound check: value >= startValue (inclusive).
        private ElementChecker startChecker;
        // Upper bound check: value < endValue (exclusive).
        private ElementChecker endChecker;

        @Override
        public void setup(final Mapper.Context context) throws IOException, InterruptedException {
            this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
            final String key = context.getConfiguration().get(KEY);
            final Class valueClass = context.getConfiguration().getClass(VALUE_CLASS, String.class);
            final Object startValue;
            final Object endValue;
            if (valueClass.equals(String.class)) {
                startValue = context.getConfiguration().get(START_VALUE);
                endValue = context.getConfiguration().get(END_VALUE);
            } else if (Number.class.isAssignableFrom((valueClass))) {
                startValue = context.getConfiguration().getFloat(START_VALUE, Float.MIN_VALUE);
                endValue = context.getConfiguration().getFloat(END_VALUE, Float.MAX_VALUE);
            } else {
                // NOTE(review): Boolean.class reaches this branch even though
                // createConfiguration can emit it — see note on createConfiguration.
                throw new IOException("Class " + valueClass + " is an unsupported value class");
            }
            this.startChecker = new ElementChecker(key, Compare.GREATER_THAN_EQUAL, startValue);
            this.endChecker = new ElementChecker(key, Compare.LESS_THAN, endValue);
        }

        @Override
        public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
            if (this.isVertex) {
                // Only elements that still carry traversal paths are candidates for filtering.
                if (value.hasPaths() && !(this.startChecker.isLegal(value) && this.endChecker.isLegal(value))) {
                    value.clearPaths();
                    DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_FILTERED, 1L);
                }
            } else {
                long counter = 0;
                for (final Edge e : value.getEdges(Direction.BOTH)) {
                    final StandardFaunusEdge edge = (StandardFaunusEdge) e;
                    if (edge.hasPaths() && !(this.startChecker.isLegal(edge) && this.endChecker.isLegal(edge))) {
                        edge.clearPaths();
                        counter++;
                    }
                }
                DEFAULT_COMPAT.incrementContextCounter(context, Counters.EDGES_FILTERED, counter);
            }
            // The vertex is always emitted; filtering only clears its (or its edges') paths.
            context.write(NullWritable.get(), value);
        }
    }
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_filter_IntervalFilterMap.java
|
1,266 |
/**
 * A storage segment that spreads its content across a chain of fixed-maximum-size files.
 * Logical positions are mapped to (file-index, file-offset) pairs; all public operations
 * are guarded by a read/write lock.
 *
 * Fix: the two range guards in {@link #getRelativePosition(long)} combined mutually
 * exclusive conditions with '&&' (e.g. {@code fileNum >= files.length && fileNum < 0})
 * and were therefore unsatisfiable dead code; they now perform real validation.
 */
public class OMultiFileSegment extends OSegment {
    protected OStorageSegmentConfiguration config;
    protected OFile[] files = new OFile[0];
    private final String fileExtension;
    private final String type;
    private final long maxSize;
    @SuppressWarnings("unused")
    private final String defrag;
    private int fileStartSize;
    final private int fileMaxSize;
    private final int fileIncrementSize;
    private boolean wasSoftlyClosedAtPreviousTime = true;
    private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();

    public OMultiFileSegment(final OStorageLocalAbstract storage, final OStorageSegmentConfiguration config,
            final String fileExtension, final int roundMaxSize) throws IOException {
        super(storage, config.name);
        readWriteLock.writeLock().lock();
        try {
            this.config = config;
            this.fileExtension = fileExtension;
            type = config.fileType;
            defrag = config.defrag;
            maxSize = OFileUtils.getSizeAsNumber(config.maxSize);
            fileStartSize = (int) OFileUtils.getSizeAsNumber(config.fileStartSize);
            final int tmpFileMaxSize = (int) OFileUtils.getSizeAsNumber(config.fileMaxSize);
            fileIncrementSize = (int) OFileUtils.getSizeAsNumber(config.fileIncrementSize);
            if (roundMaxSize > 0)
                // ROUND THE FILE SIZE TO AVOID ERRORS ON ROUNDING BY DIVIDING FOR FIXED RECORD SIZE
                fileMaxSize = (tmpFileMaxSize / roundMaxSize) * roundMaxSize;
            else
                fileMaxSize = tmpFileMaxSize;
            // INSTANTIATE ALL THE FILES
            int perFileMaxSize;
            if (config.infoFiles.length == 0) {
                // EMPTY FILE: CREATE THE FIRST FILE BY DEFAULT
                files = new OFile[1];
                files[0] = OFileFactory.instance().create(type,
                        storage.getVariableParser().resolveVariables(this.config.getLocation() + "/" + name + "." + 0 + this.fileExtension),
                        storage.getMode());
                perFileMaxSize = fileMaxSize;
                files[0].setMaxSize(perFileMaxSize);
                files[0].setIncrementSize(fileIncrementSize);
            } else {
                files = new OFile[config.infoFiles.length];
                for (int i = 0; i < files.length; ++i) {
                    files[i] = OFileFactory.instance().create(type, storage.getVariableParser().resolveVariables(config.infoFiles[i].path),
                            storage.getMode());
                    perFileMaxSize = fileMaxSize;
                    files[i].setMaxSize(perFileMaxSize);
                    files[i].setIncrementSize(fileIncrementSize);
                }
            }
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public void open() throws IOException {
        readWriteLock.writeLock().lock();
        try {
            // @TODO: LAZY OPEN FILES
            for (OFile file : files)
                if (!file.open()) {
                    // LAST TIME THE FILE WAS NOT CLOSED IN SOFT WAY
                    OLogManager.instance().warn(this, "segment file '%s' was not closed correctly last time",
                            OFileUtils.getPath(file.getName()));
                    // TODO VERIFY DATA?
                    wasSoftlyClosedAtPreviousTime = false;
                }
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    /**
     * Create the first file for current segment
     *
     * @param iStartSize
     * @throws IOException
     */
    public void create(final int iStartSize) throws IOException {
        readWriteLock.writeLock().lock();
        try {
            files = new OFile[1];
            fileStartSize = iStartSize;
            createNewFile();
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public void close() throws IOException {
        readWriteLock.writeLock().lock();
        try {
            for (OFile file : files) {
                if (file != null)
                    file.close();
            }
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public void delete() throws IOException {
        readWriteLock.writeLock().lock();
        try {
            for (OFile file : files) {
                if (file != null)
                    file.delete();
            }
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public boolean exists() {
        readWriteLock.readLock().lock();
        try {
            return files[0].exists();
        } finally {
            readWriteLock.readLock().unlock();
        }
    }

    public void truncate() throws IOException {
        readWriteLock.writeLock().lock();
        try {
            // SHRINK TO 0
            files[0].shrink(0);
            if (files.length > 1) {
                // LEAVE JUST ONE FILE
                for (int i = 1; i < files.length; ++i) {
                    if (files[i] != null)
                        files[i].delete();
                }
                // UPDATE FILE STRUCTURE
                final OFile f = files[0];
                files = new OFile[1];
                files[0] = f;
                // UPDATE CONFIGURATION
                final OStorageFileConfiguration fileConfig = config.infoFiles[0];
                config.infoFiles = new OStorageFileConfiguration[1];
                config.infoFiles[0] = fileConfig;
                config.root.update();
            }
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public void synch() throws IOException {
        readWriteLock.readLock().lock();
        try {
            for (OFile file : files) {
                if (file != null && file.isOpen())
                    file.synch();
            }
        } finally {
            readWriteLock.readLock().unlock();
        }
    }

    public void setSoftlyClosed(boolean softlyClosed) throws IOException {
        readWriteLock.writeLock().lock();
        try {
            for (OFile file : files)
                if (file != null && file.isOpen())
                    file.setSoftlyClosed(softlyClosed);
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public OStorageSegmentConfiguration getConfig() {
        readWriteLock.readLock().lock();
        try {
            return config;
        } finally {
            readWriteLock.readLock().unlock();
        }
    }

    /** Returns the sum of the filled (used) portions of all chained files. */
    public long getFilledUpTo() {
        readWriteLock.readLock().lock();
        try {
            long filled = 0;
            for (OFile file : files)
                filled += file.getFilledUpTo();
            return filled;
        } finally {
            readWriteLock.readLock().unlock();
        }
    }

    /** Returns the sum of the physical sizes of all chained files. */
    public long getSize() {
        readWriteLock.readLock().lock();
        try {
            long size = 0;
            for (OFile file : files)
                size += file.getFileSize();
            return size;
        } finally {
            readWriteLock.readLock().unlock();
        }
    }

    /**
     * Find free space for iRecordSize bytes.
     *
     * @param iRecordSize
     * @return a pair file-id/file-pos
     * @throws IOException
     */
    public long[] allocateSpace(final int iRecordSize) throws IOException {
        readWriteLock.writeLock().lock();
        try {
            // IT'S PREFEREABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE
            OFile file;
            for (int i = 0; i < files.length; ++i) {
                file = files[i];
                if (file.getFreeSpace() >= iRecordSize)
                    // FOUND: RETURN THIS OFFSET
                    return new long[] { i, file.allocateSpace(iRecordSize) };
            }
            // NOT FOUND: CHECK IF CAN OVERSIZE SOME FILES
            for (int i = 0; i < files.length; ++i) {
                file = files[i];
                if (file.canOversize(iRecordSize)) {
                    // FOUND SPACE: ENLARGE IT
                    return new long[] { i, file.allocateSpace(iRecordSize) };
                }
            }
            // TRY TO CREATE A NEW FILE
            if (maxSize > 0 && getSize() >= maxSize)
                // OUT OF MAX SIZE
                throw new OStorageException("Unable to allocate the requested space of " + iRecordSize
                        + " bytes because the segment is full: max-Size=" + maxSize + ", currentSize=" + getFilledUpTo());
            // COPY THE OLD ARRAY TO THE NEW ONE
            OFile[] newFiles = new OFile[files.length + 1];
            System.arraycopy(files, 0, newFiles, 0, files.length);
            files = newFiles;
            // CREATE THE NEW FILE AND PUT IT AS LAST OF THE ARRAY
            file = createNewFile();
            file.allocateSpace(iRecordSize);
            config.root.update();
            return new long[] { files.length - 1, 0 };
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    /**
     * Return the absolute position receiving the pair file-id/file-pos.
     *
     * @param iFilePosition
     *          as pair file-id/file-pos
     * @return
     */
    public long getAbsolutePosition(final long[] iFilePosition) {
        readWriteLock.readLock().lock();
        try {
            // Each preceding file contributes a full fileMaxSize worth of address space.
            return iFilePosition[0] * fileMaxSize + iFilePosition[1];
        } finally {
            readWriteLock.readLock().unlock();
        }
    }

    public long[] getRelativePosition(final long iPosition) {
        readWriteLock.readLock().lock();
        try {
            if (iPosition < fileMaxSize)
                return new long[] { 0L, iPosition };
            final int fileNum = (int) (iPosition / fileMaxSize);
            // FIX: the original guard used '&&' over mutually exclusive conditions and never fired.
            if (fileNum >= files.length || fileNum < 0)
                throw new ODatabaseException("Record position #" + iPosition + " was bound to file #" + fileNum
                        + " that is out of limit (files range 0-" + (files.length - 1) + ")");
            final int fileRec = (int) (iPosition % fileMaxSize);
            // FIX: same dead '&&' guard. Only the negative case is enforced here; tightening to
            // also reject fileRec >= getFilledUpTo() could break append-at-boundary callers
            // (write/read continue from offset == filledUpTo) — confirm before strengthening.
            if (fileRec < 0)
                throw new ODatabaseException("Record position #" + iPosition + " was bound to file #" + fileNum + " but the position #"
                        + fileRec + " is out of file size " + files[fileNum].getFilledUpTo());
            return new long[] { fileNum, fileRec };
        } finally {
            readWriteLock.readLock().unlock();
        }
    }

    /** Creates, sizes and registers the next file in the chain. */
    private OFile createNewFile() throws IOException {
        final int num = files.length - 1;
        final OFile file = OFileFactory.instance().create(type, config.getLocation() + "/" + name + "." + num + fileExtension,
                storage.getMode());
        file.setMaxSize(fileMaxSize);
        file.create(fileStartSize);
        files[num] = file;
        addInfoFileConfigEntry(file);
        return file;
    }

    private void addInfoFileConfigEntry(final OFile file) throws IOException {
        OStorageFileConfiguration[] newConfigFiles = new OStorageFileConfiguration[config.infoFiles.length + 1];
        System.arraycopy(config.infoFiles, 0, newConfigFiles, 0, config.infoFiles.length);
        config.infoFiles = newConfigFiles;
        // CREATE A NEW ENTRY FOR THE NEW FILE
        String fileNameToStore = storage.getVariableParser().convertPathToRelative(OFileUtils.getPath(file.getPath()));
        final OStorageSegmentConfiguration template = config.root.fileTemplate;
        config.infoFiles[config.infoFiles.length - 1] = new OStorageFileConfiguration(config, fileNameToStore, template.fileType,
                template.fileMaxSize, template.fileIncrementSize);
    }

    /**
     * Allocates iSize bytes of contiguous logical address space, spanning/creating files
     * as needed, and returns the absolute start position of the allocation.
     */
    public long allocateSpaceContinuously(final int iSize) throws IOException {
        readWriteLock.writeLock().lock();
        try {
            // IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE
            OFile file;
            int remainingSize = iSize;
            // IF SOME FILES ALREADY CREATED
            long offset = -1;
            int fileNumber = -1;
            if (files.length > 0) {
                // CHECK IF THERE IS FREE SPACE IN LAST FILE IN CHAIN
                file = files[files.length - 1];
                if (file.getFreeSpace() > 0) {
                    fileNumber = files.length - 1;
                    if (remainingSize > file.getFreeSpace()) {
                        remainingSize -= file.getFreeSpace();
                        offset = file.allocateSpace(file.getFreeSpace());
                    } else {
                        return (long) (files.length - 1) * fileMaxSize + file.allocateSpace(remainingSize);
                    }
                }
                // NOT FOUND FREE SPACE: CHECK IF CAN OVERSIZE LAST FILE
                final long oversize = fileMaxSize - file.getFileSize();
                if (oversize > 0 && remainingSize > 0) {
                    fileNumber = files.length - 1;
                    if (remainingSize > oversize) {
                        remainingSize -= oversize;
                        long newOffset = file.allocateSpace(oversize);
                        // SAVE OFFSET IF IT WASN'T SAVED EARLIER
                        if (offset == -1)
                            offset = newOffset;
                    } else {
                        long newOffset = file.allocateSpace(remainingSize);
                        if (offset == -1)
                            offset = newOffset;
                        if (fileNumber == -1) {
                            fileNumber = files.length - 1;
                        }
                        return (long) fileNumber * fileMaxSize + offset;
                    }
                }
            }
            // CREATE NEW FILE BECAUSE THERE IS NO FILES OR WE CANNOT ENLARGE EXISTING ENOUGH
            if (remainingSize > 0) {
                if (maxSize > 0 && getSize() >= maxSize)
                    // OUT OF MAX SIZE
                    throw new OStorageException("Unable to allocate the requested space of " + iSize
                            + " bytes because the segment is full: max-Size=" + maxSize + ", currentSize=" + getFilledUpTo());
                // COPY THE OLD ARRAY TO THE NEW ONE
                OFile[] newFiles = new OFile[files.length + 1];
                System.arraycopy(files, 0, newFiles, 0, files.length);
                files = newFiles;
                // CREATE THE NEW FILE AND PUT IT AS LAST OF THE ARRAY
                file = createNewFile();
                file.allocateSpace(iSize);
                config.root.update();
                if (fileNumber == -1) {
                    fileNumber = files.length - 1;
                }
                if (offset == -1)
                    offset = 0;
            }
            return (long) fileNumber * fileMaxSize + offset;
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public void writeContinuously(long iPosition, byte[] iData) throws IOException {
        readWriteLock.writeLock().lock();
        try {
            long[] pos = getRelativePosition(iPosition);
            // IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE
            OFile file;
            int remainingSize = iData.length;
            long offset = pos[1];
            for (int i = (int) pos[0]; remainingSize > 0; ++i) {
                file = files[i];
                if (remainingSize > file.getFilledUpTo() - offset) {
                    if (file.getFilledUpTo() < offset) {
                        throw new ODatabaseException("range check! " + file.getFilledUpTo() + " " + offset);
                    }
                    file.write(offset, iData, (int) (file.getFilledUpTo() - offset), iData.length - remainingSize);
                    remainingSize -= (file.getFilledUpTo() - offset);
                } else {
                    file.write(offset, iData, remainingSize, iData.length - remainingSize);
                    remainingSize = 0;
                }
                // Subsequent files in the chain are written from their beginning.
                offset = 0;
            }
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public void writeContinuously(long iPosition, byte[] iData, int arrayOffset, int length) throws IOException {
        readWriteLock.writeLock().lock();
        try {
            long[] pos = getRelativePosition(iPosition);
            // IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE
            OFile file;
            int remainingSize = length;
            long offset = pos[1];
            for (int i = (int) pos[0]; remainingSize > 0; ++i) {
                file = files[i];
                if (remainingSize > file.getFilledUpTo() - offset) {
                    if (file.getFilledUpTo() < offset) {
                        throw new ODatabaseException("range check! " + file.getFilledUpTo() + " " + offset);
                    }
                    file.write(offset, iData, (int) (file.getFilledUpTo() - offset), arrayOffset + iData.length - remainingSize);
                    remainingSize -= (file.getFilledUpTo() - offset);
                } else {
                    file.write(offset, iData, remainingSize, arrayOffset + iData.length - remainingSize);
                    remainingSize = 0;
                }
                offset = 0;
            }
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public void readContinuously(final long iPosition, byte[] iBuffer, final int iSize) throws IOException {
        readWriteLock.readLock().lock();
        try {
            long[] pos = getRelativePosition(iPosition);
            // IT'S PREFERABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE
            OFile file;
            int remainingSize = iSize;
            long offset = pos[1];
            assert offset < Integer.MAX_VALUE;
            assert offset > -1;
            for (int i = (int) pos[0]; remainingSize > 0; ++i) {
                file = files[i];
                if (remainingSize > file.getFilledUpTo() - offset) {
                    if (file.getFilledUpTo() < offset) {
                        throw new ODatabaseException("range check! " + file.getFilledUpTo() + " " + offset);
                    }
                    int toRead = (int) (file.getFilledUpTo() - offset);
                    file.read(offset, iBuffer, toRead, iSize - remainingSize);
                    remainingSize -= toRead;
                } else {
                    file.read(offset, iBuffer, remainingSize, iSize - remainingSize);
                    remainingSize = 0;
                }
                offset = 0;
            }
        } finally {
            readWriteLock.readLock().unlock();
        }
    }

    public void rename(String iOldName, String iNewName) {
        readWriteLock.writeLock().lock();
        try {
            for (OFile file : files) {
                final String osFileName = file.getName();
                if (osFileName.startsWith(name)) {
                    final File newFile = new File(storage.getStoragePath() + "/" + iNewName
                            + osFileName.substring(osFileName.lastIndexOf(name) + name.length()));
                    for (OStorageFileConfiguration conf : config.infoFiles) {
                        if (conf.parent.name.equals(name))
                            conf.parent.name = iNewName;
                        if (conf.path.endsWith(osFileName))
                            conf.path = new String(conf.path.replace(osFileName, newFile.getName()));
                    }
                    // Retry the rename, nudging resource cleanup between attempts.
                    boolean renamed = file.renameTo(newFile);
                    while (!renamed) {
                        OMemoryWatchDog.freeMemoryForResourceCleanup(100);
                        renamed = file.renameTo(newFile);
                    }
                }
            }
        } finally {
            readWriteLock.writeLock().unlock();
        }
    }

    public boolean wasSoftlyClosedAtPreviousTime() {
        readWriteLock.readLock().lock();
        try {
            return wasSoftlyClosedAtPreviousTime;
        } finally {
            readWriteLock.readLock().unlock();
        }
    }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_OMultiFileSegment.java
|
2,613 |
// Handles the outcome of a single fault-detection ping to a node: a response resets the
// retry counter and schedules the next ping; a failure retries up to pingRetryCount
// times before declaring the node failed.
new BaseTransportResponseHandler<PingResponse>() {
    @Override
    public PingResponse newInstance() {
        return new PingResponse();
    }

    @Override
    public void handleResponse(PingResponse response) {
        if (!running) {
            return;
        }
        NodeFD nodeFD = nodesFD.get(node);
        if (nodeFD != null) {
            if (!nodeFD.running) {
                return;
            }
            // Successful ping: reset the failure streak and schedule the next round.
            nodeFD.retryCount = 0;
            threadPool.schedule(pingInterval, ThreadPool.Names.SAME, SendPingRequest.this);
        }
    }

    @Override
    public void handleException(TransportException exp) {
        // check if the master node did not get switched on us...
        if (!running) {
            return;
        }
        if (exp instanceof ConnectTransportException) {
            // ignore this one, we already handle it by registering a connection listener
            return;
        }
        NodeFD nodeFD = nodesFD.get(node);
        if (nodeFD != null) {
            if (!nodeFD.running) {
                return;
            }
            int retryCount = ++nodeFD.retryCount;
            logger.trace("[node  ] failed to ping [{}], retry [{}] out of [{}]", exp, node, retryCount, pingRetryCount);
            if (retryCount >= pingRetryCount) {
                logger.debug("[node  ] failed to ping [{}], tried [{}] times, each with  maximum [{}] timeout", node, pingRetryCount, pingRetryTimeout);
                // not good, failure
                if (nodesFD.remove(node) != null) {
                    notifyNodeFailure(node, "failed to ping, tried [" + pingRetryCount + "] times, each with maximum [" + pingRetryTimeout + "] timeout");
                }
            } else {
                // resend the request, not reschedule, rely on send timeout
                transportService.sendRequest(node, PingRequestHandler.ACTION, new PingRequest(node.id()),
                        options().withType(TransportRequestOptions.Type.PING).withTimeout(pingRetryTimeout), this);
            }
        }
    }

    @Override
    public String executor() {
        // Run callbacks inline on the transport thread; the work here is cheap.
        return ThreadPool.Names.SAME;
    }
});
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_fd_NodesFaultDetection.java
|
443 |
/**
 * Associates an {@link AdminPresentationMap} override with a named property, allowing the
 * property's original annotation to be replaced.
 *
 * @deprecated NOTE(review): the intended replacement mechanism is not visible from this
 * file — confirm the successor annotation and link it here.
 */
@Deprecated
public @interface AdminPresentationMapOverride {
    /**
     * The name of the property whose AdminPresentation annotation should be overwritten
     *
     * @return the name of the property that should be overwritten
     */
    String name();

    /**
     * The AdminPresentation to overwrite the property with
     *
     * @return the AdminPresentation being mapped to the attribute
     */
    AdminPresentationMap value();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_presentation_override_AdminPresentationMapOverride.java
|
403 |
/**
 * Type-safe enumeration of supported credit card brands. Instances self-register in a
 * shared lookup map so they can later be resolved by type name via {@link #getInstance(String)}.
 */
public class CreditCardType implements Serializable, BroadleafEnumerationType {

    private static final long serialVersionUID = 1L;
    private static final Map<String, CreditCardType> TYPES = new LinkedHashMap<String, CreditCardType>();

    public static final CreditCardType MASTERCARD = new CreditCardType("MASTERCARD", "Master Card");
    public static final CreditCardType VISA = new CreditCardType("VISA", "Visa");
    public static final CreditCardType AMEX = new CreditCardType("AMEX", "American Express");
    public static final CreditCardType DINERSCLUB_CARTEBLANCHE = new CreditCardType("DINERSCLUB_CARTEBLANCHE", "Diner's Club / Carte Blanche");
    public static final CreditCardType DISCOVER = new CreditCardType("DISCOVER", "Discover");
    public static final CreditCardType ENROUTE = new CreditCardType("ENROUTE", "En Route");
    public static final CreditCardType JCB = new CreditCardType("JCB", "JCB");

    /** Resolves a previously registered instance by its type name, or null if unknown. */
    public static CreditCardType getInstance(final String type) {
        return TYPES.get(type);
    }

    private String type;
    private String friendlyType;

    public CreditCardType() {
        //do nothing
    }

    public CreditCardType(final String type, final String friendlyType) {
        this.friendlyType = friendlyType;
        setType(type);
    }

    public String getType() {
        return type;
    }

    public String getFriendlyType() {
        return friendlyType;
    }

    private void setType(final String type) {
        this.type = type;
        // First registration wins; later instances with the same name are not re-registered.
        if (!TYPES.containsKey(type)) {
            TYPES.put(type, this);
        }
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        return prime + (type == null ? 0 : type.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        final CreditCardType other = (CreditCardType) obj;
        return type == null ? other.type == null : type.equals(other.type);
    }
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_payment_CreditCardType.java
|
146 |
/**
 * Static utility for translating between user-supplied long ids and internal
 * Titan vertex ids (which carry type-padding bits).
 */
public class TitanId {

    /** Utility class — not meant to be instantiated. */
    private TitanId() {
        throw new AssertionError("TitanId is a static utility class");
    }

    /**
     * Converts a user provided long id into a Titan vertex id. The id must be positive and can be at most 61 bits long.
     * This method is useful when providing ids during vertex creation via {@link com.tinkerpop.blueprints.Graph#addVertex(Object)}.
     *
     * @param id long id
     * @return a corresponding Titan vertex id
     * @throws IllegalArgumentException if the id is non-positive or exceeds the usable bit range
     */
    public static long toVertexId(long id) {
        Preconditions.checkArgument(id > 0, "Vertex id must be positive: %s", id);
        Preconditions.checkArgument(IDManager.VertexIDType.NormalVertex.removePadding(Long.MAX_VALUE) >= id, "Vertex id is too large: %s", id);
        return IDManager.VertexIDType.NormalVertex.addPadding(id);
    }

    /**
     * Converts a Titan vertex id to the user provided id as the inverse mapping of {@link #toVertexId(long)}.
     *
     * @param id Titan vertex id (must be positive)
     * @return original user provided id
     */
    public static long fromVertexId(long id) {
        Preconditions.checkArgument(id > 0, "Invalid vertex id provided: %s", id);
        return IDManager.VertexIDType.NormalVertex.removePadding(id);
    }

    /**
     * Converts a Titan vertex id of a given vertex to the user provided id as the inverse mapping of {@link #toVertexId(long)}.
     * <p>
     * NOTE: retains the historical {@code fromVertexID} capitalization for
     * backward compatibility with existing callers.
     *
     * @param v Vertex
     * @return original user provided id
     */
    public static long fromVertexID(TitanVertex v) {
        Preconditions.checkArgument(v.hasId(), "Invalid vertex provided: %s", v);
        return fromVertexId(v.getLongId());
    }
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_util_TitanId.java
|
397 |
/**
 * Thread-local holder for the currency conversion collaborators bound to the
 * current request thread: the {@link CurrencyConversionService} performing the
 * conversion and a map of contextual attributes consulted by that service.
 * <p>
 * NOTE(review): the context map is a raw {@code HashMap}; its key/value types
 * are not visible from this class — presumably String-keyed, confirm against
 * callers before tightening the generics (a change here alters the public API).
 */
public class CurrencyConversionContext {
    // Conversion service for the current thread; lifecycle managed by ThreadLocalManager.
    private static final ThreadLocal<CurrencyConversionService> currencyConversionService = ThreadLocalManager.createThreadLocal(CurrencyConversionService.class);
    // Per-thread contextual attributes used during conversion.
    private static final ThreadLocal<HashMap> currencyConversionContext = ThreadLocalManager.createThreadLocal(HashMap.class);
    public static HashMap getCurrencyConversionContext() {
        return CurrencyConversionContext.currencyConversionContext.get();
    }
    public static void setCurrencyConversionContext(HashMap currencyConsiderationContext) {
        CurrencyConversionContext.currencyConversionContext.set(currencyConsiderationContext);
    }
    public static CurrencyConversionService getCurrencyConversionService() {
        return CurrencyConversionContext.currencyConversionService.get();
    }
    public static void setCurrencyConversionService(CurrencyConversionService currencyDeterminationService) {
        CurrencyConversionContext.currencyConversionService.set(currencyDeterminationService);
    }
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_money_CurrencyConversionContext.java
|
326 |
// Test callback: releases the latch when the async operation completes
// successfully so the awaiting test thread can proceed.
ExecutionCallback executionCallback = new ExecutionCallback() {
    @Override
    public void onResponse(Object response) {
        latch.countDown();
    }
    @Override
    public void onFailure(Throwable t) {
        // NOTE(review): failures are silently swallowed and the latch is NOT
        // counted down — on failure the awaiting test will time out instead of
        // failing fast. Presumably intentional for this test; confirm.
    }
};
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
19 |
// Order completion proposals: nearest proximity first, then alphabetically by
// simple name, and finally by fully-qualified name as a stable tie-breaker.
Collections.sort(results, new Comparator<DeclarationWithProximity>() {
    public int compare(DeclarationWithProximity left, DeclarationWithProximity right) {
        if (left.getProximity() < right.getProximity()) return -1;
        if (right.getProximity() < left.getProximity()) return 1;
        int byName = left.getDeclaration().getName()
                .compareTo(right.getDeclaration().getName());
        if (byName != 0) return byName;
        return left.getDeclaration().getQualifiedNameString()
                .compareTo(right.getDeclaration().getQualifiedNameString());
    }
});
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_CompletionUtil.java
|
1,056 |
/**
 * Request to return term vector information for a single document, identified by
 * index/type/id. Which statistics are returned is controlled by a set of {@link Flag}s
 * encoded as a bitmask on the wire.
 */
public class TermVectorRequest extends SingleShardOperationRequest<TermVectorRequest> {
    private String type;
    private String id;
    private String routing;
    protected String preference;
    // TODO: change to String[]
    private Set<String> selectedFields;
    // Default flags: everything except term statistics.
    private EnumSet<Flag> flagsEnum = EnumSet.of(Flag.Positions, Flag.Offsets, Flag.Payloads,
            Flag.FieldStatistics);
    public TermVectorRequest() {
    }
    /**
     * Constructs a new term vector request for a document that will be fetched
     * from the provided index. Use {@link #type(String)} and
     * {@link #id(String)} to specify the document to load.
     */
    public TermVectorRequest(String index, String type, String id) {
        super(index);
        this.id = id;
        this.type = type;
    }
    /**
     * Copy constructor: constructs a new term vector request carrying the same
     * index, type, id, flags, preference, routing and selected fields as the
     * given request.
     */
    public TermVectorRequest(TermVectorRequest other) {
        super(other.index());
        this.id = other.id();
        this.type = other.type();
        this.flagsEnum = other.getFlags().clone();
        this.preference = other.preference();
        this.routing = other.routing();
        if (other.selectedFields != null) {
            this.selectedFields = new HashSet<String>(other.selectedFields);
        }
    }
    public EnumSet<Flag> getFlags() {
        return flagsEnum;
    }
    /**
     * Sets the type of document to get the term vector for.
     */
    public TermVectorRequest type(String type) {
        this.type = type;
        return this;
    }
    /**
     * Returns the type of document to get the term vector for.
     */
    public String type() {
        return type;
    }
    /**
     * Returns the id of document the term vector is requested for.
     */
    public String id() {
        return id;
    }
    /**
     * Sets the id of document the term vector is requested for.
     */
    public TermVectorRequest id(String id) {
        this.id = id;
        return this;
    }
    /**
     * @return The routing for this request.
     */
    public String routing() {
        return routing;
    }
    public TermVectorRequest routing(String routing) {
        this.routing = routing;
        return this;
    }
    /**
     * Sets the parent id of this document. Will simply set the routing to this
     * value, as it is only used for routing with delete requests.
     */
    public TermVectorRequest parent(String parent) {
        if (routing == null) {
            routing = parent;
        }
        return this;
    }
    public String preference() {
        return this.preference;
    }
    /**
     * Sets the preference to execute the search. Defaults to randomize across
     * shards. Can be set to <tt>_local</tt> to prefer local shards,
     * <tt>_primary</tt> to execute only on primary shards, or a custom value,
     * which guarantees that the same order will be used across different
     * requests.
     */
    public TermVectorRequest preference(String preference) {
        this.preference = preference;
        return this;
    }
    /**
     * Return the start and stop offsets for each term if they were stored or
     * skip offsets.
     */
    public TermVectorRequest offsets(boolean offsets) {
        setFlag(Flag.Offsets, offsets);
        return this;
    }
    /**
     * @return <code>true</code> if term offsets should be returned. Otherwise
     *         <code>false</code>
     */
    public boolean offsets() {
        return flagsEnum.contains(Flag.Offsets);
    }
    /**
     * Return the positions for each term if stored or skip.
     */
    public TermVectorRequest positions(boolean positions) {
        setFlag(Flag.Positions, positions);
        return this;
    }
    /**
     * @return Returns if the positions for each term should be returned if
     *         stored or skip.
     */
    public boolean positions() {
        return flagsEnum.contains(Flag.Positions);
    }
    /**
     * @return <code>true</code> if term payloads should be returned. Otherwise
     *         <code>false</code>
     */
    public boolean payloads() {
        return flagsEnum.contains(Flag.Payloads);
    }
    /**
     * Return the payloads for each term or skip.
     */
    public TermVectorRequest payloads(boolean payloads) {
        setFlag(Flag.Payloads, payloads);
        return this;
    }
    /**
     * @return <code>true</code> if term statistics should be returned.
     *         Otherwise <code>false</code>
     */
    public boolean termStatistics() {
        return flagsEnum.contains(Flag.TermStatistics);
    }
    /**
     * Return the term statistics for each term in the shard or skip.
     */
    public TermVectorRequest termStatistics(boolean termStatistics) {
        setFlag(Flag.TermStatistics, termStatistics);
        return this;
    }
    /**
     * @return <code>true</code> if field statistics should be returned.
     *         Otherwise <code>false</code>
     */
    public boolean fieldStatistics() {
        return flagsEnum.contains(Flag.FieldStatistics);
    }
    /**
     * Return the field statistics for each term in the shard or skip.
     */
    public TermVectorRequest fieldStatistics(boolean fieldStatistics) {
        setFlag(Flag.FieldStatistics, fieldStatistics);
        return this;
    }
    /**
     * Return only term vectors for special selected fields. Returns for term
     * vectors for all fields if selectedFields == null
     */
    public Set<String> selectedFields() {
        return selectedFields;
    }
    /**
     * Return only term vectors for special selected fields. Returns the term
     * vectors for all fields if selectedFields == null
     */
    public TermVectorRequest selectedFields(String[] fields) {
        selectedFields = fields != null && fields.length != 0 ? Sets.newHashSet(fields) : null;
        return this;
    }
    private void setFlag(Flag flag, boolean set) {
        if (set && !flagsEnum.contains(flag)) {
            flagsEnum.add(flag);
        } else if (!set) {
            flagsEnum.remove(flag);
            assert (!flagsEnum.contains(flag));
        }
    }
    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (index == null) {
            validationException = ValidateActions.addValidationError("index is missing", validationException);
        }
        if (type == null) {
            validationException = ValidateActions.addValidationError("type is missing", validationException);
        }
        if (id == null) {
            validationException = ValidateActions.addValidationError("id is missing", validationException);
        }
        return validationException;
    }
    public static TermVectorRequest readTermVectorRequest(StreamInput in) throws IOException {
        TermVectorRequest termVectorRequest = new TermVectorRequest();
        termVectorRequest.readFrom(in);
        return termVectorRequest;
    }
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        index = in.readString();
        type = in.readString();
        id = in.readString();
        routing = in.readOptionalString();
        preference = in.readOptionalString();
        long flags = in.readVLong();
        flagsEnum.clear();
        for (Flag flag : Flag.values()) {
            // Use a long (1L) shift: the mask is a vlong on the wire, and an int
            // shift would silently wrap once flag ordinals reach 32.
            if ((flags & (1L << flag.ordinal())) != 0) {
                flagsEnum.add(flag);
            }
        }
        int numSelectedFields = in.readVInt();
        if (numSelectedFields > 0) {
            selectedFields = new HashSet<String>();
            for (int i = 0; i < numSelectedFields; i++) {
                selectedFields.add(in.readString());
            }
        }
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(index);
        out.writeString(type);
        out.writeString(id);
        out.writeOptionalString(routing);
        out.writeOptionalString(preference);
        long longFlags = 0;
        for (Flag flag : flagsEnum) {
            // 1L shift to match readFrom and stay correct beyond 31 flags.
            longFlags |= (1L << flag.ordinal());
        }
        out.writeVLong(longFlags);
        if (selectedFields != null) {
            out.writeVInt(selectedFields.size());
            for (String selectedField : selectedFields) {
                out.writeString(selectedField);
            }
        } else {
            out.writeVInt(0);
        }
    }
    public static enum Flag {
        // Do not change the order of these flags we use
        // the ordinal for encoding! Only append to the end!
        Positions, Offsets, Payloads, FieldStatistics, TermStatistics;
    }
    /**
     * populates a request object (pre-populated with defaults) based on a parser.
     *
     * @param termVectorRequest
     * @param parser
     * @throws IOException
     */
    public static void parseRequest(TermVectorRequest termVectorRequest, XContentParser parser) throws IOException {
        XContentParser.Token token;
        String currentFieldName = null;
        List<String> fields = new ArrayList<String>();
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (currentFieldName != null) {
                if (currentFieldName.equals("fields")) {
                    if (token == XContentParser.Token.START_ARRAY) {
                        while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                            fields.add(parser.text());
                        }
                    } else {
                        throw new ElasticsearchParseException(
                                "The parameter fields must be given as an array! Use syntax : \"fields\" : [\"field1\", \"field2\",...]");
                    }
                } else if (currentFieldName.equals("offsets")) {
                    termVectorRequest.offsets(parser.booleanValue());
                } else if (currentFieldName.equals("positions")) {
                    termVectorRequest.positions(parser.booleanValue());
                } else if (currentFieldName.equals("payloads")) {
                    termVectorRequest.payloads(parser.booleanValue());
                } else if (currentFieldName.equals("term_statistics") || currentFieldName.equals("termStatistics")) {
                    termVectorRequest.termStatistics(parser.booleanValue());
                } else if (currentFieldName.equals("field_statistics") || currentFieldName.equals("fieldStatistics")) {
                    termVectorRequest.fieldStatistics(parser.booleanValue());
                } else if ("_index".equals(currentFieldName)) { // the following is important for multi request parsing.
                    termVectorRequest.index = parser.text();
                } else if ("_type".equals(currentFieldName)) {
                    termVectorRequest.type = parser.text();
                } else if ("_id".equals(currentFieldName)) {
                    termVectorRequest.id = parser.text();
                } else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
                    termVectorRequest.routing = parser.text();
                } else {
                    throw new ElasticsearchParseException("The parameter " + currentFieldName
                            + " is not valid for term vector request!");
                }
            }
        }
        if (fields.size() > 0) {
            String[] fieldsAsArray = new String[fields.size()];
            termVectorRequest.selectedFields(fields.toArray(fieldsAsArray));
        }
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_termvector_TermVectorRequest.java
|
1,262 |
/**
 * Pricing workflow activity that distributes order-item totals and order-level
 * savings across fulfillment group items, repairs penny-level rounding drift,
 * and finalizes each item's taxable amount.
 * <p>
 * NOTE(review): the rounding-repair steps depend on the exact statement order
 * below; do not reorder the calls in {@link #execute(PricingContext)}.
 */
public class FulfillmentItemPricingActivity extends BaseActivity<PricingContext> {
    private static final Log LOG = LogFactory.getLog(FulfillmentItemPricingActivity.class);
    // Currency resolved from the owning order of the fulfillment group.
    protected BroadleafCurrency getCurrency(FulfillmentGroup fg) {
        return fg.getOrder().getCurrency();
    }
    /**
     * Returns the order adjustment value or zero if none exists.
     * Also returns zero (and warns) when the subtotal is null or smaller than
     * the adjustment, in which case no distribution takes place.
     * @param order
     * @return
     */
    protected Money getOrderSavingsToDistribute(Order order) {
        if (order.getOrderAdjustmentsValue() == null) {
            return new Money(order.getCurrency());
        } else {
            Money adjustmentValue = order.getOrderAdjustmentsValue();
            Money orderSubTotal = order.getSubTotal();
            if (orderSubTotal == null || orderSubTotal.lessThan(adjustmentValue)) {
                if (LOG.isWarnEnabled()) {
                    LOG.warn("Subtotal is null or less than orderSavings in DistributeOrderSavingsActivity.java. " +
                            "No distribution is taking place.");
                }
                return new Money(order.getCurrency());
            }
            return adjustmentValue;
        }
    }
    @Override
    public PricingContext execute(PricingContext context) throws Exception {
        Order order = context.getSeedData();
        // Tracks order items whose quantity is split across multiple fulfillment
        // group items, so rounding remainders can be redistributed among them.
        Map<OrderItem,List<FulfillmentGroupItem>> partialOrderItemMap = new HashMap<OrderItem,List<FulfillmentGroupItem>>();
        // Calculate the fulfillmentGroupItem total
        populateItemTotalAmount(order, partialOrderItemMap);
        fixItemTotalRoundingIssues(order, partialOrderItemMap);
        // Calculate the fulfillmentGroupItem prorated orderSavings
        Money totalAllItemsAmount = calculateTotalPriceForAllFulfillmentItems(order);
        Money totalOrderAdjustmentDistributed = distributeOrderSavingsToItems(order, totalAllItemsAmount.getAmount());
        fixOrderSavingsRoundingIssues(order, totalOrderAdjustmentDistributed);
        // Step 3: Finalize the taxable amounts
        updateTaxableAmountsOnItems(order);
        context.setSeedData(order);
        return context;
    }
    /**
     * Sets the fulfillment amount which includes the relative portion of the total price for
     * the corresponding order item.
     *
     * @param order
     * @param partialOrderItemMap collects fg items whose quantity differs from
     *        their order item's quantity (candidates for remainder distribution)
     */
    protected void populateItemTotalAmount(Order order, Map<OrderItem, List<FulfillmentGroupItem>> partialOrderItemMap) {
        for (FulfillmentGroup fulfillmentGroup : order.getFulfillmentGroups()) {
            for (FulfillmentGroupItem fgItem : fulfillmentGroup.getFulfillmentGroupItems()) {
                OrderItem orderItem = fgItem.getOrderItem();
                int fgItemQty = fgItem.getQuantity();
                int orderItemQty = orderItem.getQuantity();
                Money totalItemAmount = orderItem.getTotalPrice();
                if (fgItemQty != orderItemQty) {
                    // We need to keep track of all of these items in case we need to distribute a remainder
                    // to one or more of the items.
                    List<FulfillmentGroupItem> fgItemList = partialOrderItemMap.get(orderItem);
                    if (fgItemList == null) {
                        fgItemList = new ArrayList<FulfillmentGroupItem>();
                        partialOrderItemMap.put(orderItem, fgItemList);
                    }
                    fgItemList.add(fgItem);
                    // Prorate by quantity share of the order item's total price.
                    fgItem.setTotalItemAmount(totalItemAmount.multiply(fgItemQty).divide(orderItemQty));
                } else {
                    fgItem.setTotalItemAmount(totalItemAmount);
                }
            }
        }
    }
    /**
     * Because an item may have multiple price details that don't round cleanly, we may have pennies
     * left over that need to be distributed.
     *
     * @param order
     * @param partialOrderItemMap
     */
    protected void fixItemTotalRoundingIssues(Order order, Map<OrderItem, List<FulfillmentGroupItem>> partialOrderItemMap) {
        for (OrderItem orderItem : partialOrderItemMap.keySet()) {
            Money totalItemAmount = orderItem.getTotalPrice();
            Money totalFGItemAmount = sumItemAmount(partialOrderItemMap.get(orderItem), order);
            Money amountDiff = totalItemAmount.subtract(totalFGItemAmount);
            if (!(amountDiff.getAmount().compareTo(BigDecimal.ZERO) == 0)) {
                // Spread the leftover pennies one currency unit at a time.
                long numApplicationsNeeded = countNumberOfUnits(amountDiff);
                Money unitAmount = getUnitAmount(amountDiff);
                for (FulfillmentGroupItem fgItem : partialOrderItemMap.get(orderItem)) {
                    numApplicationsNeeded = numApplicationsNeeded -
                            applyDifferenceToAmount(fgItem, numApplicationsNeeded, unitAmount);
                    if (numApplicationsNeeded == 0) {
                        break;
                    }
                }
            }
        }
    }
    /**
     * Returns the total price for all fulfillment items.
     * @param order
     * @return
     */
    protected Money calculateTotalPriceForAllFulfillmentItems(Order order) {
        Money totalAllItemsAmount = new Money(order.getCurrency());
        for (FulfillmentGroup fulfillmentGroup : order.getFulfillmentGroups()) {
            for (FulfillmentGroupItem fgItem : fulfillmentGroup.getFulfillmentGroupItems()) {
                totalAllItemsAmount = totalAllItemsAmount.add(fgItem.getTotalItemAmount());
            }
        }
        return totalAllItemsAmount;
    }
    /**
     * Distributes the order adjustments (if any) to the individual fulfillment group items.
     * Each item's share is floored; the leftover pennies are repaired afterwards
     * by {@link #fixOrderSavingsRoundingIssues(Order, Money)}.
     * @param order
     * @param totalAllItems
     * @return the total amount actually distributed
     */
    protected Money distributeOrderSavingsToItems(Order order, BigDecimal totalAllItems) {
        Money returnAmount = new Money(order.getCurrency());
        BigDecimal orderAdjAmt = order.getOrderAdjustmentsValue().getAmount();
        for (FulfillmentGroup fulfillmentGroup : order.getFulfillmentGroups()) {
            for (FulfillmentGroupItem fgItem : fulfillmentGroup.getFulfillmentGroupItems()) {
                BigDecimal fgItemAmount = fgItem.getTotalItemAmount().getAmount();
                // Guard against divide-by-zero when all item totals sum to zero.
                BigDecimal proratedAdjAmt = totalAllItems.compareTo(BigDecimal.ZERO) == 0 ? totalAllItems : orderAdjAmt.multiply(fgItemAmount).divide(totalAllItems, RoundingMode.FLOOR);
                fgItem.setProratedOrderAdjustmentAmount(new Money(proratedAdjAmt, order.getCurrency()));
                returnAmount = returnAmount.add(fgItem.getProratedOrderAdjustmentAmount());
            }
        }
        return returnAmount;
    }
    /**
     * It is possible due to rounding that the order adjustments do not match the
     * total. This method fixes by adding or removing the pennies.
     * @param order
     * @param totalOrderAdjustmentDistributed the sum actually prorated onto the items
     */
    protected void fixOrderSavingsRoundingIssues(Order order, Money totalOrderAdjustmentDistributed) {
        if (!order.getHasOrderAdjustments()) {
            return;
        }
        Money orderAdjustmentTotal = order.getOrderAdjustmentsValue();
        Money amountDiff = totalOrderAdjustmentDistributed.subtract(orderAdjustmentTotal);
        if (!(amountDiff.getAmount().compareTo(BigDecimal.ZERO) == 0)) {
            long numApplicationsNeeded = countNumberOfUnits(amountDiff);
            Money unitAmount = getUnitAmount(amountDiff);
            for (FulfillmentGroup fulfillmentGroup : order.getFulfillmentGroups()) {
                for (FulfillmentGroupItem fgItem : fulfillmentGroup.getFulfillmentGroupItems()) {
                    numApplicationsNeeded = numApplicationsNeeded -
                            applyDifferenceToProratedAdj(fgItem, numApplicationsNeeded, unitAmount);
                    // NOTE(review): this break only exits the inner loop; once
                    // numApplicationsNeeded reaches 0 the remaining groups apply
                    // a zero delta, so the result is unchanged (just extra work).
                    if (numApplicationsNeeded == 0) {
                        break;
                    }
                }
            }
        }
    }
    /**
     * Finalizes the taxable amount on each fulfillment group item: total item
     * amount minus any prorated order adjustment for taxable items, zero for
     * non-taxable items.
     * @param order
     * @return
     */
    protected void updateTaxableAmountsOnItems(Order order) {
        Money zero = new Money(order.getCurrency());
        for (FulfillmentGroup fulfillmentGroup : order.getFulfillmentGroups()) {
            for (FulfillmentGroupItem fgItem : fulfillmentGroup.getFulfillmentGroupItems()) {
                if (fgItem.getOrderItem().isTaxable()) {
                    Money proratedOrderAdjAmt = fgItem.getProratedOrderAdjustmentAmount();
                    if (proratedOrderAdjAmt != null) {
                        fgItem.setTotalItemTaxableAmount(fgItem.getTotalItemAmount().subtract(proratedOrderAdjAmt));
                    } else {
                        fgItem.setTotalItemTaxableAmount(fgItem.getTotalItemAmount());
                    }
                } else {
                    fgItem.setTotalItemTaxableAmount(zero);
                }
            }
        }
    }
    // Sums the total item amount across the given fulfillment group items.
    protected Money sumItemAmount(List<FulfillmentGroupItem> items, Order order) {
        Money totalAmount = new Money(order.getCurrency());
        for (FulfillmentGroupItem fgItem : items) {
            totalAmount = totalAmount.add(fgItem.getTotalItemAmount());
        }
        return totalAmount;
    }
    // Sums the taxable amount across the given fulfillment group items.
    protected Money sumTaxAmount(List<FulfillmentGroupItem> items, Order order) {
        Money taxAmount = new Money(order.getCurrency());
        for (FulfillmentGroupItem fgItem : items) {
            taxAmount = taxAmount.add(fgItem.getTotalItemTaxableAmount());
        }
        return taxAmount;
    }
    /**
     * Converts a monetary difference into a (signed) count of smallest currency
     * units (e.g. cents for USD).
     * NOTE(review): goes through double and Math.round — presumably safe for the
     * penny-scale differences produced here, but confirm for exotic currencies
     * with more fraction digits.
     */
    public long countNumberOfUnits(Money difference) {
        double numUnits = difference.multiply(Math.pow(10, difference.getCurrency().getDefaultFractionDigits())).doubleValue();
        return Math.round(numUnits);
    }
    /**
     * Returns the unit amount (e.g. .01 for US), negated when the difference
     * is negative so repeated application moves toward zero difference.
     * @param difference
     * @return
     */
    public Money getUnitAmount(Money difference) {
        Currency currency = difference.getCurrency();
        BigDecimal divisor = new BigDecimal(Math.pow(10, currency.getDefaultFractionDigits()));
        BigDecimal unitAmount = new BigDecimal("1").divide(divisor);
        if (difference.lessThan(BigDecimal.ZERO)) {
            unitAmount = unitAmount.negate();
        }
        return new Money(unitAmount, currency);
    }
    // Applies up to min(numApplicationsNeeded, quantity) unit amounts to the
    // item's total amount; returns how many were applied.
    public long applyDifferenceToAmount(FulfillmentGroupItem fgItem, long numApplicationsNeeded, Money unitAmount) {
        BigDecimal numTimesToApply = new BigDecimal(Math.min(numApplicationsNeeded, fgItem.getQuantity()));
        Money oldAmount = fgItem.getTotalItemAmount();
        Money changeToAmount = unitAmount.multiply(numTimesToApply);
        fgItem.setTotalItemAmount(oldAmount.add(changeToAmount));
        return numTimesToApply.longValue();
    }
    // Same as applyDifferenceToAmount, but targets the prorated order adjustment.
    public long applyDifferenceToProratedAdj(FulfillmentGroupItem fgItem, long numApplicationsNeeded, Money unitAmount) {
        BigDecimal numTimesToApply = new BigDecimal(Math.min(numApplicationsNeeded, fgItem.getQuantity()));
        Money oldAmount = fgItem.getProratedOrderAdjustmentAmount();
        Money changeToAmount = unitAmount.multiply(numTimesToApply);
        fgItem.setProratedOrderAdjustmentAmount(oldAmount.add(changeToAmount));
        return numTimesToApply.longValue();
    }
    // Same as applyDifferenceToAmount, but targets the total taxable amount.
    public long applyTaxDifference(FulfillmentGroupItem fgItem, long numApplicationsNeeded, Money unitAmount) {
        BigDecimal numTimesToApply = new BigDecimal(Math.min(numApplicationsNeeded, fgItem.getQuantity()));
        Money oldAmount = fgItem.getTotalItemTaxableAmount();
        Money changeToAmount = unitAmount.multiply(numTimesToApply);
        fgItem.setTotalItemTaxableAmount(oldAmount.add(changeToAmount));
        return numTimesToApply.longValue();
    }
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_pricing_service_workflow_FulfillmentItemPricingActivity.java
|
3,591 |
/**
 * Configuration holder for a Hazelcast transaction: timeout, durability and
 * transaction type. All setters validate their input and return {@code this}
 * for fluent chaining.
 */
public final class TransactionOptions implements DataSerializable {

    private long timeoutMillis;
    private int durability;
    private TransactionType transactionType;

    /**
     * Creates a TransactionOptions with the defaults: a 2 minute timeout,
     * a durability of 1 and {@link TransactionType#TWO_PHASE}.
     */
    public TransactionOptions() {
        setTimeout(2, TimeUnit.MINUTES).setDurability(1).setTransactionType(TransactionType.TWO_PHASE);
    }

    /**
     * @return the configured {@link TransactionType}, never {@code null}.
     */
    public TransactionType getTransactionType() {
        return transactionType;
    }

    /**
     * Configures the {@link TransactionType}.
     * <p>
     * LOCAL is less safe than TWO_PHASE: a member failure during commit can
     * leave some changes applied and others not.
     *
     * @param transactionType the type to use; must not be {@code null}
     * @return this TransactionOptions, for chaining
     */
    public TransactionOptions setTransactionType(TransactionType transactionType) {
        if (transactionType == null) {
            throw new IllegalArgumentException("transactionType can't be null");
        }
        this.transactionType = transactionType;
        return this;
    }

    /**
     * @return the configured timeout, in milliseconds.
     */
    public long getTimeoutMillis() {
        return timeoutMillis;
    }

    /**
     * Configures the maximum lifespan of the transaction; once exceeded,
     * an uncommitted transaction is rolled back automatically.
     *
     * @param timeout  the timeout value; must be positive
     * @param timeUnit the unit of {@code timeout}; must not be {@code null}
     * @return this TransactionOptions, for chaining
     * @throws IllegalArgumentException for a non-positive timeout or null unit
     */
    public TransactionOptions setTimeout(long timeout, TimeUnit timeUnit) {
        if (timeout <= 0) {
            throw new IllegalArgumentException("Timeout must be positive!");
        }
        if (timeUnit == null) {
            throw new IllegalArgumentException("timeunit can't be null");
        }
        this.timeoutMillis = timeUnit.toMillis(timeout);
        return this;
    }

    /**
     * @return the transaction durability (backup count for TWO_PHASE).
     */
    public int getDurability() {
        return durability;
    }

    /**
     * Configures the durability: the number of members that can take over if
     * one fails mid commit/rollback. Only meaningful for TWO_PHASE.
     *
     * @param durability the durability; must be zero or greater
     * @return this TransactionOptions, for chaining
     * @throws IllegalArgumentException when durability is negative
     */
    public TransactionOptions setDurability(int durability) {
        if (durability < 0) {
            throw new IllegalArgumentException("Durability cannot be negative!");
        }
        this.durability = durability;
        return this;
    }

    /**
     * Factory for a default-configured instance; equivalent to the no-arg
     * constructor.
     *
     * @return a fresh default TransactionOptions
     */
    public static TransactionOptions getDefault() {
        return new TransactionOptions();
    }

    @Override
    public void writeData(ObjectDataOutput out) throws IOException {
        // Field order must mirror readData exactly.
        out.writeLong(timeoutMillis);
        out.writeInt(durability);
        out.writeInt(transactionType.value);
    }

    @Override
    public void readData(ObjectDataInput in) throws IOException {
        timeoutMillis = in.readLong();
        durability = in.readInt();
        transactionType = TransactionType.getByValue(in.readInt());
    }

    @Override
    public String toString() {
        return "TransactionOptions"
                + "{timeoutMillis=" + timeoutMillis
                + ", durability=" + durability
                + ", txType=" + transactionType.value
                + '}';
    }

    /** Wire-encoded transaction type. */
    public enum TransactionType {
        TWO_PHASE(1), LOCAL(2);

        private final int value;

        TransactionType(int value) {
            this.value = value;
        }

        /** Resolves a wire value; unknown values fall back to TWO_PHASE. */
        public static TransactionType getByValue(int value) {
            for (TransactionType candidate : values()) {
                if (candidate.value == value) {
                    return candidate;
                }
            }
            return TWO_PHASE;
        }
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_transaction_TransactionOptions.java
|
30 |
/**
 * Rule-builder field service exposing the Locale fields that can be targeted
 * by rules (friendly name and locale code, both plain text).
 */
@Service("blLocaleFieldService")
public class LocaleFieldServiceImpl extends AbstractRuleBuilderFieldService {

    @Override
    public void init() {
        // Registration order determines display order in the rule builder.
        addTextField("rule_localeName", "friendlyName");
        addTextField("rule_localeCode", "localeCode");
    }

    // Builds and registers a STRING field with the standard text operators.
    private void addTextField(String label, String name) {
        fields.add(new FieldData.Builder()
                .label(label)
                .name(name)
                .operators("blcOperators_Text")
                .options("[]")
                .type(SupportedFieldType.STRING)
                .build());
    }

    @Override
    public String getName() {
        return RuleIdentifier.LOCALE;
    }

    @Override
    public String getDtoClassName() {
        return "org.broadleafcommerce.common.locale.domain.LocaleImpl";
    }
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_LocaleFieldServiceImpl.java
|
2,352 |
// List-accumulating combiner: buffers values per key and hands out a snapshot
// of the buffer on each chunk boundary, then resets for the next chunk.
return new Combiner<KeyIn, ValueIn, List<ValueIn>>() {
    private final List<ValueIn> buffer = new ArrayList<ValueIn>();

    @Override
    public void combine(KeyIn key, ValueIn value) {
        buffer.add(value);
    }

    @Override
    public List<ValueIn> finalizeChunk() {
        // Return a defensive copy so the cleared buffer can't mutate the chunk.
        List<ValueIn> chunk = new ArrayList<ValueIn>(buffer);
        buffer.clear();
        return chunk;
    }
};
| 1no label
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_DefaultContext.java
|
1,191 |
public class BulkUdpService extends AbstractLifecycleComponent<BulkUdpService> {
private final Client client;
private final NetworkService networkService;
private final boolean enabled;
final String host;
final String port;
final ByteSizeValue receiveBufferSize;
final ReceiveBufferSizePredictorFactory receiveBufferSizePredictorFactory;
final int bulkActions;
final ByteSizeValue bulkSize;
final TimeValue flushInterval;
final int concurrentRequests;
private BulkProcessor bulkProcessor;
private ConnectionlessBootstrap bootstrap;
private Channel channel;
@Inject
public BulkUdpService(Settings settings, Client client, NetworkService networkService) {
super(settings);
this.client = client;
this.networkService = networkService;
this.host = componentSettings.get("host");
this.port = componentSettings.get("port", "9700-9800");
this.bulkActions = componentSettings.getAsInt("bulk_actions", 1000);
this.bulkSize = componentSettings.getAsBytesSize("bulk_size", new ByteSizeValue(5, ByteSizeUnit.MB));
this.flushInterval = componentSettings.getAsTime("flush_interval", TimeValue.timeValueSeconds(5));
this.concurrentRequests = componentSettings.getAsInt("concurrent_requests", 4);
this.receiveBufferSize = componentSettings.getAsBytesSize("receive_buffer_size", new ByteSizeValue(10, ByteSizeUnit.MB));
this.receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory(componentSettings.getAsBytesSize("receive_predictor_size", receiveBufferSize).bytesAsInt());
this.enabled = componentSettings.getAsBoolean("enabled", false);
logger.debug("using enabled [{}], host [{}], port [{}], bulk_actions [{}], bulk_size [{}], flush_interval [{}], concurrent_requests [{}]",
enabled, host, port, bulkActions, bulkSize, flushInterval, concurrentRequests);
}
@Override
// Starts the UDP bulk ingestion endpoint: builds the bulk processor, configures a
// Netty connectionless (datagram) bootstrap, and binds to the first free port in range.
protected void doStart() throws ElasticsearchException {
// Service is opt-in; nothing to start unless explicitly enabled in settings.
if (!enabled) {
return;
}
// Batches incoming UDP payloads into bulk requests; BulkListener logs outcomes.
bulkProcessor = BulkProcessor.builder(client, new BulkListener())
.setBulkActions(bulkActions)
.setBulkSize(bulkSize)
.setFlushInterval(flushInterval)
.setConcurrentRequests(concurrentRequests)
.build();
bootstrap = new ConnectionlessBootstrap(new NioDatagramChannelFactory(Executors.newCachedThreadPool(daemonThreadFactory(settings, "bulk_udp_worker"))));
bootstrap.setOption("receiveBufferSize", receiveBufferSize.bytesAsInt());
bootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
// Broadcast is explicitly disabled.
// NOTE(review): the option value is the String "false", not Boolean.FALSE — verify
// Netty's option conversion treats this as intended.
bootstrap.setOption("broadcast", "false");
bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(new Handler());
}
});
InetAddress hostAddressX;
try {
hostAddressX = networkService.resolveBindHostAddress(host);
} catch (IOException e) {
// Start aborts on unresolvable host; bulkProcessor/bootstrap remain for doStop to clean up.
logger.warn("failed to resolve host {}", e, host);
return;
}
final InetAddress hostAddress = hostAddressX;
PortsRange portsRange = new PortsRange(port);
final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
// Try each port in the configured range until one binds successfully.
boolean success = portsRange.iterate(new PortsRange.PortCallback() {
@Override
public boolean onPortNumber(int portNumber) {
try {
channel = bootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
} catch (Exception e) {
lastException.set(e);
return false;
}
return true;
}
});
if (!success) {
// Only the last bind failure is reported.
logger.warn("failed to bind to {}/{}", lastException.get(), hostAddress, port);
return;
}
logger.info("address {}", channel.getLocalAddress());
}
@Override
// Stops the service: closes the bound channel, releases Netty resources, then
// flushes and closes the bulk processor (in that order, so no new data arrives
// while the processor drains).
protected void doStop() throws ElasticsearchException {
if (!enabled) {
return;
}
// channel may be null if doStart failed to resolve the host or bind a port.
if (channel != null) {
channel.close().awaitUninterruptibly();
}
if (bootstrap != null) {
bootstrap.releaseExternalResources();
}
// NOTE(review): assumes doStart ran far enough to build bulkProcessor when
// enabled is true — it is created before any early return after the enabled check.
bulkProcessor.close();
}
@Override
// No additional resources to release beyond what doStop already cleans up.
protected void doClose() throws ElasticsearchException {
}
// Netty upstream handler: feeds each received UDP datagram into the bulk processor
// as a raw bulk-request payload.
class Handler extends SimpleChannelUpstreamHandler {
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
ChannelBuffer buffer = (ChannelBuffer) e.getMessage();
logger.trace("received message size [{}]", buffer.readableBytes());
try {
// contentType and payload defaults are null; the processor parses the raw bytes.
bulkProcessor.add(new ChannelBufferBytesReference(buffer), false, null, null);
} catch (Exception e1) {
// A malformed datagram must not kill the pipeline; log and keep serving.
logger.warn("failed to execute bulk request", e1);
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
if (e.getCause() instanceof BindException) {
// ignore, this happens when we retry binding to several ports, its fine if we fail...
return;
}
logger.warn("failure caught", e.getCause());
}
}
// Observes bulk execution lifecycle purely for logging; no retry or error recovery.
class BulkListener implements BulkProcessor.Listener {
@Override
public void beforeBulk(long executionId, BulkRequest request) {
if (logger.isTraceEnabled()) {
logger.trace("[{}] executing [{}]/[{}]", executionId, request.numberOfActions(), new ByteSizeValue(request.estimatedSizeInBytes()));
}
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
if (logger.isTraceEnabled()) {
logger.trace("[{}] executed [{}]/[{}], took [{}]", executionId, request.numberOfActions(), new ByteSizeValue(request.estimatedSizeInBytes()), response.getTook());
}
// Partial failures within an otherwise successful bulk are surfaced as a warning only.
if (response.hasFailures()) {
logger.warn("[{}] failed to execute bulk request: {}", executionId, response.buildFailureMessage());
}
}
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable e) {
logger.warn("[{}] failed to execute bulk request", e, executionId);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_bulk_udp_BulkUdpService.java
|
36 |
@Service("blTimeFieldService")
public class TimeFieldServiceImpl extends AbstractRuleBuilderFieldService {

    /**
     * Registers the time-based rule builder fields in display order: five
     * enumeration-backed time components followed by a plain date field.
     */
    @Override
    public void init() {
        addEnumerationField("rule_timeHourOfDay", "hour", "blcOptions_HourOfDay");
        addEnumerationField("rule_timeDayOfWeek", "dayOfWeek", "blcOptions_DayOfWeek");
        addEnumerationField("rule_timeMonth", "month", "blcOptions_Month");
        addEnumerationField("rule_timeDayOfMonth", "dayOfMonth", "blcOptions_DayOfMonth");
        addEnumerationField("rule_timeMinute", "minute", "blcOptions_Minute");
        // The date field uses date operators and has no enumeration options.
        fields.add(new FieldData.Builder()
                .label("rule_timeDate")
                .name("date")
                .operators("blcOperators_Date")
                .options("[]")
                .type(SupportedFieldType.DATE)
                .build());
    }

    /**
     * Adds one BROADLEAF_ENUMERATION field wired to the shared enumeration operators.
     *
     * @param label   i18n label key for the field
     * @param name    property name the rule evaluates against
     * @param options js variable holding the enumeration option list
     */
    private void addEnumerationField(String label, String name, String options) {
        fields.add(new FieldData.Builder()
                .label(label)
                .name(name)
                .operators("blcOperators_Enumeration")
                .options(options)
                .type(SupportedFieldType.BROADLEAF_ENUMERATION)
                .build());
    }

    @Override
    public String getName() {
        return RuleIdentifier.TIME;
    }

    @Override
    public String getDtoClassName() {
        return "org.broadleafcommerce.common.TimeDTO";
    }
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_TimeFieldServiceImpl.java
|
875 |
// Serializer facade over Kryo. Kryo instances are not thread-safe, so each thread
// gets its own via a ThreadLocal; registered classes receive stable ids starting at
// KRYO_ID_OFFSET so serialized data stays readable across instances with the same
// registration list.
public class KryoSerializer {
public static final int DEFAULT_MAX_OUTPUT_SIZE = 10 * 1024 * 1024; // 10 MB in bytes
public static final int KRYO_ID_OFFSET = 50;
private final boolean registerRequired;
private final ThreadLocal<Kryo> kryos;
// NOTE(review): this map is never populated or read anywhere in this class; it and
// the TypeRegistration holder below appear to be dead code — confirm before removal.
private final Map<Integer,TypeRegistration> registrations;
private final int maxOutputSize;
private static final StaticBuffer.Factory<Input> INPUT_FACTORY = new StaticBuffer.Factory<Input>() {
@Override
public Input get(byte[] array, int offset, int limit) {
//Needs to copy array - otherwise we see BufferUnderflow exceptions from concurrent access
//See https://github.com/EsotericSoftware/kryo#threading
// NOTE(review): copyOfRange treats 'limit' as an absolute (exclusive) end index,
// not a length — confirm against the StaticBuffer.Factory contract.
return new Input(Arrays.copyOfRange(array,offset,limit));
}
};
public KryoSerializer(final List<Class> defaultRegistrations) {
this(defaultRegistrations, false);
}
public KryoSerializer(final List<Class> defaultRegistrations, boolean registrationRequired) {
this(defaultRegistrations, registrationRequired, DEFAULT_MAX_OUTPUT_SIZE);
}
public KryoSerializer(final List<Class> defaultRegistrations, boolean registrationRequired, int maxOutputSize) {
this.maxOutputSize = maxOutputSize;
this.registerRequired = registrationRequired;
this.registrations = new HashMap<Integer,TypeRegistration>();
// Pre-registered classes are trusted to round-trip; seed the verification cache.
for (Class clazz : defaultRegistrations) {
//            Preconditions.checkArgument(isValidClass(clazz),"Class does not have a default constructor: %s",clazz.getName());
objectVerificationCache.put(clazz,Boolean.TRUE);
}
kryos = new ThreadLocal<Kryo>() {
public Kryo initialValue() {
Kryo k = new Kryo();
k.setRegistrationRequired(registerRequired);
k.register(Class.class,new DefaultSerializers.ClassSerializer());
// Registration ids must be identical on every thread/instance, hence the
// fixed offset plus the list position.
for (int i=0;i<defaultRegistrations.size();i++) {
Class clazz = defaultRegistrations.get(i);
k.register(clazz, KRYO_ID_OFFSET + i);
}
return k;
}
};
}
// Returns the calling thread's Kryo instance.
Kryo getKryo() {
return kryos.get();
}
// Reads a (class, object) pair and advances the buffer by exactly the bytes consumed.
public Object readClassAndObject(ReadBuffer buffer) {
Input i = buffer.asRelative(INPUT_FACTORY);
int startPos = i.position();
Object value = getKryo().readClassAndObject(i);
buffer.movePositionTo(buffer.getPosition()+i.position()-startPos);
return value;
}
//    public <T> T readObject(ReadBuffer buffer, Class<T> type) {
//        Input i = buffer.asRelative(INPUT_FACTORY);
//        int startPos = i.position();
//        T value = getKryo().readObjectOrNull(i, type);
//        buffer.movePositionTo(buffer.getPosition()+i.position()-startPos);
//        return value;
//    }
// Reads a non-null object of a known type; buffer position is advanced in step.
public <T> T readObjectNotNull(ReadBuffer buffer, Class<T> type) {
Input i = buffer.asRelative(INPUT_FACTORY);
int startPos = i.position();
T value = getKryo().readObject(i, type);
buffer.movePositionTo(buffer.getPosition()+i.position()-startPos);
return value;
}
// NOTE(review): the 'object' argument is unused — the output is sized by the fixed
// initial capacity and maxOutputSize only.
private Output getOutput(Object object) {
return new Output(128,maxOutputSize);
}
// Copies the bytes written so far (up to output.position()) into the write buffer.
private void writeOutput(WriteBuffer out, Output output) {
byte[] array = output.getBuffer();
int limit = output.position();
for (int i=0;i<limit;i++) out.putByte(array[i]);
}
//    public void writeObject(WriteBuffer out, Object object, Class<?> type) {
//        Preconditions.checkArgument(isValidObject(object), "Cannot de-/serialize object: %s", object);
//        Output output = getOutput(object);
//        getKryo().writeObjectOrNull(output, object, type);
//        writeOutput(out,output);
//    }
// Writes a non-null object without class information (reader must know the type).
public void writeObjectNotNull(WriteBuffer out, Object object) {
Preconditions.checkNotNull(object);
Preconditions.checkArgument(isValidObject(object), "Cannot de-/serialize object: %s", object);
Output output = getOutput(object);
getKryo().writeObject(output, object);
writeOutput(out,output);
}
// Writes the class identifier followed by the object.
public void writeClassAndObject(WriteBuffer out, Object object) {
Preconditions.checkArgument(isValidObject(object), "Cannot de-/serialize object: %s", object);
Output output = getOutput(object);
getKryo().writeClassAndObject(output, object);
writeOutput(out,output);
}
// Caches per-class results of the round-trip verification below.
private final Cache<Class<?>,Boolean> objectVerificationCache = CacheBuilder.newBuilder()
.maximumSize(10000).concurrencyLevel(4).initialCapacity(32).build();
// An object is serializable here if a write/read round trip yields an equal object.
// Classes with a custom (non-FieldSerializer) serializer are trusted without testing.
final boolean isValidObject(final Object o) {
if (o==null) return true;
Boolean status = objectVerificationCache.getIfPresent(o.getClass());
if (status==null) {
Kryo kryo = getKryo();
if (!(kryo.getSerializer(o.getClass()) instanceof FieldSerializer)) status=Boolean.TRUE;
else if (!isValidClass(o.getClass())) status=Boolean.FALSE;
else {
try {
Output out = new Output(128, maxOutputSize);
kryo.writeClassAndObject(out,o);
Input in = new Input(out.getBuffer(),0,out.position());
Object ocopy = kryo.readClassAndObject(in);
status=(o.equals(ocopy)?Boolean.TRUE:Boolean.FALSE);
} catch (Throwable e) {
// Any serialization failure marks the class invalid rather than propagating.
status=Boolean.FALSE;
}
}
objectVerificationCache.put(o.getClass(),status);
}
return status;
}
// A class is instantiable by Kryo's FieldSerializer if it is primitive, an enum,
// an array of a valid component type, or declares a no-arg constructor.
public static final boolean isValidClass(Class<?> type) {
if (type.isPrimitive()) return true;
else if (Enum.class.isAssignableFrom(type)) return true;
else if (type.isArray()) {
return isValidClass(type.getComponentType());
} else {
for (Constructor c : type.getDeclaredConstructors()) {
if (c.getParameterTypes().length==0) return true;
}
return false;
}
}
// Pairs a class with its custom serializer. NOTE(review): unused within this class.
private static class TypeRegistration {
final Class type;
final com.esotericsoftware.kryo.Serializer serializer;
TypeRegistration(Class type, com.esotericsoftware.kryo.Serializer serializer) {
this.type=type;
this.serializer=serializer;
}
}
}
| 1no label
|
titan-core_src_main_java_com_thinkaurelius_titan_graphdb_database_serialize_kryo_KryoSerializer.java
|
763 |
// Backup counterpart of the list set(index) operation: replaces the item identified
// by oldItemId with (itemId, value) on the backup replica.
public class ListSetBackupOperation extends CollectionOperation implements BackupOperation {
// Id of the item being replaced on the backup.
private long oldItemId;
// Id assigned to the replacement item.
private long itemId;
// Serialized replacement value.
private Data value;
public ListSetBackupOperation() {
}
public ListSetBackupOperation(String name, long oldItemId, long itemId, Data value) {
super(name);
this.oldItemId = oldItemId;
this.itemId = itemId;
this.value = value;
}
@Override
public int getId() {
return CollectionDataSerializerHook.LIST_SET_BACKUP;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
getOrCreateListContainer().setBackup(oldItemId, itemId, value);
}
@Override
public void afterRun() throws Exception {
}
// Wire format: field order below is the protocol — readInternal must mirror it exactly.
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(oldItemId);
out.writeLong(itemId);
value.writeData(out);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
oldItemId = in.readLong();
itemId = in.readLong();
value = new Data();
value.readData(in);
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_list_ListSetBackupOperation.java
|
26 |
// Completion action that runs 'fn' once BOTH source futures are complete, then
// completes 'dst'. The compareAndSet(0, 1) claim guarantees at most one thread
// executes the action even when both sources complete concurrently.
static final class RunAfterBoth extends Completion {
final CompletableFuture<?> src;
final CompletableFuture<?> snd;
final Runnable fn;
final CompletableFuture<Void> dst;
final Executor executor;
RunAfterBoth(CompletableFuture<?> src,
CompletableFuture<?> snd,
Runnable fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.snd = snd;
this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<?> a;
final CompletableFuture<?> b;
final Runnable fn;
final CompletableFuture<Void> dst;
Object r, s; Throwable ex;
// Proceed only when all fields are set, both results are present, and this
// thread wins the claim; null checks tolerate races with field clearing.
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
(b = this.snd) != null &&
(s = b.result) != null &&
compareAndSet(0, 1)) {
// An AltResult wraps an exceptional outcome; either source's exception
// short-circuits execution of fn (first source's exception wins).
if (r instanceof AltResult)
ex = ((AltResult)r).ex;
else
ex = null;
if (ex == null && (s instanceof AltResult))
ex = ((AltResult)s).ex;
Executor e = executor;
if (ex == null) {
try {
// With an executor, completion of dst is delegated to AsyncRun;
// otherwise fn runs inline and dst is completed below.
if (e != null)
e.execute(new AsyncRun(fn, dst));
else
fn.run();
} catch (Throwable rex) {
ex = rex;
}
}
// Complete dst here unless an async task took ownership of it.
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
653 |
/**
 * Master-node read action that returns index templates from the cluster state,
 * either all of them (no names given) or those matching the requested names,
 * where a name may be an exact template name or a simple wildcard pattern.
 */
public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadOperationAction<GetIndexTemplatesRequest, GetIndexTemplatesResponse> {

    @Inject
    public TransportGetIndexTemplatesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
        super(settings, transportService, clusterService, threadPool);
    }

    @Override
    protected String transportAction() {
        return GetIndexTemplatesAction.NAME;
    }

    @Override
    protected String executor() {
        // Reading templates out of cluster state is cheap; run on the calling thread.
        return ThreadPool.Names.SAME;
    }

    @Override
    protected GetIndexTemplatesRequest newRequest() {
        return new GetIndexTemplatesRequest();
    }

    @Override
    protected GetIndexTemplatesResponse newResponse() {
        return new GetIndexTemplatesResponse();
    }

    @Override
    protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener<GetIndexTemplatesResponse> listener) throws ElasticsearchException {
        final List<IndexTemplateMetaData> templates;
        if (request.names().length == 0) {
            // No names requested: return every template in the cluster state.
            templates = Lists.newArrayList(state.metaData().templates().values().toArray(IndexTemplateMetaData.class));
        } else {
            templates = Lists.newArrayList();
            for (String name : request.names()) {
                if (Regex.isSimpleMatchPattern(name)) {
                    // Wildcard: collect every template whose name matches the pattern.
                    for (ObjectObjectCursor<String, IndexTemplateMetaData> entry : state.metaData().templates()) {
                        if (Regex.simpleMatch(name, entry.key)) {
                            templates.add(entry.value);
                        }
                    }
                } else if (state.metaData().templates().containsKey(name)) {
                    // Exact lookup; silently skipped when the template does not exist.
                    templates.add(state.metaData().templates().get(name));
                }
            }
        }
        listener.onResponse(new GetIndexTemplatesResponse(templates));
    }
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_template_get_TransportGetIndexTemplatesAction.java
|
3,248 |
/**
 * Permission governing IAtomicLong access. Known action bits are READ and MODIFY,
 * combined with the CREATE/DESTROY bits inherited from {@code InstancePermission}.
 */
public class AtomicLongPermission extends InstancePermission {

    private static final int READ = 0x4;
    private static final int MODIFY = 0x8;
    private static final int ALL = READ | MODIFY | CREATE | DESTROY;

    public AtomicLongPermission(String name, String... actions) {
        super(name, actions);
    }

    /**
     * Translates the requested action names into a permission bit mask.
     * The "all" action short-circuits to every bit; unrecognized actions
     * contribute nothing.
     */
    @Override
    protected int initMask(String[] actions) {
        int grantedBits = NONE;
        for (String requested : actions) {
            if (ActionConstants.ACTION_ALL.equals(requested)) {
                return ALL;
            } else if (ActionConstants.ACTION_CREATE.equals(requested)) {
                grantedBits |= CREATE;
            } else if (ActionConstants.ACTION_READ.equals(requested)) {
                grantedBits |= READ;
            } else if (ActionConstants.ACTION_MODIFY.equals(requested)) {
                grantedBits |= MODIFY;
            } else if (ActionConstants.ACTION_DESTROY.equals(requested)) {
                grantedBits |= DESTROY;
            }
        }
        return grantedBits;
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_security_permission_AtomicLongPermission.java
|
3,387 |
public class EventServiceImpl implements EventService {
// Shared empty result to avoid allocating on every miss.
private static final EventRegistration[] EMPTY_REGISTRATIONS = new EventRegistration[0];
private final ILogger logger;
private final NodeEngineImpl nodeEngine;
// One segment of registrations per service name.
private final ConcurrentMap<String, EventServiceSegment> segments;
// Executor that preserves per-orderKey ordering of dispatched events.
private final StripedExecutor eventExecutor;
private final int eventQueueTimeoutMs;
private final int eventThreadCount;
private final int eventQueueCapacity;
// Builds the event service with thread-count/queue sizing taken from group properties.
EventServiceImpl(NodeEngineImpl nodeEngine) {
this.nodeEngine = nodeEngine;
this.logger = nodeEngine.getLogger(EventService.class.getName());
final Node node = nodeEngine.getNode();
GroupProperties groupProperties = node.getGroupProperties();
this.eventThreadCount = groupProperties.EVENT_THREAD_COUNT.getInteger();
this.eventQueueCapacity = groupProperties.EVENT_QUEUE_CAPACITY.getInteger();
this.eventQueueTimeoutMs = groupProperties.EVENT_QUEUE_TIMEOUT_MILLIS.getInteger();
// Striped so events sharing an orderKey are processed sequentially on one lane.
this.eventExecutor = new StripedExecutor(
node.getLogger(EventServiceImpl.class),
node.getThreadNamePrefix("event"),
node.threadGroup,
eventThreadCount,
eventQueueCapacity);
this.segments = new ConcurrentHashMap<String, EventServiceSegment>();
}
@Override
// Configured number of event dispatch threads.
public int getEventThreadCount() {
return eventThreadCount;
}
@Override
// Configured capacity of the event work queue.
public int getEventQueueCapacity() {
return eventQueueCapacity;
}
@Override
// Current number of queued-but-unprocessed events.
public int getEventQueueSize() {
return eventExecutor.getWorkQueueSize();
}
@Override
// Local-only registration (not propagated to other members) with a pass-all filter.
public EventRegistration registerLocalListener(String serviceName, String topic, Object listener) {
return registerListenerInternal(serviceName, topic, new EmptyFilter(), listener, true);
}
@Override
// Local-only registration with an explicit event filter.
public EventRegistration registerLocalListener(String serviceName, String topic, EventFilter filter, Object listener) {
return registerListenerInternal(serviceName, topic, filter, listener, true);
}
@Override
// Cluster-wide registration (propagated to other members) with a pass-all filter.
public EventRegistration registerListener(String serviceName, String topic, Object listener) {
return registerListenerInternal(serviceName, topic, new EmptyFilter(), listener, false);
}
@Override
// Cluster-wide registration with an explicit event filter.
public EventRegistration registerListener(String serviceName, String topic, EventFilter filter, Object listener) {
return registerListenerInternal(serviceName, topic, filter, listener, false);
}
/**
 * Creates a registration with a random UUID id and adds it to the service's segment.
 * Returns null when an equal registration already exists; otherwise propagates the
 * registration to other members unless it is local-only.
 */
private EventRegistration registerListenerInternal(String serviceName, String topic, EventFilter filter,
                                                   Object listener, boolean localOnly) {
    if (listener == null) {
        throw new IllegalArgumentException("Listener required!");
    }
    if (filter == null) {
        throw new IllegalArgumentException("EventFilter required!");
    }
    EventServiceSegment segment = getSegment(serviceName, true);
    String registrationId = UUID.randomUUID().toString();
    Registration registration = new Registration(registrationId, serviceName, topic, filter,
            nodeEngine.getThisAddress(), listener, localOnly);
    if (!segment.addRegistration(topic, registration)) {
        // An equal registration was already present; nothing to add or propagate.
        return null;
    }
    if (!localOnly) {
        invokeRegistrationOnOtherNodes(serviceName, registration);
    }
    return registration;
}
// Applies a registration received from another member; registrations originating
// from this member are ignored (they were applied locally already).
private boolean handleRegistration(Registration reg) {
if (nodeEngine.getThisAddress().equals(reg.getSubscriber())) {
return false;
}
EventServiceSegment segment = getSegment(reg.serviceName, true);
return segment.addRegistration(reg.topic, reg);
}
/**
 * Removes the registration with the given id from the topic. When the registration
 * was cluster-wide, the removal is propagated to other members. Returns whether a
 * registration was actually removed.
 */
@Override
public boolean deregisterListener(String serviceName, String topic, Object id) {
    EventServiceSegment segment = getSegment(serviceName, false);
    if (segment == null) {
        return false;
    }
    String registrationId = String.valueOf(id);
    Registration removed = segment.removeRegistration(topic, registrationId);
    if (removed == null) {
        return false;
    }
    if (!removed.isLocalOnly()) {
        invokeDeregistrationOnOtherNodes(serviceName, topic, registrationId);
    }
    return true;
}
@Override
// Drops every registration for the topic on this member only (no remote propagation).
public void deregisterAllListeners(String serviceName, String topic) {
final EventServiceSegment segment = getSegment(serviceName, false);
if (segment != null) {
segment.removeRegistrations(topic);
}
}
// Local-only removal of a single registration, used when applying a remote deregistration.
private void deregisterSubscriber(String serviceName, String topic, String id) {
final EventServiceSegment segment = getSegment(serviceName, false);
if (segment != null) {
segment.removeRegistration(topic, id);
}
}
/**
 * Broadcasts a registration to every other cluster member and waits up to
 * 5 seconds per member for acknowledgement. Timeouts and member departures are
 * tolerated (best-effort propagation); execution failures are rethrown.
 */
private void invokeRegistrationOnOtherNodes(String serviceName, Registration reg) {
    Collection<MemberImpl> members = nodeEngine.getClusterService().getMemberList();
    Collection<Future> calls = new ArrayList<Future>(members.size());
    for (MemberImpl member : members) {
        if (!member.localMember()) {
            Future f = nodeEngine.getOperationService().invokeOnTarget(serviceName,
                    new RegistrationOperation(reg), member.getAddress());
            calls.add(f);
        }
    }
    for (Future f : calls) {
        try {
            f.get(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of silently swallowing it so
            // callers further up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
        } catch (TimeoutException ignored) {
            // Best-effort: a slow member does not fail the registration.
        } catch (MemberLeftException e) {
            logger.finest("Member left while registering listener...", e);
        } catch (ExecutionException e) {
            throw new HazelcastException(e);
        }
    }
}
/**
 * Broadcasts a deregistration to every other cluster member and waits up to
 * 5 seconds per member for acknowledgement. Timeouts and member departures are
 * tolerated (best-effort propagation); execution failures are rethrown.
 */
private void invokeDeregistrationOnOtherNodes(String serviceName, String topic, String id) {
    Collection<MemberImpl> members = nodeEngine.getClusterService().getMemberList();
    Collection<Future> calls = new ArrayList<Future>(members.size());
    for (MemberImpl member : members) {
        if (!member.localMember()) {
            Future f = nodeEngine.getOperationService().invokeOnTarget(serviceName,
                    new DeregistrationOperation(topic, id), member.getAddress());
            calls.add(f);
        }
    }
    for (Future f : calls) {
        try {
            f.get(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of silently swallowing it so
            // callers further up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
        } catch (TimeoutException ignored) {
            // Best-effort: a slow member does not fail the deregistration.
        } catch (MemberLeftException e) {
            logger.finest("Member left while de-registering listener...", e);
        } catch (ExecutionException e) {
            throw new HazelcastException(e);
        }
    }
}
@Override
// Array snapshot of the topic's registrations; returns a shared empty array on miss.
public EventRegistration[] getRegistrationsAsArray(String serviceName, String topic) {
final EventServiceSegment segment = getSegment(serviceName, false);
if (segment != null) {
final Collection<Registration> registrations = segment.getRegistrations(topic, false);
return registrations != null && !registrations.isEmpty()
? registrations.toArray(new Registration[registrations.size()])
: EMPTY_REGISTRATIONS;
}
return EMPTY_REGISTRATIONS;
}
@Override
// Unmodifiable live view of the topic's registrations; empty set on miss.
public Collection<EventRegistration> getRegistrations(String serviceName, String topic) {
final EventServiceSegment segment = getSegment(serviceName, false);
if (segment != null) {
final Collection<Registration> registrations = segment.getRegistrations(topic, false);
return registrations != null && !registrations.isEmpty()
? Collections.<EventRegistration>unmodifiableCollection(registrations)
: Collections.<EventRegistration>emptySet();
}
return Collections.emptySet();
}
@Override
// Dispatches one event to one registration: locally via the striped executor, or
// remotely by sending an EventPacket to the subscriber member.
public void publishEvent(String serviceName, EventRegistration registration, Object event, int orderKey) {
if (!(registration instanceof Registration)) {
throw new IllegalArgumentException();
}
final Registration reg = (Registration) registration;
if (isLocal(reg)) {
executeLocal(serviceName, event, reg, orderKey);
} else {
final Address subscriber = registration.getSubscriber();
sendEventPacket(subscriber, new EventPacket(registration.getId(), serviceName, event), orderKey);
}
}
/**
 * Dispatches one event to a collection of registrations. Local subscribers are
 * handed the live object via the striped executor; for remote subscribers the
 * event is serialized at most once, lazily, and sent as an EventPacket.
 */
@Override
public void publishEvent(String serviceName, Collection<EventRegistration> registrations, Object event, int orderKey) {
    Data serializedEvent = null;
    for (EventRegistration registration : registrations) {
        if (!(registration instanceof Registration)) {
            throw new IllegalArgumentException();
        }
        Registration reg = (Registration) registration;
        if (isLocal(reg)) {
            executeLocal(serviceName, event, reg, orderKey);
            continue;
        }
        // Serialize only when at least one remote subscriber exists.
        if (serializedEvent == null) {
            serializedEvent = nodeEngine.toData(event);
        }
        Address subscriber = registration.getSubscriber();
        sendEventPacket(subscriber, new EventPacket(registration.getId(), serviceName, serializedEvent), orderKey);
    }
}
// Queues a local dispatch on the striped executor; orderKey pins the event to a lane
// so events with the same key are delivered in order. Overload is logged, not thrown.
private void executeLocal(String serviceName, Object event, Registration reg, int orderKey) {
if (nodeEngine.isActive()) {
try {
if (reg.listener != null) {
eventExecutor.execute(new LocalEventDispatcher(serviceName, event, reg.listener, orderKey, eventQueueTimeoutMs));
} else {
logger.warning("Something seems wrong! Listener instance is null! -> " + reg);
}
} catch (RejectedExecutionException e) {
// Only warn if the executor is still alive; rejection during shutdown is expected.
if (eventExecutor.isLive()) {
logger.warning("EventQueue overloaded! " + event + " failed to publish to " + reg.serviceName + ":" + reg.topic);
}
}
}
}
// Sends an event packet to a remote subscriber. Normally fire-and-forget via a raw
// Packet; every 100000th publish per segment is sent as a synchronous invocation
// (waited up to 3s) to apply backpressure and detect dead subscribers.
private void sendEventPacket(Address subscriber, EventPacket eventPacket, int orderKey) {
final String serviceName = eventPacket.serviceName;
final EventServiceSegment segment = getSegment(serviceName, true);
boolean sync = segment.incrementPublish() % 100000 == 0;
if (sync) {
Future f = nodeEngine.getOperationService().createInvocationBuilder(serviceName,
new SendEventOperation(eventPacket, orderKey), subscriber).setTryCount(50).invoke();
try {
f.get(3, TimeUnit.SECONDS);
} catch (Exception ignored) {
// Best-effort sync checkpoint; failures fall back to fire-and-forget semantics.
}
} else {
final Packet packet = new Packet(nodeEngine.toData(eventPacket), orderKey, nodeEngine.getSerializationContext());
packet.setHeader(Packet.HEADER_EVENT);
nodeEngine.send(packet, subscriber);
}
}
// Looks up the per-service segment; when forceCreate is set, atomically creates it
// on first use (getOrPutIfAbsent avoids duplicate segments under concurrency).
private EventServiceSegment getSegment(String service, boolean forceCreate) {
EventServiceSegment segment = segments.get(service);
if (segment == null && forceCreate) {
return ConcurrencyUtil.getOrPutIfAbsent(segments, service, new ConstructorFunction<String, EventServiceSegment>() {
public EventServiceSegment createNew(String key) {
return new EventServiceSegment(key);
}
});
}
return segment;
}
// A registration is local when its subscriber address is this member's address.
private boolean isLocal(Registration reg) {
return nodeEngine.getThisAddress().equals(reg.getSubscriber());
}
@PrivateApi
// Submits an arbitrary event-processing task to the striped executor; overload
// during normal operation is logged rather than propagated.
void executeEvent(Runnable eventRunnable) {
if (nodeEngine.isActive()) {
try {
eventExecutor.execute(eventRunnable);
} catch (RejectedExecutionException e) {
if (eventExecutor.isLive()) {
logger.warning("EventQueue overloaded! Failed to execute event process: " + eventRunnable);
}
}
}
}
@PrivateApi
// Entry point for event packets arriving over the wire; deserialization and dispatch
// happen on the striped executor, keyed by the packet's partition id.
void handleEvent(Packet packet) {
try {
eventExecutor.execute(new RemoteEventPacketProcessor(packet));
} catch (RejectedExecutionException e) {
if (eventExecutor.isLive()) {
final Connection conn = packet.getConn();
String endpoint = conn.getEndPoint() != null ? conn.getEndPoint().toString() : conn.toString();
logger.warning("EventQueue overloaded! Failed to process event packet sent from: " + endpoint);
}
}
}
// Collects all non-local-only registrations so a newly joined member can replay
// them; returns null when there is nothing to replicate.
public PostJoinRegistrationOperation getPostJoinOperation() {
final Collection<Registration> registrations = new LinkedList<Registration>();
for (EventServiceSegment segment : segments.values()) {
for (Registration reg : segment.registrationIdMap.values()) {
if (!reg.isLocalOnly()) {
registrations.add(reg);
}
}
}
return registrations.isEmpty() ? null : new PostJoinRegistrationOperation(registrations);
}
// Stops the dispatch executor first so no further events run, then clears all segments.
void shutdown() {
logger.finest("Stopping event executor...");
eventExecutor.shutdown();
for (EventServiceSegment segment : segments.values()) {
segment.clear();
}
segments.clear();
}
// Purges every registration whose subscriber was the departed member.
void onMemberLeft(MemberImpl member) {
final Address address = member.getAddress();
for (EventServiceSegment segment : segments.values()) {
segment.onMemberLeft(address);
}
}
// Per-service bookkeeping: topic -> registrations, plus an id -> registration index
// and a publish counter used for periodic sync sends. The two maps are kept in step
// by addRegistration/removeRegistration; updates are not atomic across both maps.
private static class EventServiceSegment {
final String serviceName;
final ConcurrentMap<String, Collection<Registration>> registrations
= new ConcurrentHashMap<String, Collection<Registration>>();
final ConcurrentMap<String, Registration> registrationIdMap = new ConcurrentHashMap<String, Registration>();
final AtomicInteger totalPublishes = new AtomicInteger();
EventServiceSegment(String serviceName) {
this.serviceName = serviceName;
}
// Returns the topic's registration set, creating a concurrent set atomically when
// forceCreate is requested.
private Collection<Registration> getRegistrations(String topic, boolean forceCreate) {
Collection<Registration> listenerList = registrations.get(topic);
if (listenerList == null && forceCreate) {
return ConcurrencyUtil.getOrPutIfAbsent(registrations, topic, new ConstructorFunction<String, Collection<Registration>>() {
public Collection<Registration> createNew(String key) {
return Collections.newSetFromMap(new ConcurrentHashMap<Registration, Boolean>());
}
});
}
return listenerList;
}
// Adds to the topic set; only a new (non-duplicate) registration is indexed by id.
private boolean addRegistration(String topic, Registration registration) {
final Collection<Registration> registrations = getRegistrations(topic, true);
if (registrations.add(registration)) {
registrationIdMap.put(registration.id, registration);
return true;
}
return false;
}
// Removes by id first, then drops the instance from the topic set.
private Registration removeRegistration(String topic, String id) {
final Registration registration = registrationIdMap.remove(id);
if (registration != null) {
final Collection<Registration> all = registrations.get(topic);
if (all != null) {
all.remove(registration);
}
}
return registration;
}
// Drops the whole topic and unindexes each of its registrations.
void removeRegistrations(String topic) {
final Collection<Registration> all = registrations.remove(topic);
if (all != null) {
for (Registration reg : all) {
registrationIdMap.remove(reg.getId());
}
}
}
void clear() {
registrations.clear();
registrationIdMap.clear();
}
// Removes every registration subscribed from the given (departed) address.
void onMemberLeft(Address address) {
for (Collection<Registration> all : registrations.values()) {
Iterator<Registration> iter = all.iterator();
while (iter.hasNext()) {
Registration reg = iter.next();
if (address.equals(reg.getSubscriber())) {
iter.remove();
registrationIdMap.remove(reg.id);
}
}
}
}
int incrementPublish() {
return totalPublishes.incrementAndGet();
}
}
// Striped task that resolves an EventPacket to its local registration and dispatches
// the (deserialized) event to the registered listener via the owning service.
private class EventPacketProcessor implements StripedRunnable {
private EventPacket eventPacket;
// Stripe key: events with the same key run sequentially.
int orderKey;
private EventPacketProcessor() {
}
public EventPacketProcessor(EventPacket packet, int orderKey) {
this.eventPacket = packet;
this.orderKey = orderKey;
}
@Override
public void run() {
process(eventPacket);
}
void process(EventPacket eventPacket) {
Object eventObject = eventPacket.event;
// Remote events arrive serialized; deserialize before dispatch.
if (eventObject instanceof Data) {
eventObject = nodeEngine.toObject(eventObject);
}
final String serviceName = eventPacket.serviceName;
EventPublishingService<Object, Object> service = nodeEngine.getService(serviceName);
// Each missing link (service, segment, registration) is logged only while the
// node is active, since misses are expected during shutdown.
if (service == null) {
if (nodeEngine.isActive()) {
logger.warning("There is no service named: " + serviceName);
}
return;
}
EventServiceSegment segment = getSegment(serviceName, false);
if (segment == null) {
if (nodeEngine.isActive()) {
logger.warning("No service registration found for " + serviceName);
}
return;
}
Registration registration = segment.registrationIdMap.get(eventPacket.id);
if (registration == null) {
if (nodeEngine.isActive()) {
if (logger.isFinestEnabled()) {
logger.finest("No registration found for " + serviceName + " / " + eventPacket.id);
}
}
return;
}
// Packets should only be routed to the subscribing member.
if (!isLocal(registration)) {
logger.severe("Invalid target for  " + registration);
return;
}
if (registration.listener == null) {
logger.warning("Something seems wrong! Subscriber is local but listener instance is null! -> " + registration);
return;
}
service.dispatchEvent(eventObject, registration.listener);
}
@Override
public int getKey() {
return orderKey;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("EventPacketProcessor{");
sb.append("eventPacket=").append(eventPacket);
sb.append('}');
return sb.toString();
}
}
// Variant of EventPacketProcessor for packets received over the wire: the EventPacket
// itself is still serialized and is decoded on the executor thread; the stripe key is
// the packet's partition id so same-partition events stay ordered.
private class RemoteEventPacketProcessor extends EventPacketProcessor implements StripedRunnable {
private Packet packet;
public RemoteEventPacketProcessor(Packet packet) {
this.packet = packet;
this.orderKey = packet.getPartitionId();
}
@Override
public void run() {
Data data = packet.getData();
EventPacket eventPacket = (EventPacket) nodeEngine.toObject(data);
process(eventPacket);
}
}
// Striped task delivering a local (unserialized) event to a single listener, with a
// queue-offer timeout so a saturated lane rejects rather than blocks forever.
private class LocalEventDispatcher implements StripedRunnable, TimeoutRunnable {
final String serviceName;
final Object event;
final Object listener;
final int orderKey;
// Maximum time to wait for queue space before the submission is rejected.
final long timeoutMs;
private LocalEventDispatcher(String serviceName, Object event, Object listener, int orderKey, long timeoutMs) {
this.serviceName = serviceName;
this.event = event;
this.listener = listener;
this.orderKey = orderKey;
this.timeoutMs = timeoutMs;
}
@Override
public long getTimeout() {
return timeoutMs;
}
@Override
public TimeUnit getTimeUnit() {
return TimeUnit.MILLISECONDS;
}
@Override
public final void run() {
final EventPublishingService<Object, Object> service = nodeEngine.getService(serviceName);
if (service != null) {
service.dispatchEvent(event, listener);
} else {
// During shutdown a missing service is expected and silently ignored.
if (nodeEngine.isActive()) {
throw new IllegalArgumentException("Service[" + serviceName + "] could not be found!");
}
}
}
@Override
public int getKey() {
return orderKey;
}
}
// Serializable descriptor of a listener registration. The listener reference and the
// localOnly flag are transient: they exist only on the subscribing member and are
// deliberately excluded from both the wire format and equals/hashCode.
public static class Registration implements EventRegistration {
private String id;
private String serviceName;
private String topic;
private EventFilter filter;
// Address of the member that owns the listener instance.
private Address subscriber;
private transient boolean localOnly;
private transient Object listener;
public Registration() {
}
public Registration(String id, String serviceName, String topic,
EventFilter filter, Address subscriber, Object listener, boolean localOnly) {
this.filter = filter;
this.id = id;
this.listener = listener;
this.serviceName = serviceName;
this.topic = topic;
this.subscriber = subscriber;
this.localOnly = localOnly;
}
@Override
public EventFilter getFilter() {
return filter;
}
@Override
public String getId() {
return id;
}
@Override
public Address getSubscriber() {
return subscriber;
}
@Override
public boolean isLocalOnly() {
return localOnly;
}
// Equality over the serialized identity fields only (not listener/localOnly), so a
// deserialized copy on another member equals the original registration.
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Registration that = (Registration) o;
if (id != null ? !id.equals(that.id) : that.id != null) return false;
if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
if (topic != null ? !topic.equals(that.topic) : that.topic != null) return false;
if (filter != null ? !filter.equals(that.filter) : that.filter != null) return false;
if (subscriber != null ? !subscriber.equals(that.subscriber) : that.subscriber != null) return false;
return true;
}
@Override
public int hashCode() {
int result = id != null ? id.hashCode() : 0;
result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
result = 31 * result + (topic != null ? topic.hashCode() : 0);
result = 31 * result + (filter != null ? filter.hashCode() : 0);
result = 31 * result + (subscriber != null ? subscriber.hashCode() : 0);
return result;
}
// Wire format: field order below is the protocol — readData must mirror it exactly.
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(id);
out.writeUTF(serviceName);
out.writeUTF(topic);
subscriber.writeData(out);
out.writeObject(filter);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
id = in.readUTF();
serviceName = in.readUTF();
topic = in.readUTF();
subscriber = new Address();
subscriber.readData(in);
filter = in.readObject();
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("Registration");
sb.append("{filter=").append(filter);
sb.append(", id='").append(id).append('\'');
sb.append(", serviceName='").append(serviceName).append('\'');
sb.append(", subscriber=").append(subscriber);
sb.append(", listener=").append(listener);
sb.append('}');
return sb.toString();
}
}
/**
 * Serializable envelope pairing an event object with the registration id and the
 * owning service name, so the receiving member can route it to the right listener.
 */
public static final class EventPacket implements IdentifiedDataSerializable {

    private String id;
    private String serviceName;
    private Object event;

    /** Required for deserialization. */
    public EventPacket() {
    }

    EventPacket(String id, String serviceName, Object event) {
        this.id = id;
        this.serviceName = serviceName;
        this.event = event;
    }

    @Override
    public void writeData(ObjectDataOutput out) throws IOException {
        // Wire order must stay in sync with readData.
        out.writeUTF(id);
        out.writeUTF(serviceName);
        out.writeObject(event);
    }

    @Override
    public void readData(ObjectDataInput in) throws IOException {
        id = in.readUTF();
        serviceName = in.readUTF();
        event = in.readObject();
    }

    @Override
    public int getFactoryId() {
        return SpiDataSerializerHook.F_ID;
    }

    @Override
    public int getId() {
        return SpiDataSerializerHook.EVENT_PACKET;
    }

    @Override
    public String toString() {
        return "EventPacket{" + "id='" + id + '\''
                + ", serviceName='" + serviceName + '\''
                + ", event=" + event
                + '}';
    }
}
/**
 * Match-everything {@link EventFilter}: {@link #eval(Object)} always returns true.
 * The filter carries no state, so serialization is a no-op and all instances are
 * equal to each other (constant hash code 0).
 */
public static final class EmptyFilter implements EventFilter, DataSerializable {
    // Added @Override for consistency with every other interface implementation
    // in this file; eval(Object) is declared by EventFilter.
    @Override
    public boolean eval(Object arg) {
        return true;
    }

    @Override
    public void writeData(ObjectDataOutput out) throws IOException {
        // stateless: nothing to write
    }

    @Override
    public void readData(ObjectDataInput in) throws IOException {
        // stateless: nothing to read
    }

    @Override
    public boolean equals(Object obj) {
        // all EmptyFilter instances are interchangeable
        return obj instanceof EmptyFilter;
    }

    @Override
    public int hashCode() {
        return 0;
    }
}
/**
 * Operation that carries an {@link EventPacket} to a remote member, where it is
 * handed to the local event service for ordered processing under the given order key.
 */
public static class SendEventOperation extends AbstractOperation {

    private EventPacket eventPacket;
    private int orderKey;

    /** Required for deserialization. */
    public SendEventOperation() {
    }

    public SendEventOperation(EventPacket eventPacket, int orderKey) {
        this.eventPacket = eventPacket;
        this.orderKey = orderKey;
    }

    @Override
    public void run() throws Exception {
        EventServiceImpl eventService = (EventServiceImpl) getNodeEngine().getEventService();
        // Wrap the packet in a processor bound to the receiving event service instance.
        EventPacketProcessor processor = eventService.new EventPacketProcessor(eventPacket, orderKey);
        eventService.executeEvent(processor);
    }

    @Override
    public boolean returnsResponse() {
        return true;
    }

    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        // Wire order must stay in sync with readInternal.
        super.writeInternal(out);
        eventPacket.writeData(out);
        out.writeInt(orderKey);
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        super.readInternal(in);
        eventPacket = new EventPacket();
        eventPacket.readData(in);
        orderKey = in.readInt();
    }
}
/**
 * Operation that installs a listener {@link Registration} on a remote member.
 * The boolean response reports whether the local event service accepted the
 * registration.
 */
public static class RegistrationOperation extends AbstractOperation {

    private Registration registration;
    private boolean response = false;

    /** Required for deserialization. */
    public RegistrationOperation() {
    }

    private RegistrationOperation(Registration registration) {
        this.registration = registration;
    }

    @Override
    public void run() throws Exception {
        EventServiceImpl eventService = (EventServiceImpl) getNodeEngine().getEventService();
        response = eventService.handleRegistration(registration);
    }

    @Override
    public Object getResponse() {
        return response;
    }

    @Override
    public boolean returnsResponse() {
        return true;
    }

    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        // Consistency fix: delegate to super first, like the sibling operations
        // (SendEventOperation, PostJoinRegistrationOperation) in this file do.
        // super.writeInternal is a concrete no-op hook, so the wire format stays
        // symmetric as long as readInternal mirrors this call.
        super.writeInternal(out);
        registration.writeData(out);
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        super.readInternal(in);
        registration = new Registration();
        registration.readData(in);
    }
}
/**
 * Operation that removes the listener registration identified by (serviceName,
 * topic, id) from a remote member. Always responds with {@code true}.
 */
public static class DeregistrationOperation extends AbstractOperation {

    private String topic;
    private String id;

    /** Required for deserialization. */
    DeregistrationOperation() {
    }

    private DeregistrationOperation(String topic, String id) {
        this.topic = topic;
        this.id = id;
    }

    @Override
    public void run() throws Exception {
        EventServiceImpl eventService = (EventServiceImpl) getNodeEngine().getEventService();
        eventService.deregisterSubscriber(getServiceName(), topic, id);
    }

    @Override
    public Object getResponse() {
        return true;
    }

    @Override
    public boolean returnsResponse() {
        return true;
    }

    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        // Consistency fix: delegate to super first, like the sibling operations
        // in this file; readInternal mirrors the call so the wire stays symmetric.
        super.writeInternal(out);
        out.writeUTF(topic);
        out.writeUTF(id);
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        super.readInternal(in);
        topic = in.readUTF();
        id = in.readUTF();
    }
}
/**
 * Post-join operation that replays all existing listener registrations on a
 * newly joined member so it can deliver events for them. Fire-and-forget:
 * returns no response.
 */
public static class PostJoinRegistrationOperation extends AbstractOperation {

    private Collection<Registration> registrations;

    /** Required for deserialization. */
    public PostJoinRegistrationOperation() {
    }

    public PostJoinRegistrationOperation(Collection<Registration> registrations) {
        this.registrations = registrations;
    }

    @Override
    public void run() throws Exception {
        if (registrations == null || registrations.isEmpty()) {
            return;
        }
        NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
        EventServiceImpl eventService = nodeEngine.eventService;
        for (Registration reg : registrations) {
            eventService.handleRegistration(reg);
        }
    }

    @Override
    public boolean returnsResponse() {
        return false;
    }

    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        // Wire format: count followed by that many Registration payloads.
        super.writeInternal(out);
        int count = registrations != null ? registrations.size() : 0;
        out.writeInt(count);
        if (count > 0) {
            for (Registration reg : registrations) {
                reg.writeData(out);
            }
        }
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        super.readInternal(in);
        int count = in.readInt();
        if (count > 0) {
            registrations = new ArrayList<Registration>(count);
            for (int i = 0; i < count; i++) {
                Registration reg = new Registration();
                reg.readData(in);
                registrations.add(reg);
            }
        }
    }
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_EventServiceImpl.java
|
40 |
/**
 * Cardinality of the values that may be associated with a given key:
 * exactly one ({@link #SINGLE}), many with duplicates allowed ({@link #LIST}),
 * or many but distinct ({@link #SET}).
 */
public enum Cardinality {
    /**
     * Only a single value may be associated with the given key.
     */
    SINGLE,
    /**
     * Multiple values and duplicate values may be associated with the given key.
     */
    LIST,
    /**
     * Multiple but distinct values may be associated with the given key.
     */
    SET;
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_Cardinality.java
|
812 |
/**
 * Base class for atomic-long operations that alter the stored value by applying
 * an {@link IFunction}. Serializes the function alongside the superclass state.
 */
public abstract class AbstractAlterOperation extends AtomicLongBackupAwareOperation {

    // Function applied to the current value to produce the new one.
    protected IFunction<Long, Long> function;

    // Value returned to the caller via getResponse(); presumably written by a
    // subclass's run() (old or new value depending on the concrete operation) —
    // not visible here, confirm in subclasses.
    protected long response;

    // Value replicated to backup replicas via getBackupOperation(); presumably
    // written by a subclass's run() — confirm in subclasses.
    protected long backup;

    /** Required for deserialization. */
    public AbstractAlterOperation() {
    }

    public AbstractAlterOperation(String name, IFunction<Long, Long> function) {
        super(name);
        this.function = function;
    }

    @Override
    public Object getResponse() {
        return response;
    }

    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        // Wire order must stay in sync with readInternal.
        super.writeInternal(out);
        out.writeObject(function);
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        super.readInternal(in);
        function = in.readObject();
    }

    @Override
    public Operation getBackupOperation() {
        // Backups receive the resulting value directly rather than re-applying
        // the function, keeping replicas deterministic.
        return new SetBackupOperation(name, backup);
    }
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_concurrent_atomiclong_operations_AbstractAlterOperation.java
|
1,498 |
/**
 * Central shard-allocation engine. Computes routing-table changes in reaction to
 * started shards, failed shards, node join/leave, and explicit reroute commands.
 * Every public entry point returns a {@code RoutingAllocation.Result} whose
 * changed-flag is {@code false} when no modification was made (in which case the
 * original routing table instance is returned unchanged).
 */
public class AllocationService extends AbstractComponent {

    private final AllocationDeciders allocationDeciders;

    private final ClusterInfoService clusterInfoService;

    private final ShardsAllocators shardsAllocators;

    @Inject
    public AllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocators shardsAllocators, ClusterInfoService clusterInfoService) {
        super(settings);
        this.allocationDeciders = allocationDeciders;
        this.shardsAllocators = shardsAllocators;
        this.clusterInfoService = clusterInfoService;
    }

    /**
     * Applies the started shards. Note, shards can be called several times within this method.
     * <p/>
     * <p>If the same instance of the routing table is returned, then no change has been made.</p>
     */
    public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<? extends ShardRouting> startedShards) {
        return applyStartedShards(clusterState, startedShards, true);
    }

    /**
     * Same as {@link #applyStartedShards(ClusterState, List)} but lets the caller
     * skip the follow-up reroute pass (e.g. when the caller will reroute itself).
     */
    public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<? extends ShardRouting> startedShards, boolean withReroute) {
        RoutingNodes routingNodes = clusterState.routingNodes();
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
        boolean changed = applyStartedShards(routingNodes, startedShards);
        if (!changed) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
        }
        shardsAllocators.applyStartedShards(allocation);
        if (withReroute) {
            reroute(allocation);
        }
        return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
    }

    /** Convenience overload for failing a single shard. */
    public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
        return applyFailedShards(clusterState, ImmutableList.of(failedShard));
    }

    /**
     * Applies the failed shards. Note, shards can be called several times within this method.
     * <p/>
     * <p>If the same instance of the routing table is returned, then no change has been made.</p>
     */
    public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, List<ShardRouting> failedShards) {
        RoutingNodes routingNodes = clusterState.routingNodes();
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), failedShards, clusterInfoService.getClusterInfo());
        boolean changed = false;
        for (ShardRouting failedShard : failedShards) {
            changed |= applyFailedShard(allocation, failedShard, true);
        }
        if (!changed) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
        }
        shardsAllocators.applyFailedShards(allocation);
        reroute(allocation);
        return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
    }

    /** Executes explicit allocation commands without decision debugging. */
    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
        return reroute(clusterState, commands, false);
    }

    /**
     * Executes explicit allocation commands (move/cancel/allocate) and then a
     * full reroute pass. Allocation-disable settings are ignored while the
     * commands run, since the commands are explicit operator intent.
     */
    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean debug) throws ElasticsearchException {
        RoutingNodes routingNodes = clusterState.routingNodes();
        // we don't shuffle the unassigned shards here, to try and get as close as possible to
        // a consistent result of the effect the commands have on the routing
        // this allows systems to dry run the commands, see the resulting cluster state, and act on it
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo());
        allocation.debugDecision(debug);
        // we ignore disable allocation, because commands are explicit
        allocation.ignoreDisable(true);
        commands.execute(allocation);
        // we revert the ignore disable flag, since when rerouting, we want the original setting to take place
        allocation.ignoreDisable(false);
        // the assumption is that commands will move / act on shards (or fail through exceptions)
        // so, there will always be shard "movements", so no need to check on reroute
        reroute(allocation);
        return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
    }

    /**
     * Reroutes the routing table based on the live nodes.
     * <p/>
     * <p>If the same instance of the routing table is returned, then no change has been made.
     */
    public RoutingAllocation.Result reroute(ClusterState clusterState) {
        return reroute(clusterState, false);
    }

    /**
     * Reroutes the routing table based on the live nodes.
     * <p/>
     * <p>If the same instance of the routing table is returned, then no change has been made.
     */
    public RoutingAllocation.Result reroute(ClusterState clusterState, boolean debug) {
        RoutingNodes routingNodes = clusterState.routingNodes();
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo());
        allocation.debugDecision(debug);
        if (!reroute(allocation)) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
        }
        return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
    }

    /**
     * Only handles reroute but *without* any reassignment of unassigned shards or rebalancing. Does
     * make sure to handle removed nodes, but only moved the shards to UNASSIGNED, does not reassign
     * them.
     */
    public RoutingAllocation.Result rerouteWithNoReassign(ClusterState clusterState) {
        return rerouteWithNoReassign(clusterState, false);
    }

    /**
     * Only handles reroute but *without* any reassignment of unassigned shards or rebalancing. Does
     * make sure to handle removed nodes, but only moved the shards to UNASSIGNED, does not reassign
     * them.
     */
    public RoutingAllocation.Result rerouteWithNoReassign(ClusterState clusterState, boolean debug) {
        RoutingNodes routingNodes = clusterState.routingNodes();
        // shuffle the unassigned nodes, just so we won't have things like poison failed shards
        routingNodes.unassigned().shuffle();
        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo());
        allocation.debugDecision(debug);
        boolean changed = false;
        // first, clear from the shards any node id they used to belong to that is now dead
        changed |= deassociateDeadNodes(allocation);
        // create a sorted list of from nodes with least number of shards to the maximum ones
        applyNewNodes(allocation);
        // elect primaries *before* allocating unassigned, so backups of primaries that failed
        // will be moved to primary state and not wait for primaries to be allocated and recovered (*from gateway*)
        changed |= electPrimariesAndUnassignDanglingReplicas(allocation);
        if (!changed) {
            return new RoutingAllocation.Result(false, clusterState.routingTable(), allocation.explanation());
        }
        return new RoutingAllocation.Result(true, new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()), allocation.explanation());
    }

    /**
     * Full reroute pass: removes dead nodes, registers new ones, elects primaries,
     * allocates unassigned shards, moves shards that may no longer remain, and
     * rebalances. Returns true if anything changed.
     * NOTE(review): the first three steps duplicate rerouteWithNoReassign — keep
     * the two in sync if either changes.
     */
    private boolean reroute(RoutingAllocation allocation) {
        boolean changed = false;
        // first, clear from the shards any node id they used to belong to that is now dead
        changed |= deassociateDeadNodes(allocation);
        // create a sorted list of from nodes with least number of shards to the maximum ones
        applyNewNodes(allocation);
        // elect primaries *before* allocating unassigned, so backups of primaries that failed
        // will be moved to primary state and not wait for primaries to be allocated and recovered (*from gateway*)
        changed |= electPrimariesAndUnassignDanglingReplicas(allocation);
        // now allocate all the unassigned to available nodes
        if (allocation.routingNodes().hasUnassigned()) {
            changed |= shardsAllocators.allocateUnassigned(allocation);
            // elect primaries again, in case this is needed with unassigned allocation
            changed |= electPrimariesAndUnassignDanglingReplicas(allocation);
        }
        // move shards that no longer can be allocated
        changed |= moveShards(allocation);
        // rebalance
        changed |= shardsAllocators.rebalance(allocation);
        assert RoutingNodes.assertShardStats(allocation.routingNodes());
        return changed;
    }

    /**
     * Asks the deciders whether each started shard can remain on its node and,
     * when the answer is NO, asks the allocators to move it elsewhere.
     * Shards are visited interleaved across nodes (index 0 of every node, then
     * index 1, ...) to spread moves evenly.
     */
    private boolean moveShards(RoutingAllocation allocation) {
        boolean changed = false;
        // create a copy of the shards interleaving between nodes, and check if they can remain
        List<MutableShardRouting> shards = new ArrayList<MutableShardRouting>();
        int index = 0;
        boolean found = true;
        final RoutingNodes routingNodes = allocation.routingNodes();
        while (found) {
            found = false;
            for (RoutingNode routingNode : routingNodes) {
                if (index >= routingNode.size()) {
                    continue;
                }
                found = true;
                shards.add(routingNode.get(index));
            }
            index++;
        }
        for (int i = 0; i < shards.size(); i++) {
            MutableShardRouting shardRouting = shards.get(i);
            // we can only move started shards...
            if (!shardRouting.started()) {
                continue;
            }
            final RoutingNode routingNode = routingNodes.node(shardRouting.currentNodeId());
            Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
            if (decision.type() == Decision.Type.NO) {
                logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
                boolean moved = shardsAllocators.move(shardRouting, routingNode, allocation);
                if (!moved) {
                    logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
                } else {
                    assert RoutingNodes.assertShardStats(allocation.routingNodes());
                    changed = true;
                }
            }
        }
        return changed;
    }

    /**
     * For every unassigned primary, promotes an active replica (if one exists) to
     * primary, and fails any still-initializing replicas of primaries that could
     * not be promoted, since they cannot recover without a primary.
     */
    private boolean electPrimariesAndUnassignDanglingReplicas(RoutingAllocation allocation) {
        boolean changed = false;
        RoutingNodes routingNodes = allocation.routingNodes();
        if (!routingNodes.hasUnassignedPrimaries()) {
            // move out if we don't have unassigned primaries
            return changed;
        }
        for (MutableShardRouting shardEntry : routingNodes.unassigned()) {
            if (shardEntry.primary()) {
                MutableShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
                if (candidate != null) {
                    routingNodes.swapPrimaryFlag(shardEntry, candidate);
                    if (candidate.relocatingNodeId() != null) {
                        changed = true;
                        // its also relocating, make sure to move the other routing to primary
                        RoutingNode node = routingNodes.node(candidate.relocatingNodeId());
                        if (node != null) {
                            for (MutableShardRouting shardRouting : node) {
                                if (shardRouting.shardId().equals(candidate.shardId()) && !shardRouting.primary()) {
                                    routingNodes.swapPrimaryFlag(shardRouting);
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
        // go over and remove dangling replicas that are initializing, but we couldn't elect primary ones...
        List<ShardRouting> shardsToFail = null;
        if (routingNodes.hasUnassignedPrimaries()) {
            for (MutableShardRouting shardEntry : routingNodes.unassigned()) {
                if (shardEntry.primary()) {
                    for(MutableShardRouting routing : routingNodes.assignedShards(shardEntry)) {
                        if (!routing.primary()) {
                            changed = true;
                            if (shardsToFail == null) {
                                shardsToFail = new ArrayList<ShardRouting>();
                            }
                            shardsToFail.add(routing);
                        }
                    }
                }
            }
            if (shardsToFail != null) {
                for (ShardRouting shardToFail : shardsToFail) {
                    applyFailedShard(allocation, shardToFail, false);
                }
            }
        }
        return changed;
    }

    /**
     * Applies the new nodes to the routing nodes and returns them (just the
     * new nodes);
     */
    private void applyNewNodes(RoutingAllocation allocation) {
        final RoutingNodes routingNodes = allocation.routingNodes();
        for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().dataNodes().values()) {
            DiscoveryNode node = cursor.value;
            if (!routingNodes.isKnown(node)) {
                routingNodes.addNode(node);
            }
        }
    }

    /**
     * Fails all shards assigned to nodes that are no longer in the cluster's data
     * nodes and removes those nodes from the routing nodes. Returns true if any
     * node was removed.
     */
    private boolean deassociateDeadNodes(RoutingAllocation allocation) {
        boolean changed = false;
        for (RoutingNodes.RoutingNodesIterator it = allocation.routingNodes().nodes(); it.hasNext(); ) {
            RoutingNode node = it.next();
            if (allocation.nodes().dataNodes().containsKey(node.nodeId())) {
                // its a live node, continue
                continue;
            }
            changed = true;
            // now, go over all the shards routing on the node, and fail them
            for (MutableShardRouting shardRouting : node.copyShards()) {
                applyFailedShard(allocation, shardRouting, false);
            }
            // its a dead node, remove it, note, its important to remove it *after* we apply failed shard
            // since it relies on the fact that the RoutingNode exists in the list of nodes
            it.remove();
        }
        return changed;
    }

    /**
     * Marks the given initializing shards as started on their current nodes and,
     * if a shard was the target of a relocation, removes the relocation source
     * entry. Returns true if any routing entry changed.
     */
    private boolean applyStartedShards(RoutingNodes routingNodes, Iterable<? extends ShardRouting> startedShardEntries) {
        boolean dirty = false;
        // apply shards might be called several times with the same shard, ignore it
        for (ShardRouting startedShard : startedShardEntries) {
            assert startedShard.state() == INITIALIZING;
            // retrieve the relocating node id before calling startedShard().
            String relocatingNodeId = null;
            RoutingNodes.RoutingNodeIterator currentRoutingNode = routingNodes.routingNodeIter(startedShard.currentNodeId());
            if (currentRoutingNode != null) {
                for (MutableShardRouting shard : currentRoutingNode) {
                    if (shard.shardId().equals(startedShard.shardId())) {
                        relocatingNodeId = shard.relocatingNodeId();
                        if (!shard.started()) {
                            dirty = true;
                            routingNodes.started(shard);
                        }
                        break;
                    }
                }
            }
            // startedShard is the current state of the shard (post relocation for example)
            // this means that after relocation, the state will be started and the currentNodeId will be
            // the node we relocated to
            if (relocatingNodeId == null) {
                continue;
            }
            RoutingNodes.RoutingNodeIterator sourceRoutingNode = routingNodes.routingNodeIter(relocatingNodeId);
            if (sourceRoutingNode != null) {
                while (sourceRoutingNode.hasNext()) {
                    MutableShardRouting shard = sourceRoutingNode.next();
                    if (shard.shardId().equals(startedShard.shardId())) {
                        if (shard.relocating()) {
                            dirty = true;
                            sourceRoutingNode.remove();
                            break;
                        }
                    }
                }
            }
        }
        return dirty;
    }

    /**
     * Applies the relevant logic to handle a failed shard. Returns <tt>true</tt> if changes happened that
     * require relocation.
     */
    private boolean applyFailedShard(RoutingAllocation allocation, ShardRouting failedShard, boolean addToIgnoreList) {
        // create a copy of the failed shard, since we assume we can change possible references to it without
        // changing the state of failed shard
        failedShard = new ImmutableShardRouting(failedShard);
        IndexRoutingTable indexRoutingTable = allocation.routingTable().index(failedShard.index());
        if (indexRoutingTable == null) {
            // index no longer exists; nothing to do
            return false;
        }
        RoutingNodes routingNodes = allocation.routingNodes();
        if (failedShard.relocatingNodeId() != null) {
            // the shard is relocating, either in initializing (recovery from another node) or relocating (moving to another node)
            if (failedShard.state() == INITIALIZING) {
                // the shard is initializing and recovering from another node
                boolean dirty = false;
                // first, we need to cancel the current node that is being initialized
                RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.currentNodeId());
                if (initializingNode != null) {
                    while(initializingNode.hasNext()) {
                        MutableShardRouting shardRouting = initializingNode.next();
                        if (shardRouting.equals(failedShard)) {
                            dirty = true;
                            initializingNode.remove();
                            if (addToIgnoreList) {
                                // make sure we ignore this shard on the relevant node
                                allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
                            }
                            break;
                        }
                    }
                }
                if (dirty) {
                    // now, find the node that we are relocating *from*, and cancel its relocation
                    RoutingNode relocatingFromNode = routingNodes.node(failedShard.relocatingNodeId());
                    if (relocatingFromNode != null) {
                        for (MutableShardRouting shardRouting : relocatingFromNode) {
                            if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.relocating()) {
                                dirty = true;
                                routingNodes.cancelRelocation(shardRouting);
                                break;
                            }
                        }
                    }
                }
                return dirty;
            } else if (failedShard.state() == RELOCATING) {
                boolean dirty = false;
                // the shard is relocating, meaning its the source the shard is relocating from
                // first, we need to cancel the current relocation from the current node
                // now, find the node that we are recovering from, cancel the relocation, remove it from the node
                // and add it to the unassigned shards list...
                RoutingNodes.RoutingNodeIterator relocatingFromNode = routingNodes.routingNodeIter(failedShard.currentNodeId());
                if (relocatingFromNode != null) {
                    while(relocatingFromNode.hasNext()) {
                        MutableShardRouting shardRouting = relocatingFromNode.next();
                        if (shardRouting.equals(failedShard)) {
                            dirty = true;
                            relocatingFromNode.remove();
                            if (addToIgnoreList) {
                                // make sure we ignore this shard on the relevant node
                                allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
                            }
                            // NOTE(review): unlike the non-relocating branch below, this
                            // MutableShardRouting is built without the restoreSource argument —
                            // looks like the restore source is dropped here; confirm whether
                            // that is intentional.
                            routingNodes.unassigned().add(new MutableShardRouting(failedShard.index(), failedShard.id(),
                                    null, failedShard.primary(), ShardRoutingState.UNASSIGNED, failedShard.version() + 1));
                            break;
                        }
                    }
                }
                if (dirty) {
                    // next, we need to find the target initializing shard that is recovering from, and remove it...
                    RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.relocatingNodeId());
                    if (initializingNode != null) {
                        while (initializingNode.hasNext()) {
                            MutableShardRouting shardRouting = initializingNode.next();
                            if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.state() == INITIALIZING) {
                                dirty = true;
                                initializingNode.remove();
                            }
                        }
                    }
                }
                return dirty;
            } else {
                throw new ElasticsearchIllegalStateException("illegal state for a failed shard, relocating node id is set, but state does not match: " + failedShard);
            }
        } else {
            // the shard is not relocating, its either started, or initializing, just cancel it and move on...
            boolean dirty = false;
            RoutingNodes.RoutingNodeIterator node = routingNodes.routingNodeIter(failedShard.currentNodeId());
            if (node != null) {
                while(node.hasNext()) {
                    MutableShardRouting shardRouting = node.next();
                    if (shardRouting.equals(failedShard)) {
                        dirty = true;
                        if (addToIgnoreList) {
                            // make sure we ignore this shard on the relevant node
                            allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
                        }
                        node.remove();
                        // move all the shards matching the failed shard to the end of the unassigned list
                        // so we give a chance for other allocations and won't create poison failed allocations
                        // that can keep other shards from being allocated (because of limits applied on how many
                        // shards we can start per node)
                        List<MutableShardRouting> shardsToMove = Lists.newArrayList();
                        for (Iterator<MutableShardRouting> unassignedIt = routingNodes.unassigned().iterator(); unassignedIt.hasNext(); ) {
                            MutableShardRouting unassignedShardRouting = unassignedIt.next();
                            if (unassignedShardRouting.shardId().equals(failedShard.shardId())) {
                                unassignedIt.remove();
                                shardsToMove.add(unassignedShardRouting);
                            }
                        }
                        if (!shardsToMove.isEmpty()) {
                            routingNodes.unassigned().addAll(shardsToMove);
                        }
                        routingNodes.unassigned().add(new MutableShardRouting(failedShard.index(), failedShard.id(), null,
                                null, failedShard.restoreSource(), failedShard.primary(), ShardRoutingState.UNASSIGNED, failedShard.version() + 1));
                        break;
                    }
                }
            }
            return dirty;
        }
    }
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_routing_allocation_AllocationService.java
|
805 |
/**
 * JPA entity recording a price adjustment applied to a fulfillment group by an
 * {@link Offer}. The raw amount is stored as a BigDecimal; {@link #getValue()}
 * wraps it in a Money using the owning order's currency. The merge override
 * marks every admin-presented field read-only.
 */
@Entity
@Table(name = "BLC_FG_ADJUSTMENT")
@Inheritance(strategy=InheritanceType.JOINED)
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationMergeOverrides(
    {
        @AdminPresentationMergeOverride(name = "", mergeEntries =
            @AdminPresentationMergeEntry(propertyType = PropertyType.AdminPresentation.READONLY,
                                            booleanOverrideValue = true))
    }
)
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE, friendlyName = "FulfillmentGroupAdjustmentImpl_baseFulfillmentGroupAdjustment")
public class FulfillmentGroupAdjustmentImpl implements FulfillmentGroupAdjustment, CurrencyCodeIdentifiable {

    public static final long serialVersionUID = 1L;

    @Id
    @GeneratedValue(generator= "FGAdjustmentId")
    @GenericGenerator(
        name="FGAdjustmentId",
        strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
        parameters = {
            @Parameter(name="segment_value", value="FulfillmentGroupAdjustmentImpl"),
            @Parameter(name="entity_name", value="org.broadleafcommerce.core.offer.domain.FulfillmentGroupAdjustmentImpl")
        }
    )
    @Column(name = "FG_ADJUSTMENT_ID")
    protected Long id;

    // Owning fulfillment group; excluded from admin presentation.
    @ManyToOne(targetEntity = FulfillmentGroupImpl.class)
    @JoinColumn(name = "FULFILLMENT_GROUP_ID")
    @Index(name="FGADJUSTMENT_INDEX", columnNames={"FULFILLMENT_GROUP_ID"})
    @AdminPresentation(excluded = true)
    protected FulfillmentGroup fulfillmentGroup;

    // Offer that produced this adjustment; required (optional=false).
    @ManyToOne(targetEntity = OfferImpl.class, optional=false)
    @JoinColumn(name = "OFFER_ID")
    @Index(name="FGADJUSTMENT_OFFER_INDEX", columnNames={"OFFER_ID"})
    @AdminPresentation(friendlyName = "FulfillmentGroupAdjustmentImpl_Offer", order=1000,
            prominent = true, gridOrder = 1000)
    @AdminPresentationToOneLookup()
    protected Offer offer;

    @Column(name = "ADJUSTMENT_REASON", nullable=false)
    @AdminPresentation(friendlyName = "FulfillmentGroupAdjustmentImpl_FG_Adjustment_Reason", order=2000)
    protected String reason;

    // Raw adjustment amount; defaults to zero so a freshly-created row is never null.
    @Column(name = "ADJUSTMENT_VALUE", nullable=false, precision=19, scale=5)
    @AdminPresentation(friendlyName = "FulfillmentGroupAdjustmentImpl_FG_Adjustment_Value", order=3000,
            fieldType = SupportedFieldType.MONEY, prominent = true,
            gridOrder = 2000)
    protected BigDecimal value = Money.ZERO.getAmount();

    /**
     * Initializes the adjustment; when no reason is supplied, the offer's name
     * is used as the reason.
     */
    @Override
    public void init(FulfillmentGroup fulfillmentGroup, Offer offer, String reason){
        this.fulfillmentGroup = fulfillmentGroup;
        this.offer = offer;
        if (reason == null) {
            this.reason = offer.getName();
        } else {
            this.reason = reason;
        }
    }

    @Override
    public Long getId() {
        return id;
    }

    @Override
    public void setId(Long id) {
        this.id = id;
    }

    @Override
    public FulfillmentGroup getFulfillmentGroup() {
        return fulfillmentGroup;
    }

    @Override
    public void setFulfillmentGroup(FulfillmentGroup fulfillmentGroup) {
        this.fulfillmentGroup = fulfillmentGroup;
    }

    @Override
    public Offer getOffer() {
        return offer;
    }

    public void setOffer(Offer offer) {
        this.offer = offer;
    }

    @Override
    public String getReason() {
        return reason;
    }

    @Override
    public void setReason(String reason) {
        this.reason = reason;
    }

    // Wraps the raw amount in the currency of the owning order; null amount -> null.
    @Override
    public Money getValue() {
        return value == null ? null : BroadleafCurrencyUtils.getMoney(value, getFulfillmentGroup().getOrder().getCurrency());
    }

    @Override
    public void setValue(Money value) {
        this.value = value.getAmount();
    }

    // NOTE(review): assumes the FulfillmentGroup implementation also implements
    // CurrencyCodeIdentifiable — a different implementation would throw
    // ClassCastException here; confirm against the configured entity.
    @Override
    public String getCurrencyCode() {
        return ((CurrencyCodeIdentifiable) fulfillmentGroup).getCurrencyCode();
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((fulfillmentGroup == null) ? 0 : fulfillmentGroup.hashCode());
        result = prime * result + ((offer == null) ? 0 : offer.hashCode());
        result = prime * result + ((reason == null) ? 0 : reason.hashCode());
        result = prime * result + ((value == null) ? 0 : value.hashCode());
        return result;
    }

    // Standard Broadleaf entity equality: identical ids short-circuit to equal,
    // otherwise fall back to field-by-field comparison (hashCode intentionally
    // does not include id).
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        FulfillmentGroupAdjustmentImpl other = (FulfillmentGroupAdjustmentImpl) obj;
        if (id != null && other.id != null) {
            return id.equals(other.id);
        }
        if (fulfillmentGroup == null) {
            if (other.fulfillmentGroup != null) {
                return false;
            }
        } else if (!fulfillmentGroup.equals(other.fulfillmentGroup)) {
            return false;
        }
        if (offer == null) {
            if (other.offer != null) {
                return false;
            }
        } else if (!offer.equals(other.offer)) {
            return false;
        }
        if (reason == null) {
            if (other.reason != null) {
                return false;
            }
        } else if (!reason.equals(other.reason)) {
            return false;
        }
        if (value == null) {
            if (other.value != null) {
                return false;
            }
        } else if (!value.equals(other.value)) {
            return false;
        }
        return true;
    }
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_domain_FulfillmentGroupAdjustmentImpl.java
|
337 |
/**
 * No-op base implementation of {@link MergeHandler}. Extend this class and
 * override only the callbacks you need: every getter returns {@code null}
 * (or {@code 0} for the priority) and every setter silently ignores its
 * argument.
 */
public class MergeHandlerAdapter implements MergeHandler {

    public String getName() {
        return null;
    }

    public void setName(String name) {
        // no-op by design
    }

    public String getXPath() {
        return null;
    }

    public void setXPath(String xpath) {
        // no-op by design
    }

    public int getPriority() {
        return 0;
    }

    public void setPriority(int priority) {
        // no-op by design
    }

    public MergeHandler[] getChildren() {
        return null;
    }

    public void setChildren(MergeHandler[] children) {
        // no-op by design
    }

    public Node[] merge(List<Node> nodeList1, List<Node> nodeList2,
            List<Node> exhaustedNodes) {
        // Adapter performs no merging.
        return null;
    }
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_handlers_MergeHandlerAdapter.java
|
1,119 |
/**
 * SQL {@code count()} aggregate function: counts the non-null values it is
 * fed, and can merge partial counts produced on different nodes.
 */
public class OSQLFunctionCount extends OSQLFunctionMathAbstract {
    public static final String NAME = "count";

    // Running count accumulated across execute() invocations.
    private long total = 0;

    public OSQLFunctionCount() {
        super(NAME, 1, 1);
    }

    public Object execute(OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters, OCommandContext iContext) {
        // Only non-null field values contribute to the count.
        final Object value = iParameters[0];
        if (value != null) {
            total += 1;
        }
        return total;
    }

    public boolean aggregateResults() {
        // count() is always an aggregate.
        return true;
    }

    public String getSyntax() {
        return "Syntax error: count(<field>|*)";
    }

    @Override
    public Object getResult() {
        return total;
    }

    @Override
    public void setResult(final Object iResult) {
        total = ((Number) iResult).longValue();
    }

    @Override
    public Object mergeDistributedResult(List<Object> resultsToMerge) {
        // Sum the partial counts computed on each node.
        long merged = 0;
        for (final Object partial : resultsToMerge) {
            merged += (Long) partial;
        }
        return merged;
    }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_misc_OSQLFunctionCount.java
|
227 |
/**
 * Tells OrientDB to call the annotated method AFTER the record is marshalled
 * and written to the database. Applies only to entity objects reachable by the
 * OrientDB engine after they have been registered.
 */
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface OAfterSerialization {
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_annotation_OAfterSerialization.java
|
1,302 |
/**
 * Extensible enumeration of the entity types a search Field can target.
 * Every constructed instance self-registers in a shared lookup map keyed by
 * its type string.
 */
public class FieldEntity implements Serializable, BroadleafEnumerationType {

    private static final long serialVersionUID = 1L;

    // Registry of every instance ever constructed, keyed by type string;
    // LinkedHashMap preserves registration order.
    private static final Map<String, FieldEntity> TYPES = new LinkedHashMap<String, FieldEntity>();

    public static final FieldEntity PRODUCT = new FieldEntity("PRODUCT", "product");
    public static final FieldEntity CUSTOMER = new FieldEntity("CUSTOMER", "customer");
    public static final FieldEntity ORDER = new FieldEntity("ORDER", "order");
    public static final FieldEntity ORDERITEM = new FieldEntity("ORDERITEM", "orderItem");
    public static final FieldEntity OFFER = new FieldEntity("OFFER", "offer");

    /** Looks up a previously registered instance by type string; null when unknown. */
    public static FieldEntity getInstance(final String type) {
        return TYPES.get(type);
    }

    private String type;
    private String friendlyType;

    public FieldEntity() {
        // required by serialization frameworks
    }

    public FieldEntity(final String type, final String friendlyType) {
        this.friendlyType = friendlyType;
        setType(type);
    }

    public String getType() {
        return type;
    }

    public String getFriendlyType() {
        return friendlyType;
    }

    private void setType(final String type) {
        this.type = type;
        // First registration wins; a later instance with the same key is not re-registered.
        if (!TYPES.containsKey(type)) {
            TYPES.put(type, this);
        }
    }

    @Override
    public int hashCode() {
        // Identity is defined solely by the type string (friendlyType is display-only).
        final int prime = 31;
        return prime + ((type == null) ? 0 : type.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        final FieldEntity other = (FieldEntity) obj;
        return (type == null) ? other.type == null : type.equals(other.type);
    }
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_search_domain_FieldEntity.java
|
282 |
/**
 * A MessageCreator that never sends mail: buildMessageBody echoes the template
 * unchanged and sendMessage only logs a warning. Acts as a safe placeholder
 * when no real mail configuration is wired up.
 */
public class NullMessageCreator extends MessageCreator {
private static final Log LOG = LogFactory.getLog(NullMessageCreator.class);
public NullMessageCreator(JavaMailSender mailSender) {
super(mailSender);
}
@Override
public String buildMessageBody(EmailInfo info, HashMap<String,Object> props) {
// No templating is applied — the raw template string is returned as-is.
return info.getEmailTemplate();
}
@Override
public void sendMessage(final HashMap<String,Object> props) throws MailException {
// Intentionally sends nothing; warn so the misconfiguration is visible in logs.
LOG.warn("NullMessageCreator is defined -- specify a real message creator to send emails");
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_email_service_message_NullMessageCreator.java
|
1,100 |
/**
 * Micro-benchmark for {@link AtomicLong#incrementAndGet()}: first a
 * single-threaded baseline, then the same number of increments per thread
 * across NUMBER_OF_THREADS contending threads.
 */
public class SimpleCounterBenchmark {

    // Benchmark parameters — made final: they are constants and were never reassigned.
    private static final long NUMBER_OF_ITERATIONS = 10000000;
    private static final int NUMBER_OF_THREADS = 100;

    public static void main(String[] args) throws Exception {
        final AtomicLong counter = new AtomicLong();

        // Single-threaded baseline.
        StopWatch stopWatch = new StopWatch().start();
        System.out.println("Running " + NUMBER_OF_ITERATIONS);
        for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
            counter.incrementAndGet();
        }
        System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));

        // Contended run: all threads hammer the same counter; the latch marks completion.
        System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
        final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
        Thread[] threads = new Thread[NUMBER_OF_THREADS];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(new Runnable() {
                @Override
                public void run() {
                    for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
                        counter.incrementAndGet();
                    }
                    latch.countDown();
                }
            });
        }
        stopWatch = new StopWatch().start();
        for (Thread thread : threads) {
            thread.start();
        }
        latch.await();
        stopWatch.stop();
        System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
    }
}
| 0true
|
src_test_java_org_elasticsearch_benchmark_counter_SimpleCounterBenchmark.java
|
1,346 |
/**
 * Callback contract for observing node-level index deletion events.
 */
public static interface Listener {
// Fired when an index has been deleted on the given node.
void onNodeIndexDeleted(String index, String nodeId);
// Fired when an index's on-disk store has been deleted on the given node.
void onNodeIndexStoreDeleted(String index, String nodeId);
}
| 0true
|
src_main_java_org_elasticsearch_cluster_action_index_NodeIndexDeletedAction.java
|
2,054 |
/**
 * Map GET operation: reads the value for {@code dataKey} from the owning
 * partition's record store and returns it in serialized (Data) form.
 * Marked read-only, so no backups are produced.
 */
public final class GetOperation extends KeyBasedMapOperation
implements IdentifiedDataSerializable, ReadonlyOperation {
private Data result;
public GetOperation() {
}
public GetOperation(String name, Data dataKey) {
super(name, dataKey);
}
public void run() {
// Serialize at read time so the response can cross the wire as-is.
result = mapService.toData(recordStore.get(dataKey));
}
public void afterRun() {
// Let registered interceptors observe the value after the get completed.
mapService.interceptAfterGet(name, result);
}
@Override
public Object getResponse() {
return result;
}
@Override
public String toString() {
return "GetOperation{}";
}
public int getFactoryId() {
return MapDataSerializerHook.F_ID;
}
public int getId() {
return MapDataSerializerHook.GET;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_operation_GetOperation.java
|
23 |
/**
 * Vertex implementation that keeps its outgoing edges in a concurrent sorted
 * set, ordered by the id of each edge's end vertex.
 */
static class EdgeVertex extends Vertex {
// Sorted by destination vertex so neighbor iteration order is deterministic.
private SortedSet<Edge> outEdges = new ConcurrentSkipListSet<Edge>(new Comparator<Edge>() {
@Override
public int compare(Edge e1, Edge e2) {
return e1.getEnd().compareTo(e2.getEnd());
}
});
EdgeVertex(long id) {
super(id);
}
@Override
public Iterable<Vertex> getNeighbors(final int value) {
// Lazily filter edges on the "number" property (skipped entirely when
// CHECK_VALUE is off) and project each surviving edge to its end vertex.
return Iterables.transform(Iterables.filter(outEdges, new Predicate<Edge>() {
@Override
public boolean apply(@Nullable Edge edge) {
return !CHECK_VALUE || ((Integer) edge.getProperty("number")).intValue() == value;
}
}), new Function<Edge, Vertex>() {
@Override
public Vertex apply(@Nullable Edge edge) {
return edge.getEnd();
}
});
}
void addOutEdge(Edge e) {
outEdges.add(e);
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java
|
747 |
/**
 * Action definition for the get API ("get"): a singleton plus factory methods
 * for its response object and request builder.
 */
public class GetAction extends Action<GetRequest, GetResponse, GetRequestBuilder> {
public static final GetAction INSTANCE = new GetAction();
public static final String NAME = "get";
// Singleton — all access goes through INSTANCE.
private GetAction() {
super(NAME);
}
@Override
public GetResponse newResponse() {
return new GetResponse();
}
@Override
public GetRequestBuilder newRequestBuilder(Client client) {
return new GetRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_get_GetAction.java
|
939 |
/**
 * Transport-layer entry point for broadcast operation requests arriving from
 * other nodes: executes the action and streams the result — or the failure —
 * back over the originating channel.
 */
class TransportHandler extends BaseTransportRequestHandler<Request> {
@Override
public Request newInstance() {
return newRequest();
}
@Override
public String executor() {
// Handled on the calling thread; the action itself forks work as needed.
return ThreadPool.Names.SAME;
}
@Override
public void messageReceived(Request request, final TransportChannel channel) throws Exception {
// we just send back a response, no need to fork a listener
request.listenerThreaded(false);
// we don't spawn, so if we get a request with no threading, change it to single threaded
if (request.operationThreading() == BroadcastOperationThreading.NO_THREADS) {
request.operationThreading(BroadcastOperationThreading.SINGLE_THREAD);
}
execute(request, new ActionListener<Response>() {
@Override
public void onResponse(Response response) {
try {
channel.sendResponse(response);
} catch (Throwable e) {
// Sending the success response failed — route through the failure path.
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(e);
} catch (Exception e1) {
logger.warn("Failed to send response", e1);
}
}
});
}
}
| 0true
|
src_main_java_org_elasticsearch_action_support_broadcast_TransportBroadcastOperationAction.java
|
136 |
/**
 * Tests for OBooleanSerializer: round-trips through the plain, native, and
 * direct-memory serialization paths.
 */
@Test
public class BooleanSerializerTest {
// A boolean serializes to exactly one byte.
private static final int FIELD_SIZE = 1;
private static final Boolean OBJECT_TRUE = true;
private static final Boolean OBJECT_FALSE = false;
private OBooleanSerializer booleanSerializer;
byte[] stream = new byte[FIELD_SIZE];
@BeforeClass
public void beforeClass() {
booleanSerializer = new OBooleanSerializer();
}
public void testFieldSize() {
Assert.assertEquals(booleanSerializer.getObjectSize(null), FIELD_SIZE);
}
public void testSerialize() {
booleanSerializer.serialize(OBJECT_TRUE, stream, 0);
Assert.assertEquals(booleanSerializer.deserialize(stream, 0), OBJECT_TRUE);
booleanSerializer.serialize(OBJECT_FALSE, stream, 0);
Assert.assertEquals(booleanSerializer.deserialize(stream, 0), OBJECT_FALSE);
}
public void testSerializeNative() {
booleanSerializer.serializeNative(OBJECT_TRUE, stream, 0);
Assert.assertEquals(booleanSerializer.deserializeNative(stream, 0), OBJECT_TRUE);
booleanSerializer.serializeNative(OBJECT_FALSE, stream, 0);
Assert.assertEquals(booleanSerializer.deserializeNative(stream, 0), OBJECT_FALSE);
}
public void testNativeDirectMemoryCompatibility() {
// Natively serialized bytes must be readable back through direct memory.
booleanSerializer.serializeNative(OBJECT_TRUE, stream, 0);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
try {
Assert.assertEquals(booleanSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT_TRUE);
} finally {
// Direct memory is off-heap and must be freed explicitly.
pointer.free();
}
booleanSerializer.serializeNative(OBJECT_FALSE, stream, 0);
pointer = new ODirectMemoryPointer(stream);
try {
Assert.assertEquals(booleanSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT_FALSE);
} finally {
pointer.free();
}
}
}
| 0true
|
commons_src_test_java_com_orientechnologies_common_serialization_types_BooleanSerializerTest.java
|
757 |
/**
 * Per-item failure entry of a multi-get response: identifies the document
 * (index/type/id) that failed and carries the failure message. Streamable so
 * it can be sent between nodes.
 */
public static class Failure implements Streamable {
private String index;
private String type;
private String id;
private String message;
// No-arg constructor for deserialization via readFailure().
Failure() {
}
public Failure(String index, String type, String id, String message) {
this.index = index;
this.type = type;
this.id = id;
this.message = message;
}
/**
 * The index name of the action.
 */
public String getIndex() {
return this.index;
}
/**
 * The type of the action.
 */
public String getType() {
return type;
}
/**
 * The id of the action.
 */
public String getId() {
return id;
}
/**
 * The failure message.
 */
public String getMessage() {
return this.message;
}
// Factory that reads a Failure from the stream.
public static Failure readFailure(StreamInput in) throws IOException {
Failure failure = new Failure();
failure.readFrom(in);
return failure;
}
@Override
public void readFrom(StreamInput in) throws IOException {
// Field order must mirror writeTo(); only `type` may be null on the wire.
index = in.readString();
type = in.readOptionalString();
id = in.readString();
message = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(index);
out.writeOptionalString(type);
out.writeString(id);
out.writeString(message);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_get_MultiGetResponse.java
|
3,431 |
/**
 * Static factories for the ResponseHandler variants used to complete
 * operations: a no-op handler, an error-logging handler, and a remote handler
 * that ships a NormalResponse back to the calling node.
 */
public final class ResponseHandlerFactory {
private static final NoResponseHandler NO_RESPONSE_HANDLER = new NoResponseHandler();
public static void setRemoteResponseHandler(NodeEngine nodeEngine, Operation op) {
op.setResponseHandler(createRemoteResponseHandler(nodeEngine, op));
}
public static ResponseHandler createRemoteResponseHandler(NodeEngine nodeEngine, Operation op) {
// call-id 0 means no caller is waiting; an op that claims to return a
// response without a call-id is a programming error.
if (op.getCallId() == 0) {
if (op.returnsResponse()) {
throw new HazelcastException("Op: " + op.getClass().getName() + " can not return response without call-id!");
}
return NO_RESPONSE_HANDLER;
}
return new RemoteInvocationResponseHandler(nodeEngine, op);
}
public static ResponseHandler createEmptyResponseHandler() {
return NO_RESPONSE_HANDLER;
}
// Silently discards every response.
private static class NoResponseHandler implements ResponseHandler {
@Override
public void sendResponse(final Object obj) {
}
@Override
public boolean isLocal() {
return false;
}
}
public static ResponseHandler createErrorLoggingResponseHandler(ILogger logger) {
return new ErrorLoggingResponseHandler(logger);
}
// Logs Throwable responses at SEVERE; ignores everything else.
private static class ErrorLoggingResponseHandler implements ResponseHandler {
private final ILogger logger;
private ErrorLoggingResponseHandler(ILogger logger) {
this.logger = logger;
}
@Override
public void sendResponse(final Object obj) {
if (obj instanceof Throwable) {
Throwable t = (Throwable) obj;
logger.severe(t);
}
}
@Override
public boolean isLocal() {
return true;
}
}
// Wraps the result in a NormalResponse (if needed) and sends it back to the
// caller's address; guards against sending twice for the same call.
private static class RemoteInvocationResponseHandler implements ResponseHandler {
private final NodeEngine nodeEngine;
private final Operation op;
// Flipped on the first send; a second send raises ResponseAlreadySentException.
private final AtomicBoolean sent = new AtomicBoolean(false);
private RemoteInvocationResponseHandler(NodeEngine nodeEngine, Operation op) {
this.nodeEngine = nodeEngine;
this.op = op;
}
@Override
public void sendResponse(Object obj) {
long callId = op.getCallId();
Connection conn = op.getConnection();
if (!sent.compareAndSet(false, true)) {
throw new ResponseAlreadySentException("NormalResponse already sent for call: " + callId
+ " to " + conn.getEndPoint() + ", current-response: " + obj);
}
NormalResponse response;
if (!(obj instanceof NormalResponse)) {
response = new NormalResponse(obj, op.getCallId(), 0, op.isUrgent());
} else {
response = (NormalResponse) obj;
}
nodeEngine.getOperationService().send(response, op.getCallerAddress());
}
@Override
public boolean isLocal() {
return false;
}
}
// Utility class — not instantiable.
private ResponseHandlerFactory() {
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_spi_impl_ResponseHandlerFactory.java
|
598 |
// Starts one Hazelcast node (out of `count`) on a fixed TCP/IP port, after a
// random delay, to stress the cluster join protocol.
ex.execute(new Runnable() {
public void run() {
try {
// Stagger node starts by up to 10 seconds to randomize join ordering.
Thread.sleep(random.nextInt(10) * 1000);
final Config config = new Config();
config.setProperty("hazelcast.wait.seconds.before.join", "5");
final NetworkConfig networkConfig = config.getNetworkConfig();
// TCP/IP discovery only; multicast disabled.
networkConfig.getJoin().getMulticastConfig().setEnabled(false);
TcpIpConfig tcpIpConfig = networkConfig.getJoin().getTcpIpConfig();
tcpIpConfig.setEnabled(true);
int port = 12301;
// Fixed port per node: base port + this node's seed, no auto-increment.
networkConfig.setPortAutoIncrement(false);
networkConfig.setPort(port + seed);
// Every node lists all fixed ports as join candidates.
for (int i = 0; i < count; i++) {
tcpIpConfig.addMember("127.0.0.1:" + (port + i));
}
HazelcastInstance h = Hazelcast.newHazelcastInstance(config);
mapOfInstances.put(seed, h);
latch.countDown();
} catch (Exception e) {
// Test code: failures are printed; the latch is NOT counted down, so the
// awaiting test will time out instead of passing spuriously.
e.printStackTrace();
}
}
});
| 0true
|
hazelcast_src_test_java_com_hazelcast_cluster_JoinStressTest.java
|
316 |
/**
 * Configuration for a cluster's hole file (".och" extension), which grows in
 * 50% increments of its current size.
 */
public class OStorageClusterHoleConfiguration extends OStorageFileConfiguration {
private static final long serialVersionUID = 1L;
private static final String DEF_EXTENSION = ".och";
private static final String DEF_INCREMENT_SIZE = "50%";
public OStorageClusterHoleConfiguration() {
super();
}
public OStorageClusterHoleConfiguration(OStorageSegmentConfiguration iParent, String iPath, String iType, String iMaxSize) {
// The extension is appended here, so callers pass the path without it.
super(iParent, iPath + DEF_EXTENSION, iType, iMaxSize, DEF_INCREMENT_SIZE);
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_config_OStorageClusterHoleConfiguration.java
|
291 |
/**
 * Thrown when an action targets a node id that does not exist in the cluster.
 */
public class NoSuchNodeException extends FailedNodeException {
public NoSuchNodeException(String nodeId) {
// No underlying cause — the node simply is not known.
super(nodeId, "No such node [" + nodeId + "]", null);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_NoSuchNodeException.java
|
3,754 |
/**
 * HttpServletRequest wrapper that resolves the caller's session against the
 * Hazelcast-backed distributed session store, transparently creating or
 * reloading cluster sessions as needed.
 */
private class RequestWrapper extends HttpServletRequestWrapper {
final ResponseWrapper res;
HazelcastHttpSession hazelcastSession;
String requestedSessionId;
public RequestWrapper(final HttpServletRequest req,
final ResponseWrapper res) {
super(req);
this.res = res;
// Expose this wrapper to downstream code via a request attribute.
req.setAttribute(HAZELCAST_REQUEST, this);
}
public void setHazelcastSession(HazelcastHttpSession hazelcastSession, String requestedSessionId) {
this.hazelcastSession = hazelcastSession;
this.requestedSessionId = requestedSessionId;
}
// The container's own (non-clustered) session backing this request.
HttpSession getOriginalSession(boolean create) {
return super.getSession(create);
}
@Override
public RequestDispatcher getRequestDispatcher(final String path) {
// Dispatch against the ORIGINAL request so the container sees its own
// request type, not this wrapper.
final ServletRequest original = getRequest();
return new RequestDispatcher() {
public void forward(ServletRequest servletRequest, ServletResponse servletResponse)
throws ServletException, IOException {
original.getRequestDispatcher(path).forward(servletRequest, servletResponse);
}
public void include(ServletRequest servletRequest, ServletResponse servletResponse)
throws ServletException, IOException {
original.getRequestDispatcher(path).include(servletRequest, servletResponse);
}
};
}
public HazelcastHttpSession fetchHazelcastSession() {
// Session id is resolved from the cookie first, then from a request parameter.
if (requestedSessionId == null) {
requestedSessionId = getSessionCookie(this);
}
if (requestedSessionId == null) {
requestedSessionId = getParameter(HAZELCAST_SESSION_COOKIE_NAME);
}
if (requestedSessionId != null) {
hazelcastSession = getSessionWithId(requestedSessionId);
if (hazelcastSession == null) {
// Not known locally — check whether another node owns this session id.
final Boolean existing = (Boolean) getClusterMap().get(requestedSessionId);
if (existing != null && existing) {
// we already have the session in the cluster loading it...
hazelcastSession = createNewSession(RequestWrapper.this, requestedSessionId);
}
}
}
return hazelcastSession;
}
@Override
public HttpSession getSession() {
return getSession(true);
}
@Override
public HazelcastHttpSession getSession(final boolean create) {
// Drop a cached session that has since been invalidated.
if (hazelcastSession != null && !hazelcastSession.isValid()) {
LOGGER.finest("Session is invalid!");
destroySession(hazelcastSession, true);
hazelcastSession = null;
} else if (hazelcastSession != null) {
return hazelcastSession;
}
HttpSession originalSession = getOriginalSession(false);
if (originalSession != null) {
// Map the container session back to its clustered counterpart, if any.
String hazelcastSessionId = MAP_ORIGINAL_SESSIONS.get(originalSession.getId());
if (hazelcastSessionId != null) {
hazelcastSession = MAP_SESSIONS.get(hazelcastSessionId);
return hazelcastSession;
}
// Orphaned container session: unlink and invalidate it.
MAP_ORIGINAL_SESSIONS.remove(originalSession.getId());
originalSession.invalidate();
}
hazelcastSession = fetchHazelcastSession();
if (hazelcastSession == null && create) {
hazelcastSession = createNewSession(RequestWrapper.this, null);
}
if (deferredWrite) {
prepareReloadingSession(hazelcastSession);
}
return hazelcastSession;
}
} // END of RequestWrapper
| 1no label
|
hazelcast-wm_src_main_java_com_hazelcast_web_WebFilter.java
|
759 |
/**
 * Shard-level slice of a multi-get request: holds parallel per-item lists
 * (locations, types, ids, fields, versions, version types, fetch-source
 * contexts) for every item that routes to one shard. The i-th element of each
 * list describes the same item.
 */
public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetShardRequest> {
private int shardId;
private String preference;
// Tri-state: null means "not set", which realtime() treats as true.
Boolean realtime;
boolean refresh;
// Position of each item in the original MultiGetRequest, used to re-order responses.
IntArrayList locations;
List<String> types;
List<String> ids;
List<String[]> fields;
LongArrayList versions;
List<VersionType> versionTypes;
List<FetchSourceContext> fetchSourceContexts;
MultiGetShardRequest() {
}
MultiGetShardRequest(String index, int shardId) {
super(index);
this.shardId = shardId;
locations = new IntArrayList();
types = new ArrayList<String>();
ids = new ArrayList<String>();
fields = new ArrayList<String[]>();
versions = new LongArrayList();
versionTypes = new ArrayList<VersionType>();
fetchSourceContexts = new ArrayList<FetchSourceContext>();
}
public int shardId() {
return this.shardId;
}
/**
 * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
 * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
 * a custom value, which guarantees that the same order will be used across different requests.
 */
public MultiGetShardRequest preference(String preference) {
this.preference = preference;
return this;
}
public String preference() {
return this.preference;
}
public boolean realtime() {
// Unset (null) defaults to realtime gets.
return this.realtime == null ? true : this.realtime;
}
public MultiGetShardRequest realtime(Boolean realtime) {
this.realtime = realtime;
return this;
}
public boolean refresh() {
return this.refresh;
}
public MultiGetShardRequest refresh(boolean refresh) {
this.refresh = refresh;
return this;
}
// Appends one item; all parallel lists must be appended together to stay aligned.
public void add(int location, @Nullable String type, String id, String[] fields, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
this.locations.add(location);
this.types.add(type);
this.ids.add(id);
this.fields.add(fields);
this.versions.add(version);
this.versionTypes.add(versionType);
this.fetchSourceContexts.add(fetchSourceContext);
}
@Override
public void readFrom(StreamInput in) throws IOException {
// Wire layout must mirror writeTo(): item count, then per-item fields, then
// preference/refresh/realtime trailers.
super.readFrom(in);
int size = in.readVInt();
locations = new IntArrayList(size);
types = new ArrayList<String>(size);
ids = new ArrayList<String>(size);
fields = new ArrayList<String[]>(size);
versions = new LongArrayList(size);
versionTypes = new ArrayList<VersionType>(size);
fetchSourceContexts = new ArrayList<FetchSourceContext>(size);
for (int i = 0; i < size; i++) {
locations.add(in.readVInt());
// A boolean marker precedes the (nullable) type.
if (in.readBoolean()) {
types.add(in.readSharedString());
} else {
types.add(null);
}
ids.add(in.readString());
// Field-name array is length-prefixed; 0 means "no fields requested" (null).
int size1 = in.readVInt();
if (size1 > 0) {
String[] fields = new String[size1];
for (int j = 0; j < size1; j++) {
fields[j] = in.readString();
}
this.fields.add(fields);
} else {
fields.add(null);
}
versions.add(in.readVLong());
versionTypes.add(VersionType.fromValue(in.readByte()));
fetchSourceContexts.add(FetchSourceContext.optionalReadFromStream(in));
}
preference = in.readOptionalString();
refresh = in.readBoolean();
// Tri-state realtime is encoded as a byte: -1 = unset, 0 = false, 1 = true.
byte realtime = in.readByte();
if (realtime == 0) {
this.realtime = false;
} else if (realtime == 1) {
this.realtime = true;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
// All parallel lists share the same length; types.size() is the item count.
out.writeVInt(types.size());
for (int i = 0; i < types.size(); i++) {
out.writeVInt(locations.get(i));
if (types.get(i) == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeSharedString(types.get(i));
}
out.writeString(ids.get(i));
if (fields.get(i) == null) {
out.writeVInt(0);
} else {
out.writeVInt(fields.get(i).length);
for (String field : fields.get(i)) {
out.writeString(field);
}
}
out.writeVLong(versions.get(i));
out.writeByte(versionTypes.get(i).getValue());
FetchSourceContext fetchSourceContext = fetchSourceContexts.get(i);
FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
}
out.writeOptionalString(preference);
out.writeBoolean(refresh);
// Mirror of the tri-state encoding read in readFrom().
if (realtime == null) {
out.writeByte((byte) -1);
} else if (realtime == false) {
out.writeByte((byte) 0);
} else {
out.writeByte((byte) 1);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_get_MultiGetShardRequest.java
|
2,161 |
/**
 * Transactional map unlock: releases the lock held on {@code dataKey} by the
 * owner identified by {@code ownerUuid} and the calling thread id, carrying
 * the record version for the transaction. Backed up to keep lock state
 * consistent on replicas.
 */
public class TxnUnlockOperation extends LockAwareOperation implements MapTxnOperation, BackupAwareOperation {

    private long version;
    private String ownerUuid;

    public TxnUnlockOperation() {
    }

    public TxnUnlockOperation(String name, Data dataKey, long version) {
        // -1 ttl means the lock-aware base applies no time-to-live.
        super(name, dataKey, -1);
        this.version = version;
    }

    @Override
    public void run() {
        // (Removed a leftover debug System.out.println of the owner thread/partition.)
        recordStore.unlock(dataKey, ownerUuid, getThreadId());
    }

    public boolean shouldWait() {
        // Wait while some other owner still holds the key's lock.
        return !recordStore.canAcquireLock(dataKey, ownerUuid, getThreadId());
    }

    public long getVersion() {
        return version;
    }

    public void setVersion(long version) {
        this.version = version;
    }

    @Override
    public Object getResponse() {
        return Boolean.TRUE;
    }

    public boolean shouldNotify() {
        return true;
    }

    public Operation getBackupOperation() {
        TxnUnlockBackupOperation txnUnlockOperation = new TxnUnlockBackupOperation(name, dataKey);
        txnUnlockOperation.setThreadId(getThreadId());
        return txnUnlockOperation;
    }

    public void onWaitExpire() {
        // Lock wait timed out: report failure to the caller instead of hanging.
        final ResponseHandler responseHandler = getResponseHandler();
        responseHandler.sendResponse(false);
    }

    public final int getAsyncBackupCount() {
        return mapContainer.getAsyncBackupCount();
    }

    public final int getSyncBackupCount() {
        return mapContainer.getBackupCount();
    }

    @Override
    public void setOwnerUuid(String ownerUuid) {
        this.ownerUuid = ownerUuid;
    }

    @Override
    public boolean shouldBackup() {
        return true;
    }

    public WaitNotifyKey getNotifiedKey() {
        return getWaitKey();
    }

    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        super.writeInternal(out);
        out.writeLong(version);
        out.writeUTF(ownerUuid);
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        super.readInternal(in);
        version = in.readLong();
        ownerUuid = in.readUTF();
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_tx_TxnUnlockOperation.java
|
2,765 |
/**
 * Netty upstream handler that turns incoming HttpRequests into transport
 * dispatches. Sharable: one stateless instance serves all channels.
 */
@ChannelHandler.Sharable
public class HttpRequestHandler extends SimpleChannelUpstreamHandler {
private final NettyHttpServerTransport serverTransport;
public HttpRequestHandler(NettyHttpServerTransport serverTransport) {
this.serverTransport = serverTransport;
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
HttpRequest request = (HttpRequest) e.getMessage();
// the netty HTTP handling always copy over the buffer to its own buffer, either in NioWorker internally
// when reading, or using a cumalation buffer
serverTransport.dispatchRequest(new NettyHttpRequest(request, e.getChannel()), new NettyHttpChannel(serverTransport, e.getChannel(), request));
super.messageReceived(ctx, e);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
// Delegate all pipeline exceptions to the transport.
serverTransport.exceptionCaught(ctx, e);
}
}
| 1no label
|
src_main_java_org_elasticsearch_http_netty_HttpRequestHandler.java
|
1,456 |
/**
 * Builds the attribute map advertised by this node during discovery, starting
 * from the "node.*" settings and augmented by pluggable providers.
 */
public class DiscoveryNodeService extends AbstractComponent {
// Copy-on-write so providers can be registered while attributes are being built.
private final List<CustomAttributesProvider> customAttributesProviders = new CopyOnWriteArrayList<CustomAttributesProvider>();
@Inject
public DiscoveryNodeService(Settings settings) {
super(settings);
}
public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) {
customAttributesProviders.add(customAttributesProvider);
return this;
}
public Map<String, String> buildAttributes() {
Map<String, String> attributes = Maps.newHashMap(settings.getByPrefix("node.").getAsMap());
attributes.remove("name"); // name is extracted in other places
// Normalize the client/data flags: defaults are dropped, and a client node
// implies data=false.
if (attributes.containsKey("client")) {
if (attributes.get("client").equals("false")) {
attributes.remove("client"); // this is the default
} else {
// if we are client node, don't store data ...
attributes.put("data", "false");
}
}
if (attributes.containsKey("data")) {
if (attributes.get("data").equals("true")) {
attributes.remove("data");
}
}
for (CustomAttributesProvider provider : customAttributesProviders) {
try {
Map<String, String> customAttributes = provider.buildAttributes();
if (customAttributes != null) {
// Settings-derived attributes win; providers cannot overwrite them.
for (Map.Entry<String, String> entry : customAttributes.entrySet()) {
if (!attributes.containsKey(entry.getKey())) {
attributes.put(entry.getKey(), entry.getValue());
}
}
}
} catch (Exception e) {
// A broken provider must not prevent the node from advertising itself.
logger.warn("failed to build custom attributes from provider [{}]", e, provider);
}
}
return attributes;
}
public static interface CustomAttributesProvider {
Map<String, String> buildAttributes();
}
}
| 0true
|
src_main_java_org_elasticsearch_cluster_node_DiscoveryNodeService.java
|
36 |
/**
 * Blueprints GraphQueryTestSuite adapted for Titan: pre-creates the property
 * keys the suite queries against, since Titan requires schema before use.
 */
public class TitanGraphQueryTestSuite extends GraphQueryTestSuite {
public TitanGraphQueryTestSuite(final GraphTest graphTest) {
super(graphTest);
}
@Override
public void testGraphQueryForVertices() {
TitanGraph g = (TitanGraph) graphTest.generateGraph();
// Declare the "age" key once, if absent, before running the stock test.
if (g.getRelationType("age") == null) {
TitanManagement mgmt = g.getManagementSystem();
mgmt.makePropertyKey("age").dataType(Integer.class).cardinality(Cardinality.SINGLE).make();
mgmt.commit();
}
g.shutdown();
super.testGraphQueryForVertices();
}
@Override
public void testGraphQueryForEdges() {
TitanGraph g = (TitanGraph) graphTest.generateGraph();
// Declare the "weight" key once, if absent, before running the stock test.
if (g.getRelationType("weight") == null) {
TitanManagement mgmt = g.getManagementSystem();
mgmt.makePropertyKey("weight").dataType(Double.class).cardinality(Cardinality.SINGLE).make();
mgmt.commit();
}
g.shutdown();
super.testGraphQueryForEdges();
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_blueprints_TitanGraphQueryTestSuite.java
|
286 |
/**
 * Hook for contributing custom objects to a script engine's variable scope.
 * Implementations add their objects in {@link #bind} and must remove them
 * again in {@link #unbind}.
 */
public interface OScriptInjection {
    /** Adds this injection's objects to the given bindings. */
    void bind(Bindings binding);

    /** Removes this injection's objects from the given bindings. */
    void unbind(Bindings binding);
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_script_OScriptInjection.java
|
975 |
public class IndicesReplicationOperationRequest<T extends IndicesReplicationOperationRequest> extends ActionRequest<T> {
protected TimeValue timeout = ShardReplicationOperationRequest.DEFAULT_TIMEOUT;
protected String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
protected ReplicationType replicationType = ReplicationType.DEFAULT;
protected WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
public TimeValue timeout() {
return timeout;
}
/**
* A timeout to wait if the delete by query operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
@SuppressWarnings("unchecked")
public final T timeout(TimeValue timeout) {
this.timeout = timeout;
return (T) this;
}
/**
* A timeout to wait if the delete by query operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
@SuppressWarnings("unchecked")
public T timeout(String timeout) {
this.timeout = TimeValue.parseTimeValue(timeout, null);
return (T) this;
}
public String[] indices() {
return this.indices;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public T indicesOptions(IndicesOptions indicesOptions) {
if (indicesOptions == null) {
throw new IllegalArgumentException("IndicesOptions must not be null");
}
this.indicesOptions = indicesOptions;
return (T) this;
}
/**
* The indices the request will execute against.
*/
@SuppressWarnings("unchecked")
public final T indices(String[] indices) {
this.indices = indices;
return (T) this;
}
public ReplicationType replicationType() {
return this.replicationType;
}
/**
* Sets the replication type.
*/
@SuppressWarnings("unchecked")
public final T replicationType(ReplicationType replicationType) {
if (replicationType == null) {
throw new IllegalArgumentException("ReplicationType must not be null");
}
this.replicationType = replicationType;
return (T) this;
}
/**
* Sets the replication type.
*/
public final T replicationType(String replicationType) {
return replicationType(ReplicationType.fromString(replicationType));
}
public WriteConsistencyLevel consistencyLevel() {
return this.consistencyLevel;
}
/**
* Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT}
*/
@SuppressWarnings("unchecked")
public final T consistencyLevel(WriteConsistencyLevel consistencyLevel) {
if (consistencyLevel == null) {
throw new IllegalArgumentException("WriteConsistencyLevel must not be null");
}
this.consistencyLevel = consistencyLevel;
return (T) this;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
replicationType = ReplicationType.fromId(in.readByte());
consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
timeout = TimeValue.readTimeValue(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
}
/**
 * Serializes this request to the stream; field order must stay in sync with
 * {@code readFrom}.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeByte(replicationType.id());
    out.writeByte(consistencyLevel.id());
    timeout.writeTo(out);
    // NOTE(review): the nullable variant is used here while readFrom uses the
    // non-nullable readStringArray — confirm a null indices array is legal on this
    // request and that the asymmetry is intentional.
    out.writeStringArrayNullable(indices);
    indicesOptions.writeIndicesOptions(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_support_replication_IndicesReplicationOperationRequest.java
|
313 |
// Background worker: removes the entry while holding the map lock for `key`, signals
// the main thread via removeWhileLocked, then parks on checkingKey until the main
// thread has finished its checks, and finally releases the lock.
new Thread() {
    public void run() {
        try {
            map.lock(key);
            map.remove(key);
            removeWhileLocked.countDown();
            checkingKey.await();
            map.unlock(key);
        } catch (Exception e) {
            // NOTE(review): swallowed — an interrupt here would leave `key` locked;
            // presumably acceptable in this test, confirm.
        }
    }
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
1,303 |
/**
 * Crash/restore test for plocal paginated storage: an OrientDB server is forked into a
 * separate JVM, identical documents are written both to a local "base" database and,
 * through the server, to a remote "test" database. The server process is killed while
 * concurrent update tasks are running, the remote storage is then reopened locally, and
 * the two databases are compared record-by-record to measure how much data survived.
 */
@Test
public class LocalPaginatedStorageUpdateCrashRestore {
    // Reference database written directly via plocal; assumed always consistent.
    private ODatabaseDocumentTx baseDocumentTx;
    // Database written through the remote server that is killed mid-run.
    private ODatabaseDocumentTx testDocumentTx;
    private File buildDir;
    // Source of unique document ids; only incremented during initial document creation.
    private int idGen = 0;
    // Per-id exclusive locks so concurrent update tasks never race on the same document.
    private OLockManager<Integer, Thread> idLockManager = new OLockManager<Integer, Thread>(true, 1000);
    private ExecutorService executorService = Executors.newCachedThreadPool();
    // Forked server JVM; destroyed later to simulate a hard crash.
    private Process process;

    /**
     * Disables L1/L2 caches, prepares the build directory and forks the server JVM.
     */
    @BeforeClass
    public void beforeClass() throws Exception {
        OGlobalConfiguration.CACHE_LEVEL1_ENABLED.setValue(false);
        OGlobalConfiguration.CACHE_LEVEL1_SIZE.setValue(0);
        OGlobalConfiguration.CACHE_LEVEL2_ENABLED.setValue(false);
        OGlobalConfiguration.CACHE_LEVEL2_SIZE.setValue(0);
        String buildDirectory = System.getProperty("buildDirectory", ".");
        buildDirectory += "/localPaginatedStorageUpdateCrashRestore";
        buildDir = new File(buildDirectory);
        if (buildDir.exists())
            // NOTE(review): File.delete() fails silently on a non-empty directory —
            // presumably the directory is expected to be empty here; confirm leftovers
            // from a previous run cannot break this one.
            buildDir.delete();
        buildDir.mkdir();
        String javaExec = System.getProperty("java.home") + "/bin/java";
        System.setProperty("ORIENTDB_HOME", buildDirectory);
        // Fork the server with the same classpath as this test JVM.
        ProcessBuilder processBuilder = new ProcessBuilder(javaExec, "-Xmx2048m", "-classpath", System.getProperty("java.class.path"),
            "-DORIENTDB_HOME=" + buildDirectory, RemoteDBRunner.class.getName());
        processBuilder.inheritIO();
        process = processBuilder.start();
        // Fixed grace period for the forked server to start before tests connect.
        Thread.sleep(5000);
    }

    /** Entry point of the forked JVM: starts an OrientDB server and spins forever. */
    public static final class RemoteDBRunner {
        public static void main(String[] args) throws Exception {
            OGlobalConfiguration.CACHE_LEVEL1_ENABLED.setValue(false);
            OGlobalConfiguration.CACHE_LEVEL1_SIZE.setValue(0);
            OGlobalConfiguration.CACHE_LEVEL2_ENABLED.setValue(false);
            OGlobalConfiguration.CACHE_LEVEL2_SIZE.setValue(0);
            OServer server = OServerMain.create();
            server.startup(RemoteDBRunner.class
                .getResourceAsStream("/com/orientechnologies/orient/core/storage/impl/local/paginated/db-update-config.xml"));
            server.activate();
            // Busy-wait forever; the parent test kills this process to simulate a crash.
            while (true)
                ;
        }
    }

    /** Drops both databases and removes the build directory. */
    @AfterClass
    public void afterClass() {
        testDocumentTx.drop();
        baseDocumentTx.drop();
        Assert.assertTrue(buildDir.delete());
    }

    /** (Re)creates the base database and opens the remote test database. */
    @BeforeMethod
    public void beforeMethod() {
        baseDocumentTx = new ODatabaseDocumentTx("plocal:" + buildDir.getAbsolutePath()
            + "/baseLocalPaginatedStorageUpdateCrashRestore");
        if (baseDocumentTx.exists()) {
            baseDocumentTx.open("admin", "admin");
            baseDocumentTx.drop();
        }
        baseDocumentTx.create();
        testDocumentTx = new ODatabaseDocumentTx("remote:localhost:3500/testLocalPaginatedStorageUpdateCrashRestore");
        testDocumentTx.open("admin", "admin");
    }

    /**
     * Main scenario: create documents in both databases, run concurrent updaters,
     * kill the server mid-update, reopen the remote storage locally and compare.
     */
    public void testDocumentUpdate() throws Exception {
        createSchema(baseDocumentTx);
        createSchema(testDocumentTx);
        System.out.println("Schema was created.");
        System.out.println("Document creation was started.");
        createDocuments();
        System.out.println("Document creation was finished.");
        System.out.println("Start documents update.");
        List<Future> futures = new ArrayList<Future>();
        for (int i = 0; i < 5; i++) {
            futures.add(executorService.submit(new DataUpdateTask(baseDocumentTx, testDocumentTx)));
        }
        // Let the updaters run for a while, then kill the server abruptly.
        Thread.sleep(150000);
        long lastTs = System.currentTimeMillis();
        process.destroy();
        // Updater tasks are expected to fail once the server dies; errors are reported only.
        for (Future future : futures) {
            try {
                future.get();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        System.out.println("Documents update was stopped.");
        // Reopen the crashed remote storage locally; open/close/open forces a restore cycle.
        testDocumentTx = new ODatabaseDocumentTx("plocal:" + buildDir.getAbsolutePath()
            + "/testLocalPaginatedStorageUpdateCrashRestore");
        testDocumentTx.open("admin", "admin");
        testDocumentTx.close();
        testDocumentTx.open("admin", "admin");
        System.out.println("Start documents comparison.");
        compareDocuments(lastTs);
    }

    /** Creates the TestClass schema with a unique index on "id" in the given database. */
    private void createSchema(ODatabaseDocumentTx dbDocumentTx) {
        ODatabaseRecordThreadLocal.INSTANCE.set(dbDocumentTx);
        OSchema schema = dbDocumentTx.getMetadata().getSchema();
        if (!schema.existsClass("TestClass")) {
            OClass testClass = schema.createClass("TestClass");
            testClass.createProperty("id", OType.LONG);
            testClass.createProperty("timestamp", OType.LONG);
            testClass.createProperty("stringValue", OType.STRING);
            testClass.createIndex("idIndex", OClass.INDEX_TYPE.UNIQUE, "id");
            schema.save();
        }
    }

    /** Seeds one million documents into both databases with sequential ids. */
    private void createDocuments() {
        Random random = new Random();
        for (int i = 0; i < 1000000; i++) {
            final ODocument document = new ODocument("TestClass");
            document.field("id", idGen++);
            document.field("timestamp", System.currentTimeMillis());
            document.field("stringValue", "sfe" + random.nextLong());
            saveDoc(document, baseDocumentTx, testDocumentTx);
            if (i % 10000 == 0)
                System.out.println(i + " documents were created.");
        }
    }

    /**
     * Saves the document into the base database and a copy into the test database,
     * switching the thread-local active database around each save.
     */
    private void saveDoc(ODocument document, ODatabaseDocumentTx baseDB, ODatabaseDocumentTx testDB) {
        ODatabaseRecordThreadLocal.INSTANCE.set(baseDB);
        ODocument testDoc = new ODocument();
        document.copyTo(testDoc);
        document.save();
        ODatabaseRecordThreadLocal.INSTANCE.set(testDB);
        testDoc.save();
        ODatabaseRecordThreadLocal.INSTANCE.set(baseDB);
    }

    /**
     * Walks every physical position of the base cluster and checks that the restored
     * test database holds an equal record; prints restore statistics at the end.
     *
     * @param lastTs timestamp taken right before the server was killed, used to report
     *               the maximum time window of lost updates
     */
    private void compareDocuments(long lastTs) {
        // Smallest timestamp among records that did NOT survive; stays MAX_VALUE when
        // nothing was lost. NOTE(review): in that case the printed interval is negative.
        long minTs = Long.MAX_VALUE;
        int clusterId = baseDocumentTx.getClusterIdByName("TestClass");
        OStorage baseStorage = baseDocumentTx.getStorage();
        OPhysicalPosition[] physicalPositions = baseStorage.ceilingPhysicalPositions(clusterId, new OPhysicalPosition(
            OClusterPositionFactory.INSTANCE.valueOf(0)));
        int recordsRestored = 0;
        int recordsTested = 0;
        while (physicalPositions.length > 0) {
            final ORecordId rid = new ORecordId(clusterId);
            for (OPhysicalPosition physicalPosition : physicalPositions) {
                rid.clusterPosition = physicalPosition.clusterPosition;
                ODatabaseRecordThreadLocal.INSTANCE.set(baseDocumentTx);
                ODocument baseDocument = baseDocumentTx.load(rid);
                ODatabaseRecordThreadLocal.INSTANCE.set(testDocumentTx);
                List<ODocument> testDocuments = testDocumentTx.query(new OSQLSynchQuery<ODocument>("select from TestClass where id = "
                    + baseDocument.field("id")));
                Assert.assertTrue(!testDocuments.isEmpty());
                ODocument testDocument = testDocuments.get(0);
                if (testDocument.field("timestamp").equals(baseDocument.field("timestamp"))
                    && testDocument.field("stringValue").equals(baseDocument.field("stringValue"))) {
                    recordsRestored++;
                } else {
                    if (((Long) baseDocument.field("timestamp")) < minTs)
                        minTs = baseDocument.field("timestamp");
                }
                recordsTested++;
                if (recordsTested % 10000 == 0)
                    System.out.println(recordsTested + " were tested, " + recordsRestored + " were restored ...");
            }
            physicalPositions = baseStorage.higherPhysicalPositions(clusterId, physicalPositions[physicalPositions.length - 1]);
        }
        System.out.println(recordsRestored + " records were restored. Total records " + recordsTested
            + ". Max interval for lost records " + (lastTs - minTs));
    }

    /**
     * Endless updater: picks random ids, updates the matching document in both
     * databases under the per-id lock, until the remote server dies.
     */
    public class DataUpdateTask implements Callable<Void> {
        private ODatabaseDocumentTx baseDB;
        private ODatabaseDocumentTx testDB;

        // Each task opens its own connections derived from the shared databases' URLs.
        public DataUpdateTask(ODatabaseDocumentTx baseDB, ODatabaseDocumentTx testDocumentTx) {
            this.baseDB = new ODatabaseDocumentTx(baseDB.getURL());
            this.testDB = new ODatabaseDocumentTx(testDocumentTx.getURL());
        }

        @Override
        public Void call() throws Exception {
            Random random = new Random();
            baseDB.open("admin", "admin");
            testDB.open("admin", "admin");
            int counter = 0;
            try {
                // Runs until an exception (typically the server crash) breaks the loop.
                while (true) {
                    // NOTE(review): idGen is read without synchronization; it is no longer
                    // written at this point, so this is presumably safe — confirm.
                    final int idToUpdate = random.nextInt(idGen);
                    idLockManager.acquireLock(Thread.currentThread(), idToUpdate, OLockManager.LOCK.EXCLUSIVE);
                    try {
                        OSQLSynchQuery<ODocument> query = new OSQLSynchQuery<ODocument>("select from TestClass where id = " + idToUpdate);
                        final List<ODocument> result = baseDB.query(query);
                        Assert.assertTrue(!result.isEmpty());
                        final ODocument document = result.get(0);
                        document.field("timestamp", System.currentTimeMillis());
                        document.field("stringValue", "vde" + random.nextLong());
                        saveDoc(document, baseDB, testDB);
                        counter++;
                        if (counter % 50000 == 0)
                            System.out.println(counter + " records were updated.");
                    } finally {
                        idLockManager.releaseLock(Thread.currentThread(), idToUpdate, OLockManager.LOCK.EXCLUSIVE);
                    }
                }
            } finally {
                baseDB.close();
                testDB.close();
            }
        }
    }
}
| 1no label
|
server_src_test_java_com_orientechnologies_orient_core_storage_impl_local_paginated_LocalPaginatedStorageUpdateCrashRestore.java
|
4,265 |
/**
 * A translog file backed by a random-access file with an in-memory write buffer:
 * operations are appended to {@code buffer} and flushed to the channel when the buffer
 * fills, on snapshot, on sync and on close. A read-write lock guards buffer state;
 * reads of recently-written data are served straight from the buffer.
 */
public class BufferingFsTranslogFile implements FsTranslogFile {
    private final long id;
    private final ShardId shardId;
    private final RafReference raf;
    private final ReadWriteLock rwl = new ReentrantReadWriteLock();
    private volatile int operationCounter;
    // Logical end of the translog, including data still sitting in the buffer.
    private long lastPosition;
    // Portion already written through the channel (file bytes); volatile for readers.
    private volatile long lastWrittenPosition;
    private volatile long lastSyncPosition = 0;
    private byte[] buffer;
    // Number of valid bytes currently held in `buffer`.
    private int bufferCount;

    public BufferingFsTranslogFile(ShardId shardId, long id, RafReference raf, int bufferSize) throws IOException {
        this.shardId = shardId;
        this.id = id;
        this.raf = raf;
        this.buffer = new byte[bufferSize];
        // Truncate: a new translog file always starts empty.
        raf.raf().setLength(0);
    }

    public long id() {
        return this.id;
    }

    public int estimatedNumberOfOperations() {
        return operationCounter;
    }

    public long translogSizeInBytes() {
        return lastWrittenPosition;
    }

    /**
     * Appends an operation. Oversized payloads bypass the buffer and go straight to the
     * channel; otherwise the buffer is flushed first if the payload would not fit.
     */
    @Override
    public Translog.Location add(byte[] data, int from, int size) throws IOException {
        rwl.writeLock().lock();
        try {
            operationCounter++;
            long position = lastPosition;
            if (size >= buffer.length) {
                flushBuffer();
                // we use the channel to write, since on windows, writing to the RAF might not be reflected
                // when reading through the channel
                raf.channel().write(ByteBuffer.wrap(data, from, size));
                lastWrittenPosition += size;
                lastPosition += size;
                return new Translog.Location(id, position, size);
            }
            if (size > buffer.length - bufferCount) {
                flushBuffer();
            }
            System.arraycopy(data, from, buffer, bufferCount, size);
            bufferCount += size;
            lastPosition += size;
            return new Translog.Location(id, position, size);
        } finally {
            rwl.writeLock().unlock();
        }
    }

    // Caller must hold the write lock.
    private void flushBuffer() throws IOException {
        if (bufferCount > 0) {
            // we use the channel to write, since on windows, writing to the RAF might not be reflected
            // when reading through the channel
            raf.channel().write(ByteBuffer.wrap(buffer, 0, bufferCount));
            lastWrittenPosition += bufferCount;
            bufferCount = 0;
        }
    }

    /**
     * Reads an operation: served from the in-memory buffer when the location has not
     * been flushed to disk yet, otherwise from the file channel.
     */
    @Override
    public byte[] read(Translog.Location location) throws IOException {
        rwl.readLock().lock();
        try {
            if (location.translogLocation >= lastWrittenPosition) {
                // Still buffered: copy directly out of the buffer.
                byte[] data = new byte[location.size];
                System.arraycopy(buffer, (int) (location.translogLocation - lastWrittenPosition), data, 0, location.size);
                return data;
            }
        } finally {
            rwl.readLock().unlock();
        }
        // On-disk read happens outside the lock; the file region is immutable once written.
        ByteBuffer buffer = ByteBuffer.allocate(location.size);
        raf.channel().read(buffer, location.translogLocation);
        return buffer.array();
    }

    /**
     * Flushes the buffer and returns a snapshot over the written portion, bumping the
     * raf ref count; returns null if the file is already being closed.
     */
    @Override
    public FsChannelSnapshot snapshot() throws TranslogException {
        rwl.writeLock().lock();
        try {
            flushBuffer();
            if (!raf.increaseRefCount()) {
                return null;
            }
            return new FsChannelSnapshot(this.id, raf, lastWrittenPosition, operationCounter);
        } catch (IOException e) {
            throw new TranslogException(shardId, "failed to flush", e);
        } finally {
            rwl.writeLock().unlock();
        }
    }

    @Override
    public boolean syncNeeded() {
        return lastPosition != lastSyncPosition;
    }

    /**
     * Flushes the buffer and forces the channel; cheap no-op when nothing changed since
     * the last sync. NOTE(review): lastPosition is read here without the lock and all
     * exceptions (including I/O failures of force()) are swallowed — confirm intended.
     */
    @Override
    public void sync() {
        try {
            // check if we really need to sync here...
            long last = lastPosition;
            if (last == lastSyncPosition) {
                return;
            }
            lastSyncPosition = last;
            rwl.writeLock().lock();
            try {
                flushBuffer();
            } finally {
                rwl.writeLock().unlock();
            }
            raf.channel().force(false);
        } catch (Exception e) {
            // ignore
        }
    }

    /**
     * Closes the file; flushes and syncs first unless the file is being deleted.
     */
    @Override
    public void close(boolean delete) {
        if (!delete) {
            rwl.writeLock().lock();
            try {
                flushBuffer();
                // Write lock is reentrant, so the nested lock inside sync() is fine.
                sync();
            } catch (IOException e) {
                throw new TranslogException(shardId, "failed to close", e);
            } finally {
                rwl.writeLock().unlock();
            }
        }
        raf.decreaseRefCount(delete);
    }

    /**
     * Adopts the buffer of another buffering translog file (after flushing our own),
     * so the byte array can be recycled across translog generations.
     */
    @Override
    public void reuse(FsTranslogFile other) {
        if (!(other instanceof BufferingFsTranslogFile)) {
            return;
        }
        rwl.writeLock().lock();
        try {
            flushBuffer();
            this.buffer = ((BufferingFsTranslogFile) other).buffer;
        } catch (IOException e) {
            throw new TranslogException(shardId, "failed to flush", e);
        } finally {
            rwl.writeLock().unlock();
        }
    }

    /**
     * Replaces the buffer with one of the requested size, flushing pending bytes first.
     */
    @Override
    public void updateBufferSize(int bufferSize) {
        rwl.writeLock().lock();
        try {
            if (this.buffer.length == bufferSize) {
                return;
            }
            flushBuffer();
            this.buffer = new byte[bufferSize];
        } catch (IOException e) {
            throw new TranslogException(shardId, "failed to flush", e);
        } finally {
            rwl.writeLock().unlock();
        }
    }
}
| 1no label
|
src_main_java_org_elasticsearch_index_translog_fs_BufferingFsTranslogFile.java
|
3,867 |
/**
 * Parses the "ids" query: matches documents whose _uid is among the given ids,
 * optionally restricted to a set of types. Produces a constant-score query over a
 * terms filter on the _uid field.
 */
public class IdsQueryParser implements QueryParser {

    public static final String NAME = "ids";

    @Inject
    public IdsQueryParser() {
    }

    @Override
    public String[] names() {
        return new String[]{NAME};
    }

    /**
     * Consumes the query body token stream. Recognized fields: "values" (array of ids,
     * required), "types"/"type" (array or single value), "boost" and "_name".
     *
     * @throws QueryParsingException on unknown fields, null values, or when no
     *                               "values" field was provided at all
     */
    @Override
    public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
        XContentParser parser = parseContext.parser();

        List<BytesRef> ids = new ArrayList<BytesRef>();
        Collection<String> types = null;
        String currentFieldName = null;
        float boost = 1.0f;
        String queryName = null;
        XContentParser.Token token;
        // Distinguishes "values was present but empty" (match nothing) from
        // "values missing entirely" (parse error).
        boolean idsProvided = false;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token == XContentParser.Token.START_ARRAY) {
                if ("values".equals(currentFieldName)) {
                    idsProvided = true;
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        BytesRef value = parser.bytesOrNull();
                        if (value == null) {
                            throw new QueryParsingException(parseContext.index(), "No value specified for term filter");
                        }
                        ids.add(value);
                    }
                } else if ("types".equals(currentFieldName) || "type".equals(currentFieldName)) {
                    types = new ArrayList<String>();
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        String value = parser.textOrNull();
                        if (value == null) {
                            throw new QueryParsingException(parseContext.index(), "No type specified for term filter");
                        }
                        types.add(value);
                    }
                } else {
                    throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]");
                }
            } else if (token.isValue()) {
                if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) {
                    types = ImmutableList.of(parser.text());
                } else if ("boost".equals(currentFieldName)) {
                    boost = parser.floatValue();
                } else if ("_name".equals(currentFieldName)) {
                    queryName = parser.text();
                } else {
                    throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]");
                }
            }
        }

        if (!idsProvided) {
            throw new QueryParsingException(parseContext.index(), "[ids] query, no ids values provided");
        }

        // An explicitly empty "values" array legally matches no documents.
        if (ids.isEmpty()) {
            return Queries.newMatchNoDocsQuery();
        }

        // Fall back to the context's types; "_all" expands to every mapped type.
        if (types == null || types.isEmpty()) {
            types = parseContext.queryTypes();
        } else if (types.size() == 1 && Iterables.getFirst(types, null).equals("_all")) {
            types = parseContext.mapperService().types();
        }

        TermsFilter filter = new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
        // no need for constant score filter, since we don't cache the filter, and it always takes deletes into account
        ConstantScoreQuery query = new ConstantScoreQuery(filter);
        query.setBoost(boost);
        if (queryName != null) {
            parseContext.addNamedQuery(queryName, query);
        }
        return query;
    }
}
| 1no label
|
src_main_java_org_elasticsearch_index_query_IdsQueryParser.java
|
3,221 |
/**
 * Base implementation of a replicated record store: local mutations are applied under a
 * per-key mutex, bump the record's vector clock, and are broadcast to other members as
 * {@link ReplicationMessage}s. Keys and values are stored in marshalled form and
 * unmarshalled on the way out.
 */
public abstract class AbstractReplicatedRecordStore<K, V>
        extends AbstractBaseReplicatedRecordStore<K, V> {

    static final String CLEAR_REPLICATION_MAGIC_KEY = ReplicatedMapService.SERVICE_NAME + "$CLEAR$MESSAGE$";

    public AbstractReplicatedRecordStore(String name, NodeEngine nodeEngine, CleanerRegistrator cleanerRegistrator,
                                         ReplicatedMapService replicatedMapService) {
        super(name, nodeEngine, cleanerRegistrator, replicatedMapService);
    }

    /**
     * Removes the entry for the key, publishes a null-value replication message (a
     * tombstone carried by the incremented vector clock) and fires the entry listener.
     *
     * @return the previous (unmarshalled) value, or null if absent
     */
    @Override
    public Object remove(Object key) {
        ValidationUtil.isNotNull(key, "key");
        long time = System.currentTimeMillis();
        storage.checkState();
        V oldValue;
        K marshalledKey = (K) marshallKey(key);
        synchronized (getMutex(marshalledKey)) {
            final ReplicatedRecord current = storage.get(marshalledKey);
            final VectorClock vectorClock;
            if (current == null) {
                oldValue = null;
            } else {
                vectorClock = current.getVectorClock();
                oldValue = (V) current.getValue();
                // Force removal of the underlying stored entry
                storage.remove(marshalledKey, current);
                // Clock bump must precede publishing so peers order this removal correctly.
                vectorClock.incrementClock(localMember);
                ReplicationMessage message = buildReplicationMessage(key, null, vectorClock, -1);
                replicationPublisher.publishReplicatedMessage(message);
            }
            cancelTtlEntry(marshalledKey);
        }
        Object unmarshalledOldValue = unmarshallValue(oldValue);
        // Listener and stats run outside the mutex.
        fireEntryListenerEvent(key, unmarshalledOldValue, null);
        if (replicatedMapConfig.isStatisticsEnabled()) {
            mapStats.incrementRemoves(System.currentTimeMillis() - time);
        }
        return unmarshalledOldValue;
    }

    /**
     * Returns the unmarshalled value for the key, or null if absent or already expired
     * (TTL is checked eagerly here, before the cleanup thread gets to the entry).
     */
    @Override
    public Object get(Object key) {
        ValidationUtil.isNotNull(key, "key");
        long time = System.currentTimeMillis();
        storage.checkState();
        ReplicatedRecord replicatedRecord = storage.get(marshallKey(key));

        // Force return null on ttl expiration (but before cleanup thread run)
        long ttlMillis = replicatedRecord == null ? 0 : replicatedRecord.getTtlMillis();
        if (ttlMillis > 0 && System.currentTimeMillis() - replicatedRecord.getUpdateTime() >= ttlMillis) {
            replicatedRecord = null;
        }

        Object value = replicatedRecord == null ? null : unmarshallValue(replicatedRecord.getValue());
        if (replicatedMapConfig.isStatisticsEnabled()) {
            mapStats.incrementGets(System.currentTimeMillis() - time);
        }
        return value;
    }

    /** Puts with no TTL; delegates to the timed variant. */
    @Override
    public Object put(Object key, Object value) {
        ValidationUtil.isNotNull(key, "key");
        ValidationUtil.isNotNull(value, "value");
        storage.checkState();
        return put(key, value, 0, TimeUnit.MILLISECONDS);
    }

    /**
     * Puts the value under the key with an optional TTL (0 = none), bumps the vector
     * clock under the per-key mutex and publishes the change to peers.
     *
     * @return the previous (unmarshalled) value, or null
     * @throws IllegalArgumentException if ttl is negative
     */
    @Override
    public Object put(Object key, Object value, long ttl, TimeUnit timeUnit) {
        ValidationUtil.isNotNull(key, "key");
        ValidationUtil.isNotNull(value, "value");
        ValidationUtil.isNotNull(timeUnit, "timeUnit");
        if (ttl < 0) {
            throw new IllegalArgumentException("ttl must be a positive integer");
        }
        long time = System.currentTimeMillis();
        storage.checkState();
        V oldValue = null;
        K marshalledKey = (K) marshallKey(key);
        V marshalledValue = (V) marshallValue(value);
        synchronized (getMutex(marshalledKey)) {
            final long ttlMillis = ttl == 0 ? 0 : timeUnit.toMillis(ttl);
            final ReplicatedRecord old = storage.get(marshalledKey);
            final VectorClock vectorClock;
            if (old == null) {
                vectorClock = new VectorClock();
                ReplicatedRecord<K, V> record = buildReplicatedRecord(marshalledKey, marshalledValue, vectorClock, ttlMillis);
                storage.put(marshalledKey, record);
            } else {
                oldValue = (V) old.getValue();
                vectorClock = old.getVectorClock();
                // NOTE(review): second lookup of the same key — presumably equivalent to
                // old.setValue(...); confirm storage cannot change under the mutex.
                storage.get(marshalledKey).setValue(marshalledValue, localMemberHash, ttlMillis);
            }
            if (ttlMillis > 0) {
                scheduleTtlEntry(ttlMillis, marshalledKey, null);
            } else {
                cancelTtlEntry(marshalledKey);
            }

            vectorClock.incrementClock(localMember);
            ReplicationMessage message = buildReplicationMessage(key, value, vectorClock, ttlMillis);
            replicationPublisher.publishReplicatedMessage(message);
        }
        Object unmarshalledOldValue = unmarshallValue(oldValue);
        fireEntryListenerEvent(key, unmarshalledOldValue, value);
        if (replicatedMapConfig.isStatisticsEnabled()) {
            mapStats.incrementPuts(System.currentTimeMillis() - time);
        }
        return unmarshalledOldValue;
    }

    @Override
    public boolean containsKey(Object key) {
        ValidationUtil.isNotNull(key, "key");
        storage.checkState();
        mapStats.incrementOtherOperations();
        return storage.containsKey(marshallKey(key));
    }

    /** Linear scan over all records, comparing unmarshalled values by equals. */
    @Override
    public boolean containsValue(Object value) {
        ValidationUtil.isNotNull(value, "value");
        storage.checkState();
        mapStats.incrementOtherOperations();
        for (Map.Entry<K, ReplicatedRecord<K, V>> entry : storage.entrySet()) {
            V entryValue = entry.getValue().getValue();
            if (value == entryValue || (entryValue != null && unmarshallValue(entryValue).equals(value))) {
                return true;
            }
        }
        return false;
    }

    /** Returns a snapshot set of the unmarshalled keys. */
    @Override
    public Set keySet() {
        storage.checkState();
        Set keySet = new HashSet(storage.size());
        for (K key : storage.keySet()) {
            keySet.add(unmarshallKey(key));
        }
        mapStats.incrementOtherOperations();
        return keySet;
    }

    /** Returns a snapshot list of the unmarshalled values. */
    @Override
    public Collection values() {
        storage.checkState();
        List values = new ArrayList(storage.size());
        for (ReplicatedRecord record : storage.values()) {
            values.add(unmarshallValue(record.getValue()));
        }
        mapStats.incrementOtherOperations();
        return values;
    }

    @Override
    public Collection values(Comparator comparator) {
        List values = (List) values();
        Collections.sort(values, comparator);
        return values;
    }

    /** Returns a snapshot set of unmarshalled key/value entries. */
    @Override
    public Set entrySet() {
        storage.checkState();
        Set entrySet = new HashSet(storage.size());
        for (Map.Entry<K, ReplicatedRecord<K, V>> entry : storage.entrySet()) {
            Object key = unmarshallKey(entry.getKey());
            Object value = unmarshallValue(entry.getValue().getValue());
            entrySet.add(new AbstractMap.SimpleEntry(key, value));
        }
        mapStats.incrementOtherOperations();
        return entrySet;
    }

    /** Returns the raw stored record (marshalled form), ignoring TTL. */
    @Override
    public ReplicatedRecord getReplicatedRecord(Object key) {
        ValidationUtil.isNotNull(key, "key");
        storage.checkState();
        return storage.get(marshallKey(key));
    }

    @Override
    public boolean isEmpty() {
        mapStats.incrementOtherOperations();
        return storage.isEmpty();
    }

    @Override
    public int size() {
        mapStats.incrementOtherOperations();
        return storage.size();
    }

    /**
     * Clears the local storage; optionally drains the outgoing replication queue first
     * and/or distributes the clear to other members.
     */
    @Override
    public void clear(boolean distribute, boolean emptyReplicationQueue) {
        storage.checkState();
        if (emptyReplicationQueue) {
            replicationPublisher.emptyReplicationQueue();
        }
        storage.clear();
        if (distribute) {
            replicationPublisher.distributeClear(emptyReplicationQueue);
        }
        mapStats.incrementOtherOperations();
    }

    /** Registers a listener filtered to a single (marshalled) key. */
    @Override
    public String addEntryListener(EntryListener listener, Object key) {
        ValidationUtil.isNotNull(listener, "listener");
        EventFilter eventFilter = new ReplicatedEntryEventFilter(marshallKey(key));
        mapStats.incrementOtherOperations();
        return replicatedMapService.addEventListener(listener, eventFilter, getName());
    }

    /** Registers a listener filtered by key and predicate. */
    @Override
    public String addEntryListener(EntryListener listener, Predicate predicate, Object key) {
        ValidationUtil.isNotNull(listener, "listener");
        EventFilter eventFilter = new ReplicatedQueryEventFilter(marshallKey(key), predicate);
        mapStats.incrementOtherOperations();
        return replicatedMapService.addEventListener(listener, eventFilter, getName());
    }

    @Override
    public boolean removeEntryListenerInternal(String id) {
        ValidationUtil.isNotNull(id, "id");
        mapStats.incrementOtherOperations();
        return replicatedMapService.removeEventListener(getName(), id);
    }

    private ReplicationMessage buildReplicationMessage(Object key, Object value, VectorClock vectorClock, long ttlMillis) {
        return new ReplicationMessage(getName(), key, value, vectorClock, localMember, localMemberHash, ttlMillis);
    }

    private ReplicatedRecord buildReplicatedRecord(Object key, Object value, VectorClock vectorClock, long ttlMillis) {
        return new ReplicatedRecord(key, value, vectorClock, localMemberHash, ttlMillis);
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_replicatedmap_record_AbstractReplicatedRecordStore.java
|
590 |
/**
 * Pure pass-through wrapper around another {@link OIndex}: every operation is forwarded
 * verbatim to {@code delegate}. Subclasses override individual methods to decorate
 * behavior while inheriting delegation for everything else.
 *
 * @param <T> the index value type of the wrapped index
 */
public class OIndexAbstractDelegate<T> implements OIndex<T> {
    protected OIndex<T> delegate;

    public OIndexAbstractDelegate(final OIndex<T> iDelegate) {
        this.delegate = iDelegate;
    }

    /**
     * Unwraps nested delegates until the underlying {@link OIndexInternal} is found.
     * The null check ends the loop if the chain terminates without one (instanceof on
     * null is false, so a null chain end would otherwise loop via getInternal()).
     */
    @SuppressWarnings("unchecked")
    public OIndexInternal<T> getInternal() {
        OIndex<?> internal = delegate;
        while (!(internal instanceof OIndexInternal) && internal != null)
            internal = internal.getInternal();

        return (OIndexInternal<T>) internal;
    }

    public OIndex<T> create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
                            final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
        return delegate.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener);
    }

    public Iterator<Entry<Object, T>> iterator() {
        return delegate.iterator();
    }

    @Override
    public Iterator<Entry<Object, T>> inverseIterator() {
        return delegate.inverseIterator();
    }

    @Override
    public Iterator<OIdentifiable> valuesIterator() {
        return delegate.valuesIterator();
    }

    @Override
    public Iterator<OIdentifiable> valuesInverseIterator() {
        return delegate.valuesInverseIterator();
    }

    public T get(final Object iKey) {
        return delegate.get(iKey);
    }

    public long count(final Object iKey) {
        return delegate.count(iKey);
    }

    public boolean contains(final Object iKey) {
        return delegate.contains(iKey);
    }

    public OIndex<T> put(final Object iKey, final OIdentifiable iValue) {
        return delegate.put(iKey, iValue);
    }

    public boolean remove(final Object key) {
        return delegate.remove(key);
    }

    public boolean remove(final Object iKey, final OIdentifiable iRID) {
        return delegate.remove(iKey, iRID);
    }

    public OIndex<T> clear() {
        return delegate.clear();
    }

    public Iterable<Object> keys() {
        return delegate.keys();
    }

    public Collection<OIdentifiable> getValuesBetween(final Object iRangeFrom, final Object iRangeTo) {
        return delegate.getValuesBetween(iRangeFrom, iRangeTo);
    }

    public Collection<OIdentifiable> getValuesBetween(final Object iRangeFrom, final boolean iFromInclusive, final Object iRangeTo,
                                                      final boolean iToInclusive) {
        return delegate.getValuesBetween(iRangeFrom, iFromInclusive, iRangeTo, iToInclusive);
    }

    public long count(final Object iRangeFrom, final boolean iFromInclusive, final Object iRangeTo, final boolean iToInclusive,
                      final int fetchLimit) {
        return delegate.count(iRangeFrom, iFromInclusive, iRangeTo, iToInclusive, fetchLimit);
    }

    public Collection<ODocument> getEntriesBetween(final Object iRangeFrom, final Object iRangeTo, final boolean iInclusive) {
        return delegate.getEntriesBetween(iRangeFrom, iRangeTo, iInclusive);
    }

    public Collection<ODocument> getEntriesBetween(final Object iRangeFrom, final Object iRangeTo) {
        return delegate.getEntriesBetween(iRangeFrom, iRangeTo);
    }

    public Collection<OIdentifiable> getValuesMajor(final Object fromKey, final boolean isInclusive) {
        return delegate.getValuesMajor(fromKey, isInclusive);
    }

    public Collection<ODocument> getEntriesMajor(final Object fromKey, final boolean isInclusive) {
        return delegate.getEntriesMajor(fromKey, isInclusive);
    }

    public Collection<OIdentifiable> getValuesMinor(final Object toKey, final boolean isInclusive) {
        return delegate.getValuesMinor(toKey, isInclusive);
    }

    public Collection<ODocument> getEntriesMinor(final Object toKey, final boolean isInclusive) {
        return delegate.getEntriesMinor(toKey, isInclusive);
    }

    public long getSize() {
        return delegate.getSize();
    }

    @Override
    public void flush() {
        delegate.flush();
    }

    public OIndex<T> delete() {
        return delegate.delete();
    }

    @Override
    public void deleteWithoutIndexLoad(String indexName) {
        delegate.deleteWithoutIndexLoad(indexName);
    }

    public String getName() {
        return delegate.getName();
    }

    public String getType() {
        return delegate.getType();
    }

    public boolean isAutomatic() {
        return delegate.isAutomatic();
    }

    public ODocument getConfiguration() {
        return delegate.getConfiguration();
    }

    public ORID getIdentity() {
        return delegate.getIdentity();
    }

    public void unload() {
        delegate.unload();
    }

    public long rebuild() {
        return delegate.rebuild();
    }

    public long rebuild(final OProgressListener iProgressListener) {
        return delegate.rebuild(iProgressListener);
    }

    public OType[] getKeyTypes() {
        return delegate.getKeyTypes();
    }

    public Collection<OIdentifiable> getValues(final Collection<?> iKeys) {
        return delegate.getValues(iKeys);
    }

    public Collection<ODocument> getEntries(final Collection<?> iKeys) {
        return delegate.getEntries(iKeys);
    }

    public OIndexDefinition getDefinition() {
        return delegate.getDefinition();
    }

    /** Two wrappers are equal iff their delegates are equal (class must match). */
    @Override
    public boolean equals(final Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;

        final OIndexAbstractDelegate<?> that = (OIndexAbstractDelegate<?>) o;

        if (!delegate.equals(that.delegate))
            return false;

        return true;
    }

    @Override
    public int hashCode() {
        return delegate.hashCode();
    }

    public void getValuesBetween(final Object iRangeFrom, final boolean iFromInclusive, final Object iRangeTo,
                                 final boolean iToInclusive, IndexValuesResultListener valuesResultListener) {
        delegate.getValuesBetween(iRangeFrom, iFromInclusive, iRangeTo, iToInclusive, valuesResultListener);
    }

    public void getValuesMajor(final Object fromKey, final boolean isInclusive, IndexValuesResultListener valuesResultListener) {
        delegate.getValuesMajor(fromKey, isInclusive, valuesResultListener);
    }

    public void getValuesMinor(final Object toKey, final boolean isInclusive, IndexValuesResultListener valuesResultListener) {
        delegate.getValuesMinor(toKey, isInclusive, valuesResultListener);
    }

    public void getEntriesMajor(final Object fromKey, final boolean isInclusive, IndexEntriesResultListener entriesResultListener) {
        delegate.getEntriesMajor(fromKey, isInclusive, entriesResultListener);
    }

    public void getEntriesMinor(final Object toKey, final boolean isInclusive, final IndexEntriesResultListener entriesResultListener) {
        delegate.getEntriesMinor(toKey, isInclusive, entriesResultListener);
    }

    public void getEntriesBetween(final Object iRangeFrom, final Object iRangeTo, final boolean iInclusive,
                                  final IndexEntriesResultListener entriesResultListener) {
        delegate.getEntriesBetween(iRangeFrom, iRangeTo, iInclusive, entriesResultListener);
    }

    public void getValues(final Collection<?> iKeys, final IndexValuesResultListener resultListener) {
        delegate.getValues(iKeys, resultListener);
    }

    public void getEntries(final Collection<?> iKeys, IndexEntriesResultListener resultListener) {
        delegate.getEntries(iKeys, resultListener);
    }

    public void checkEntry(final OIdentifiable iRecord, final Object iKey) {
        delegate.checkEntry(iRecord, iKey);
    }

    public Set<String> getClusters() {
        return delegate.getClusters();
    }

    @Override
    public String toString() {
        return delegate.toString();
    }

    public long getKeySize() {
        return delegate.getKeySize();
    }

    public String getDatabaseName() {
        return delegate.getDatabaseName();
    }

    @Override
    public boolean supportsOrderedIterations() {
        return delegate.supportsOrderedIterations();
    }

    // NOTE(review): "Rebuiding" is a typo inherited from the OIndex interface;
    // cannot be renamed here without breaking the interface contract.
    @Override
    public boolean isRebuiding() {
        return delegate.isRebuiding();
    }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexAbstractDelegate.java
|
1,455 |
/**
 * Unit tests for {@code DiscoveryNodeFilters}: verifies node matching by name, id,
 * attribute (tag/group), wildcard, and the OR vs AND combination semantics.
 */
public class DiscoveryNodeFiltersTests extends ElasticsearchTestCase {

    /** A name filter matches only nodes with that exact name. */
    @Test
    public void nameMatch() {
        Settings settings = ImmutableSettings.settingsBuilder()
                .put("xxx.name", "name1")
                .build();
        DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);

        DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
        assertThat(filters.match(node), equalTo(true));

        node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
        assertThat(filters.match(node), equalTo(false));
    }

    /** An _id filter matches only nodes with that exact id. */
    @Test
    public void idMatch() {
        Settings settings = ImmutableSettings.settingsBuilder()
                .put("xxx._id", "id1")
                .build();
        DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);

        DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
        assertThat(filters.match(node), equalTo(true));

        node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
        assertThat(filters.match(node), equalTo(false));
    }

    /** With OR semantics, matching either the id list or the name list is enough. */
    @Test
    public void idOrNameMatch() {
        Settings settings = ImmutableSettings.settingsBuilder()
                .put("xxx._id", "id1,blah")
                .put("xxx.name", "blah,name2")
                .build();
        DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);

        DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
        assertThat(filters.match(node), equalTo(true));

        node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
        assertThat(filters.match(node), equalTo(true));

        node = new DiscoveryNode("name3", "id3", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
        assertThat(filters.match(node), equalTo(false));
    }

    /** With AND semantics, every configured attribute filter must match. */
    @Test
    public void tagAndGroupMatch() {
        Settings settings = ImmutableSettings.settingsBuilder()
                .put("xxx.tag", "A")
                .put("xxx.group", "B")
                .build();
        DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(AND, "xxx.", settings);

        DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE,
                ImmutableMap.<String, String>of("tag", "A", "group", "B"), Version.CURRENT);
        assertThat(filters.match(node), equalTo(true));

        node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE,
                ImmutableMap.<String, String>of("tag", "A", "group", "B", "name", "X"), Version.CURRENT);
        assertThat(filters.match(node), equalTo(true));

        node = new DiscoveryNode("name3", "id3", DummyTransportAddress.INSTANCE,
                ImmutableMap.<String, String>of("tag", "A", "group", "F", "name", "X"), Version.CURRENT);
        assertThat(filters.match(node), equalTo(false));

        node = new DiscoveryNode("name4", "id4", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
        assertThat(filters.match(node), equalTo(false));
    }

    /** A "*" name filter matches any node. */
    @Test
    public void starMatch() {
        Settings settings = ImmutableSettings.settingsBuilder()
                .put("xxx.name", "*")
                .build();
        DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);

        DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
        assertThat(filters.match(node), equalTo(true));
    }
}
| 0true
|
src_test_java_org_elasticsearch_cluster_node_DiscoveryNodeFiltersTests.java
|
1,337 |
/**
 * Base response for a cluster state update task, carrying only the
 * acknowledgement flag reported back to the caller.
 */
public class ClusterStateUpdateResponse {

    // Immutable acknowledgement flag, fixed at construction time.
    private final boolean acknowledged;

    /**
     * @param acknowledged whether the cluster state update was acknowledged
     */
    public ClusterStateUpdateResponse(boolean acknowledged) {
        this.acknowledged = acknowledged;
    }

    /**
     * Whether the cluster state update was acknowledged or not
     */
    public boolean isAcknowledged() {
        return acknowledged;
    }
}
| 0true
|
src_main_java_org_elasticsearch_cluster_ack_ClusterStateUpdateResponse.java
|
1,010 |
/**
 * Transport-level envelope that carries a single-shard operation request
 * together with the id of the shard it should execute on. {@code Request} is
 * a type parameter of the enclosing transport action class (not visible in
 * this excerpt).
 */
protected class ShardSingleOperationRequest extends TransportRequest {

    // The wrapped action request; recreated via newRequest() during deserialization.
    private Request request;
    // Id of the target shard the operation is routed to.
    private int shardId;

    // No-arg constructor for the transport layer; fields are filled in by readFrom().
    ShardSingleOperationRequest() {
    }

    public ShardSingleOperationRequest(Request request, int shardId) {
        super(request);
        this.request = request;
        this.shardId = shardId;
    }

    /** @return the wrapped action request */
    public Request request() {
        return request;
    }

    /** @return the id of the shard this operation targets */
    public int shardId() {
        return shardId;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        // newRequest() is a factory method on the enclosing action class; the
        // fresh instance then populates itself from the stream.
        request = newRequest();
        request.readFrom(in);
        shardId = in.readVInt();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        request.writeTo(out);
        // Variable-length int encoding, mirroring readVInt() above.
        out.writeVInt(shardId);
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_support_single_custom_TransportSingleCustomOperationAction.java
|
3,683 |
/**
 * Default settings for this mapper: the feature is disabled unless a mapping
 * explicitly enables it, and the field type is a frozen copy of the integer
 * defaults.
 */
public static class Defaults extends IntegerFieldMapper.Defaults {
    public static final String NAME = CONTENT_TYPE;
    // Opt-in: mappings must explicitly enable this field.
    public static final EnabledAttributeMapper ENABLED_STATE = EnabledAttributeMapper.DISABLED;
    // Copy of the parent's field type so freezing here cannot mutate IntegerFieldMapper.Defaults.
    public static final FieldType SIZE_FIELD_TYPE = new FieldType(IntegerFieldMapper.Defaults.FIELD_TYPE);
    static {
        SIZE_FIELD_TYPE.freeze();
    }
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_internal_SizeFieldMapper.java
|
389 |
/**
 * LogManager specialization that hands out {@link SupportLogger} instances,
 * which support the extra SUPPORT log level. The module name is folded into
 * the logger name so it appears in every log message.
 */
public class SupportLogManager extends LogManager {

    /**
     * Retrieve a SupportLogger instance
     *
     * @param moduleName The name of the module - will appear in the log message
     * @param name The name for the logger - will appear in the log message
     * @return the specialized Logger instance supporting the SUPPORT log level
     */
    public static SupportLogger getLogger(final String moduleName, String name) {
        // The repository keys loggers by this composed name, so the same
        // (name, module) pair always yields the same logger instance.
        String qualifiedName = name + "(" + moduleName + ")";
        LoggerFactory supportFactory = new LoggerFactory() {
            @Override
            public Logger makeNewLoggerInstance(String s) {
                return new SupportLogger(moduleName, s);
            }
        };
        return (SupportLogger) getLoggerRepository().getLogger(qualifiedName, supportFactory);
    }

    /**
     * Retrieve a SupportLogger instance
     *
     * @param moduleName The name of the module - will appear in the log message
     * @param clazz The class from which the logging is being called - will appear in the log message
     * @return the specialized Logger instance supporting the SUPPORT log level
     */
    public static SupportLogger getLogger(final String moduleName, Class<?> clazz) {
        // Delegate to the String overload using the simple class name.
        return getLogger(moduleName, clazz.getSimpleName());
    }
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_logging_SupportLogManager.java
|
44 |
/**
 * Syntactic locations at which an identifier occurrence can appear, used to
 * drive completion behavior. The {@code reference} flag marks locations that
 * are direct references to a declaration (e.g. CLASS_REF) as opposed to
 * structural positions (e.g. EXTENDS, CATCH).
 */
public enum OccurrenceLocation {
    // Structural / keyword positions (not direct declaration references):
    EXISTS(false),
    NONEMPTY(false),
    IS(false),
    EXTENDS(false),
    SATISFIES(false),
    CLASS_ALIAS(false),
    OF(false),
    UPPER_BOUND(false),
    TYPE_ALIAS(false),
    CASE(false),
    CATCH(false),
    IMPORT(false),
    EXPRESSION(false),
    PARAMETER_LIST(false),
    TYPE_PARAMETER_LIST(false),
    TYPE_ARGUMENT_LIST(false),
    META(false),
    // Declaration-reference positions:
    PACKAGE_REF(true),
    MODULE_REF(true),
    INTERFACE_REF(true),
    CLASS_REF(true),
    ALIAS_REF(true),
    TYPE_PARAMETER_REF(true),
    VALUE_REF(true),
    FUNCTION_REF(true),
    DOCLINK(false);
    // True when this location is a direct reference to a declaration.
    public final boolean reference;
    OccurrenceLocation(boolean reference) {
        this.reference = reference;
    }
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_OccurrenceLocation.java
|
834 |
/**
 * Test helper that builds and persists Offer/OfferCode fixtures with an
 * effective window of yesterday through tomorrow.
 */
@SuppressWarnings("deprecation")
public class CreateOfferUtility {

    private OfferDao offerDao;
    private OfferCodeDao offerCodeDao;
    private OfferService offerService;

    public CreateOfferUtility(OfferDao offerDao, OfferCodeDao offerCodeDao, OfferService offerService) {
        this.offerDao = offerDao;
        this.offerCodeDao = offerCodeDao;
        this.offerService = offerService;
    }

    /** Convenience overload that uses the placeholder code name "NONAME". */
    public OfferCode createOfferCode(String offerName, OfferType offerType, OfferDiscountType discountType, double value, String customerRule, String orderRule, boolean stackable, boolean combinable, int priority) {
        return createOfferCode("NONAME", offerName, offerType, discountType, value, customerRule, orderRule, stackable, combinable, priority);
    }

    /** Creates a new Offer, wraps it in a persisted OfferCode, and returns the code. */
    public OfferCode createOfferCode(String offerCodeName, String offerName, OfferType offerType, OfferDiscountType discountType, double value, String customerRule, String orderRule, boolean stackable, boolean combinable, int priority) {
        OfferCode code = offerCodeDao.create();
        code.setOffer(createOffer(offerName, offerType, discountType, value, customerRule, orderRule, stackable, combinable, priority));
        code.setOfferCode(offerCodeName);
        return offerService.saveOfferCode(code);
    }

    /** Creates and saves an Offer active from yesterday until tomorrow. */
    public Offer createOffer(String offerName, OfferType offerType, OfferDiscountType discountType, double value, String customerRule, String orderRule, boolean stackable, boolean combinable, int priority) {
        Offer offer = offerDao.create();
        offer.setName(offerName);
        offer.setStartDate(SystemTime.asDate());
        // Window: start one day in the past, end one day in the future
        // (overwrites the start date set above, as in the original fixture).
        Calendar window = Calendar.getInstance();
        window.add(Calendar.DATE, -1);
        offer.setStartDate(window.getTime());
        window.add(Calendar.DATE, 2);
        offer.setEndDate(window.getTime());
        offer.setType(offerType);
        offer.setDiscountType(discountType);
        offer.setValue(BigDecimal.valueOf(value));
        offer.setDeliveryType(OfferDeliveryType.CODE);
        offer.setStackable(stackable);
        offer.setAppliesToOrderRules(orderRule);
        offer.setAppliesToCustomerRules(customerRule);
        offer.setCombinableWithOtherOffers(combinable);
        offer.setPriority(priority);
        offer = offerService.save(offer);
        // NOTE(review): set after save, so this value is only on the in-memory
        // instance — kept as-is to preserve the original fixture behavior.
        offer.setMaxUses(50);
        return offer;
    }

    /** Updates the per-customer usage cap on an existing code's offer and re-saves it. */
    public Offer updateOfferCodeMaxCustomerUses(OfferCode code, Long maxUses) {
        code.getOffer().setMaxUsesPerCustomer(maxUses);
        return offerService.save(code.getOffer());
    }
}
| 1no label
|
integration_src_test_java_org_broadleafcommerce_core_offer_service_CreateOfferUtility.java
|
419 |
/**
 * Restore snapshot request: identifies a snapshot in a repository and carries
 * the indices, rename rules, settings and flags controlling how it is restored.
 */
public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSnapshotRequest> {
    private String snapshot;
    private String repository;
    private String[] indices = Strings.EMPTY_ARRAY;
    private IndicesOptions indicesOptions = IndicesOptions.strict();
    private String renamePattern;
    private String renameReplacement;
    private boolean waitForCompletion;
    private boolean includeGlobalState = true;
    private Settings settings = EMPTY_SETTINGS;

    RestoreSnapshotRequest() {
    }

    /**
     * Constructs a new put repository request with the provided repository and snapshot names.
     *
     * @param repository repository name
     * @param snapshot   snapshot name
     */
    public RestoreSnapshotRequest(String repository, String snapshot) {
        this.snapshot = snapshot;
        this.repository = repository;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (snapshot == null) {
            validationException = addValidationError("name is missing", validationException);
        }
        if (repository == null) {
            validationException = addValidationError("repository is missing", validationException);
        }
        if (indices == null) {
            validationException = addValidationError("indices are missing", validationException);
        }
        if (indicesOptions == null) {
            validationException = addValidationError("indicesOptions is missing", validationException);
        }
        if (settings == null) {
            validationException = addValidationError("settings are missing", validationException);
        }
        return validationException;
    }

    /**
     * Sets the name of the snapshot.
     *
     * @param snapshot snapshot name
     * @return this request
     */
    public RestoreSnapshotRequest snapshot(String snapshot) {
        this.snapshot = snapshot;
        return this;
    }

    /**
     * Returns the name of the snapshot.
     *
     * @return snapshot name
     */
    public String snapshot() {
        return this.snapshot;
    }

    /**
     * Sets repository name
     *
     * @param repository repository name
     * @return this request
     */
    public RestoreSnapshotRequest repository(String repository) {
        this.repository = repository;
        return this;
    }

    /**
     * Returns repository name
     *
     * @return repository name
     */
    public String repository() {
        return this.repository;
    }

    /**
     * Sets the list of indices that should be restored from snapshot
     * <p/>
     * The list of indices supports multi-index syntax. For example: "+test*" ,"-test42" will index all indices with
     * prefix "test" except index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
     * indices in the snapshot.
     *
     * @param indices list of indices
     * @return this request
     */
    public RestoreSnapshotRequest indices(String... indices) {
        this.indices = indices;
        return this;
    }

    /**
     * Sets the list of indices that should be restored from snapshot
     * <p/>
     * The list of indices supports multi-index syntax. For example: "+test*" ,"-test42" will index all indices with
     * prefix "test" except index "test42". Aliases are not supported. An empty list or {"_all"} will restore all open
     * indices in the snapshot.
     *
     * @param indices list of indices
     * @return this request
     */
    public RestoreSnapshotRequest indices(List<String> indices) {
        this.indices = indices.toArray(new String[indices.size()]);
        return this;
    }

    /**
     * Returns list of indices that should be restored from snapshot
     *
     * @return list of indices
     */
    public String[] indices() {
        return indices;
    }

    /**
     * Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
     * For example indices that don't exist.
     *
     * @return the desired behaviour regarding indices to ignore and wildcard indices expression
     */
    public IndicesOptions indicesOptions() {
        return indicesOptions;
    }

    /**
     * Specifies what type of requested indices to ignore and how to deal with wildcard expressions.
     * For example indices that don't exist.
     *
     * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions
     * @return this request
     */
    public RestoreSnapshotRequest indicesOptions(IndicesOptions indicesOptions) {
        this.indicesOptions = indicesOptions;
        return this;
    }

    /**
     * Sets rename pattern that should be applied to restored indices.
     * <p/>
     * Indices that match the rename pattern will be renamed according to {@link #renameReplacement(String)}. The
     * rename pattern is applied according to the {@link java.util.regex.Matcher#appendReplacement(StringBuffer, String)}
     * The request will fail if two or more indices will be renamed into the same name.
     *
     * @param renamePattern rename pattern
     * @return this request
     */
    public RestoreSnapshotRequest renamePattern(String renamePattern) {
        this.renamePattern = renamePattern;
        return this;
    }

    /**
     * Returns rename pattern
     *
     * @return rename pattern
     */
    public String renamePattern() {
        return renamePattern;
    }

    /**
     * Sets rename replacement
     * <p/>
     * See {@link #renamePattern(String)} for more information.
     *
     * @param renameReplacement rename replacement
     * @return this request
     */
    public RestoreSnapshotRequest renameReplacement(String renameReplacement) {
        this.renameReplacement = renameReplacement;
        return this;
    }

    /**
     * Returns rename replacement
     *
     * @return rename replacement
     */
    public String renameReplacement() {
        return renameReplacement;
    }

    /**
     * If this parameter is set to true the operation will wait for completion of restore process before returning.
     *
     * @param waitForCompletion if true the operation will wait for completion
     * @return this request
     */
    public RestoreSnapshotRequest waitForCompletion(boolean waitForCompletion) {
        this.waitForCompletion = waitForCompletion;
        return this;
    }

    /**
     * Returns wait for completion setting
     *
     * @return true if the operation will wait for completion
     */
    public boolean waitForCompletion() {
        return waitForCompletion;
    }

    /**
     * Sets repository-specific restore settings.
     * <p/>
     * See repository documentation for more information.
     *
     * @param settings repository-specific snapshot settings
     * @return this request
     */
    public RestoreSnapshotRequest settings(Settings settings) {
        this.settings = settings;
        return this;
    }

    /**
     * Sets repository-specific restore settings.
     * <p/>
     * See repository documentation for more information.
     *
     * @param settings repository-specific snapshot settings
     * @return this request
     */
    public RestoreSnapshotRequest settings(Settings.Builder settings) {
        this.settings = settings.build();
        return this;
    }

    /**
     * Sets repository-specific restore settings in JSON, YAML or properties format
     * <p/>
     * See repository documentation for more information.
     *
     * @param source repository-specific snapshot settings
     * @return this request
     */
    public RestoreSnapshotRequest settings(String source) {
        this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
        return this;
    }

    /**
     * Sets repository-specific restore settings
     * <p/>
     * See repository documentation for more information.
     *
     * @param source repository-specific snapshot settings
     * @return this request
     */
    public RestoreSnapshotRequest settings(Map<String, Object> source) {
        try {
            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
            builder.map(source);
            settings(builder.string());
        } catch (IOException e) {
            throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
        }
        return this;
    }

    /**
     * Returns repository-specific restore settings
     *
     * @return restore settings
     */
    public Settings settings() {
        return this.settings;
    }

    /**
     * If set to true the restore procedure will restore global cluster state.
     * <p/>
     * The global cluster state includes persistent settings and index template definitions.
     *
     * @param includeGlobalState true if global state should be restored from the snapshot
     * @return this request
     */
    public RestoreSnapshotRequest includeGlobalState(boolean includeGlobalState) {
        this.includeGlobalState = includeGlobalState;
        return this;
    }

    /**
     * Returns true if global state should be restored from this snapshot
     *
     * @return true if global state should be restored
     */
    public boolean includeGlobalState() {
        return includeGlobalState;
    }

    /**
     * Parses restore definition
     *
     * @param source restore definition
     * @return this request
     */
    public RestoreSnapshotRequest source(XContentBuilder source) {
        try {
            return source(source.bytes());
        } catch (Exception e) {
            throw new ElasticsearchIllegalArgumentException("Failed to build json for repository request", e);
        }
    }

    /**
     * Parses restore definition
     *
     * @param source restore definition
     * @return this request
     */
    @SuppressWarnings("unchecked")
    public RestoreSnapshotRequest source(Map source) {
        // Start from lenient defaults; individual keys below can override them.
        IndicesOptions defaultOptions = IndicesOptions.lenient();
        boolean ignoreUnavailable = defaultOptions.ignoreUnavailable();
        boolean allowNoIndices = defaultOptions.allowNoIndices();
        boolean expandWildcardsOpen = defaultOptions.expandWildcardsOpen();
        boolean expandWildcardsClosed = defaultOptions.expandWildcardsClosed();
        for (Map.Entry<String, Object> entry : ((Map<String, Object>) source).entrySet()) {
            String name = entry.getKey();
            if (name.equals("indices")) {
                if (entry.getValue() instanceof String) {
                    indices(Strings.splitStringByCommaToArray((String) entry.getValue()));
                } else if (entry.getValue() instanceof List) {
                    // Accept any List implementation, not just ArrayList — the
                    // parser is free to return a different List subtype.
                    indices((List<String>) entry.getValue());
                } else {
                    throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings");
                }
            } else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) {
                ignoreUnavailable = nodeBooleanValue(entry.getValue());
            } else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) {
                allowNoIndices = nodeBooleanValue(entry.getValue());
            } else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) {
                expandWildcardsOpen = nodeBooleanValue(entry.getValue());
            } else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) {
                expandWildcardsClosed = nodeBooleanValue(entry.getValue());
            } else if (name.equals("settings")) {
                if (!(entry.getValue() instanceof Map)) {
                    throw new ElasticsearchIllegalArgumentException("malformed settings section, should include an inner object");
                }
                settings((Map<String, Object>) entry.getValue());
            } else if (name.equals("include_global_state")) {
                includeGlobalState = nodeBooleanValue(entry.getValue());
            } else if (name.equals("rename_pattern")) {
                if (entry.getValue() instanceof String) {
                    renamePattern((String) entry.getValue());
                } else {
                    throw new ElasticsearchIllegalArgumentException("malformed rename_pattern");
                }
            } else if (name.equals("rename_replacement")) {
                if (entry.getValue() instanceof String) {
                    renameReplacement((String) entry.getValue());
                } else {
                    throw new ElasticsearchIllegalArgumentException("malformed rename_replacement");
                }
            } else {
                throw new ElasticsearchIllegalArgumentException("Unknown parameter " + name);
            }
        }
        indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed));
        return this;
    }

    /**
     * Parses restore definition
     * <p/>
     * JSON, YAML and properties formats are supported
     *
     * @param source restore definition
     * @return this request
     */
    public RestoreSnapshotRequest source(String source) {
        if (hasLength(source)) {
            try {
                return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
            } catch (Exception e) {
                throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + source + "]", e);
            }
        }
        return this;
    }

    /**
     * Parses restore definition
     * <p/>
     * JSON, YAML and properties formats are supported
     *
     * @param source restore definition
     * @return this request
     */
    public RestoreSnapshotRequest source(byte[] source) {
        return source(source, 0, source.length);
    }

    /**
     * Parses restore definition
     * <p/>
     * JSON, YAML and properties formats are supported
     *
     * @param source restore definition
     * @param offset offset
     * @param length length
     * @return this request
     */
    public RestoreSnapshotRequest source(byte[] source, int offset, int length) {
        if (length > 0) {
            try {
                return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
            } catch (IOException e) {
                throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
            }
        }
        return this;
    }

    /**
     * Parses restore definition
     * <p/>
     * JSON, YAML and properties formats are supported
     *
     * @param source restore definition
     * @return this request
     */
    public RestoreSnapshotRequest source(BytesReference source) {
        try {
            return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
        } catch (IOException e) {
            throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
        }
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        // Field order must match writeTo() exactly.
        snapshot = in.readString();
        repository = in.readString();
        indices = in.readStringArray();
        indicesOptions = IndicesOptions.readIndicesOptions(in);
        renamePattern = in.readOptionalString();
        renameReplacement = in.readOptionalString();
        waitForCompletion = in.readBoolean();
        includeGlobalState = in.readBoolean();
        settings = readSettingsFromStream(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        // Field order must match readFrom() exactly.
        out.writeString(snapshot);
        out.writeString(repository);
        out.writeStringArray(indices);
        indicesOptions.writeIndicesOptions(out);
        out.writeOptionalString(renamePattern);
        out.writeOptionalString(renameReplacement);
        out.writeBoolean(waitForCompletion);
        out.writeBoolean(includeGlobalState);
        writeSettingsToStream(settings, out);
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_restore_RestoreSnapshotRequest.java
|
0 |
public interface AdminCatalogService {

    /**
     * Clear out any Skus that are already attached to the Product
     * if there were any there and generate a new set of Skus based
     * on the permutations of ProductOptions attached to this Product.
     *
     * @param productId the Product to generate Skus from
     * @return the number of generated Skus from the ProductOption permutations
     */
    Integer generateSkusFromProduct(Long productId);

    /**
     * This will create a new product along with a new Sku for the defaultSku, along with new
     * Skus for all of the additional Skus. This is achieved by simply detaching the entities
     * from the persistent session, resetting the primary keys and then saving the entity.
     * <p>
     * Note: Media for the product is not saved separately, meaning if you make a change to the
     * original product's media items (the one specified by <b>productId</b>) it will change the
     * cloned product's media and vice-versa.
     *
     * @param productId the id of the Product to clone
     * @return whether the clone succeeded
     */
    Boolean cloneProduct(Long productId);
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_AdminCatalogService.java
|
5 |
/**
 * Internal holder for an exceptional (or nil) completion outcome; a null
 * {@code ex} is used only for the NIL sentinel.
 */
static final class AltResult {
    final Throwable ex; // null only for NIL

    AltResult(Throwable ex) {
        this.ex = ex;
    }
}
| 0true
|
src_main_java_jsr166e_CompletableFuture.java
|
1,100 |
/**
 * SQL function {@code first(<field>)}: collapses a multi-value argument to its
 * first element and, when used as an aggregate, remembers the very first value
 * seen across records.
 */
public class OSQLFunctionFirst extends OSQLFunctionConfigurableAbstract {
  public static final String NAME = "first";

  // Sentinel trick: 'this' means "not captured yet"; replaced by the first value seen.
  private Object first = this;

  public OSQLFunctionFirst() {
    super(NAME, 1, 1);
  }

  public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters,
      final OCommandContext iContext) {
    Object candidate = iParameters[0];

    // Resolve filter items against the current record before inspecting them.
    if (candidate instanceof OSQLFilterItem) {
      candidate = ((OSQLFilterItem) candidate).getValue(iCurrentRecord, iContext);
    }

    // Collections and other multi-values collapse to their first element.
    if (OMultiValue.isMultiValue(candidate)) {
      candidate = OMultiValue.getFirstValue(candidate);
    }

    if (first == this) {
      // ONLY THE FIRST TIME: capture the first value encountered.
      first = candidate;
    }

    return candidate;
  }

  public boolean aggregateResults() {
    // Aggregates only in the single-argument configuration.
    return configuredParameters.length == 1;
  }

  @Override
  public Object getResult() {
    return first;
  }

  @Override
  public boolean filterResult() {
    return true;
  }

  public String getSyntax() {
    return "Syntax error: first(<field>)";
  }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_coll_OSQLFunctionFirst.java
|
134 |
/**
 * Null-object implementation of {@link LogBuffer}: every write is silently
 * discarded and the fluent API simply returns this instance. Channel accessors
 * are unsupported because there is no backing storage.
 */
public class NullLogBuffer implements LogBuffer
{
    public static final LogBuffer INSTANCE = new NullLogBuffer();

    // Singleton: use INSTANCE instead of constructing.
    private NullLogBuffer()
    {
    }

    @Override
    public LogBuffer put( byte b ) throws IOException
    {
        return this;
    }

    @Override
    public LogBuffer putShort( short b ) throws IOException
    {
        return this;
    }

    @Override
    public LogBuffer putInt( int i ) throws IOException
    {
        return this;
    }

    @Override
    public LogBuffer putLong( long l ) throws IOException
    {
        return this;
    }

    @Override
    public LogBuffer putFloat( float f ) throws IOException
    {
        return this;
    }

    @Override
    public LogBuffer putDouble( double d ) throws IOException
    {
        return this;
    }

    @Override
    public LogBuffer put( byte[] bytes ) throws IOException
    {
        return this;
    }

    @Override
    public LogBuffer put( char[] chars ) throws IOException
    {
        return this;
    }

    @Override
    public void writeOut() throws IOException
    {
    }

    @Override
    public void force() throws IOException
    {
    }

    @Override
    public long getFileChannelPosition() throws IOException
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public StoreChannel getFileChannel()
    {
        throw new UnsupportedOperationException();
    }
}
| 0true
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_NullLogBuffer.java
|
1,551 |
/**
 * Map-only Hadoop job step that evaluates a Groovy closure against each
 * path-carrying vertex (or its out-edges) and writes the closure's string
 * result to the side-effect output, once per path. The graph itself is passed
 * through unchanged.
 */
public class TransformMap {

    // Configuration keys: the element class (Vertex vs Edge) and the Groovy closure source.
    public static final String CLASS = Tokens.makeNamespace(TransformMap.class) + ".class";
    public static final String CLOSURE = Tokens.makeNamespace(TransformMap.class) + ".closure";

    // Shared script engine; the closure source is compiled once per mapper in setup().
    private static final ScriptEngine engine = new GremlinGroovyScriptEngine();

    public enum Counters {
        VERTICES_PROCESSED,
        EDGES_PROCESSED
    }

    /** Builds the job configuration carrying the element class and closure source. */
    public static Configuration createConfiguration(final Class<? extends Element> klass, final String closure) {
        final Configuration configuration = new EmptyConfiguration();
        configuration.setClass(CLASS, klass, Element.class);
        configuration.set(CLOSURE, closure);
        return configuration;
    }

    public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, Text> {

        // Compiled transformation closure, applied to each vertex or edge.
        private Closure closure;
        // True when the configured element class is Vertex (otherwise edges are processed).
        private boolean isVertex;
        private SafeMapperOutputs outputs;

        @Override
        public void setup(final Mapper.Context context) throws IOException, InterruptedException {
            this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
            try {
                // Compile the Groovy closure once per mapper instance.
                this.closure = (Closure) engine.eval(context.getConfiguration().get(CLOSURE));
            } catch (final ScriptException e) {
                throw new IOException(e.getMessage(), e);
            }
            this.outputs = new SafeMapperOutputs(context);
        }

        // Reused output holder to avoid per-record allocation.
        private final Text textWritable = new Text();

        @Override
        public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
            if (this.isVertex) {
                if (value.hasPaths()) {
                    final Object result = this.closure.call(value);
                    this.textWritable.set(null == result ? Tokens.NULL : result.toString());
                    // Emit the result once for every path that reached this vertex.
                    for (int i = 0; i < value.pathCount(); i++) {
                        this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
                    }
                    DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
                }
            } else {
                long edgesProcessed = 0;
                for (final Edge e : value.getEdges(Direction.OUT)) {
                    final StandardFaunusEdge edge = (StandardFaunusEdge) e;
                    if (edge.hasPaths()) {
                        final Object result = this.closure.call(edge);
                        this.textWritable.set(null == result ? Tokens.NULL : result.toString());
                        // Emit the result once for every path that reached this edge.
                        for (int i = 0; i < edge.pathCount(); i++) {
                            this.outputs.write(Tokens.SIDEEFFECT, NullWritable.get(), this.textWritable);
                        }
                        edgesProcessed++;
                    }
                }
                DEFAULT_COMPAT.incrementContextCounter(context, Counters.EDGES_PROCESSED, edgesProcessed);
            }
            // Pass the vertex through to the graph output untouched.
            this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
        }

        @Override
        public void cleanup(final Mapper<NullWritable, FaunusVertex, NullWritable, Text>.Context context) throws IOException, InterruptedException {
            this.outputs.close();
        }
    }
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_TransformMap.java
|
6 |
/**
 * Custom persistence handler for Customer entities: assigns the next customer
 * id on add and rejects duplicate usernames with a validation error.
 */
@Component("blCustomerCustomPersistenceHandler")
public class CustomerCustomPersistenceHandler extends CustomPersistenceHandlerAdapter {

    // BUG FIX: the logger was created with StructuredContentTypeCustomPersistenceHandler.class
    // (a copy-paste from another handler), mis-attributing every log line; use this class.
    private static final Log LOG = LogFactory.getLog(CustomerCustomPersistenceHandler.class);

    @Resource(name="blCustomerService")
    protected CustomerService customerService;

    @Override
    public Boolean canHandleAdd(PersistencePackage persistencePackage) {
        // Only handle adds whose ceiling entity is exactly Customer.
        return Customer.class.getName().equals(persistencePackage.getCeilingEntityFullyQualifiedClassname());
    }

    @Override
    public Entity add(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao, RecordHelper helper) throws ServiceException {
        Entity entity = persistencePackage.getEntity();
        try {
            PersistencePerspective persistencePerspective = persistencePackage.getPersistencePerspective();
            Customer adminInstance = (Customer) Class.forName(entity.getType()[0]).newInstance();
            adminInstance.setId(customerService.findNextCustomerId());
            Map<String, FieldMetadata> adminProperties = helper.getSimpleMergedProperties(Customer.class.getName(), persistencePerspective);
            adminInstance = (Customer) helper.createPopulatedInstance(adminInstance, entity, adminProperties, false);
            // Usernames must be unique; surface a field-level validation error
            // rather than letting the persistence layer fail.
            if (customerService.readCustomerByUsername(adminInstance.getUsername()) != null) {
                Entity error = new Entity();
                error.addValidationError("username", "nonUniqueUsernameError");
                return error;
            }
            adminInstance = (Customer) dynamicEntityDao.merge(adminInstance);
            return helper.getRecord(adminProperties, adminInstance, null, null);
        } catch (Exception e) {
            LOG.error("Unable to execute persistence activity", e);
            throw new ServiceException("Unable to add entity for " + entity.getType()[0], e);
        }
    }
}
| 1no label
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_server_service_handler_CustomerCustomPersistenceHandler.java
|
1,192 |
// Netty bootstrap wiring: every newly accepted channel gets a fresh pipeline
// containing a single Handler (Handler and bootstrap are defined elsewhere in
// the enclosing service class).
bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
    @Override
    public ChannelPipeline getPipeline() throws Exception {
        return Channels.pipeline(new Handler());
    }
});
| 0true
|
src_main_java_org_elasticsearch_bulk_udp_BulkUdpService.java
|
3,693 |
public class TimestampFieldMapper extends DateFieldMapper implements InternalMapper, RootMapper {
public static final String NAME = "_timestamp";
public static final String CONTENT_TYPE = "_timestamp";
public static final String DEFAULT_DATE_TIME_FORMAT = "dateOptionalTime";
/**
 * Defaults for the _timestamp field: indexed but not stored or tokenized,
 * disabled unless a mapping enables it, and using the "dateOptionalTime"
 * format.
 */
public static class Defaults extends DateFieldMapper.Defaults {
    public static final String NAME = "_timestamp";
    // Copy of the date defaults so the tweaks below don't mutate DateFieldMapper.Defaults.
    public static final FieldType FIELD_TYPE = new FieldType(DateFieldMapper.Defaults.FIELD_TYPE);
    static {
        FIELD_TYPE.setStored(false);
        FIELD_TYPE.setIndexed(true);
        FIELD_TYPE.setTokenized(false);
        FIELD_TYPE.freeze();
    }
    // Opt-in: mappings must explicitly enable the _timestamp field.
    public static final EnabledAttributeMapper ENABLED = EnabledAttributeMapper.DISABLED;
    public static final String PATH = null;
    public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern(DEFAULT_DATE_TIME_FORMAT);
}
/**
 * Fluent builder for the _timestamp field mapper: configures the enabled
 * state, an optional source path, and the date format.
 */
public static class Builder extends NumberFieldMapper.Builder<Builder, TimestampFieldMapper> {

    // UNSET_DISABLED distinguishes "never configured" from an explicit disable.
    private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
    private String path = Defaults.PATH;
    private FormatDateTimeFormatter dateTimeFormatter = Defaults.DATE_TIME_FORMATTER;

    public Builder() {
        super(Defaults.NAME, new FieldType(Defaults.FIELD_TYPE));
    }

    public Builder enabled(EnabledAttributeMapper enabledState) {
        this.enabledState = enabledState;
        return builder;
    }

    public Builder path(String path) {
        this.path = path;
        return builder;
    }

    public Builder dateTimeFormatter(FormatDateTimeFormatter dateTimeFormatter) {
        this.dateTimeFormatter = dateTimeFormatter;
        return builder;
    }

    @Override
    public TimestampFieldMapper build(BuilderContext context) {
        // Honor the index-level round-ceil setting (with its legacy alias) when present.
        boolean roundCeil = Defaults.ROUND_CEIL;
        if (context.indexSettings() != null) {
            Settings settings = context.indexSettings();
            roundCeil = settings.getAsBoolean("index.mapping.date.round_ceil", settings.getAsBoolean("index.mapping.date.parse_upper_inclusive", Defaults.ROUND_CEIL));
        }
        return new TimestampFieldMapper(fieldType, docValues, enabledState, path, dateTimeFormatter, roundCeil,
                ignoreMalformed(context), coerce(context), postingsProvider, docValuesProvider, normsLoading, fieldDataSettings, context.indexSettings());
    }
}
/**
 * Parses the _timestamp section of a mapping into a {@link Builder},
 * recognizing the "enabled", "path" and "format" keys (after underscore
 * normalization of the key names).
 */
public static class TypeParser implements Mapper.TypeParser {
    @Override
    public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
        TimestampFieldMapper.Builder builder = timestamp();
        // Handle the generic field attributes first, then the _timestamp-specific keys.
        parseField(builder, builder.name, node, parserContext);
        for (Map.Entry<String, Object> entry : node.entrySet()) {
            String fieldName = Strings.toUnderscoreCase(entry.getKey());
            Object fieldNode = entry.getValue();
            if (fieldName.equals("enabled")) {
                EnabledAttributeMapper enabledState = nodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED;
                builder.enabled(enabledState);
            } else if (fieldName.equals("path")) {
                builder.path(fieldNode.toString());
            } else if (fieldName.equals("format")) {
                builder.dateTimeFormatter(parseDateTimeFormatter(builder.name(), fieldNode.toString()));
            }
        }
        return builder;
    }
}
// Whether _timestamp is active for this mapping; mutable because merge() may update it.
private EnabledAttributeMapper enabledState;
// Optional path to a source field supplying the timestamp; null by default (Defaults.PATH).
private final String path;
/** Creates a mapper with all default settings (disabled, default date format). */
public TimestampFieldMapper() {
    this(new FieldType(Defaults.FIELD_TYPE), null, Defaults.ENABLED, Defaults.PATH, Defaults.DATE_TIME_FORMATTER,
            Defaults.ROUND_CEIL, Defaults.IGNORE_MALFORMED, Defaults.COERCE, null, null, null, null, ImmutableSettings.EMPTY);
}

// Full constructor; the _timestamp field name is fixed, so all four name variants are Defaults.NAME.
protected TimestampFieldMapper(FieldType fieldType, Boolean docValues, EnabledAttributeMapper enabledState, String path,
                               FormatDateTimeFormatter dateTimeFormatter, boolean roundCeil,
                               Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, PostingsFormatProvider postingsProvider,
                               DocValuesFormatProvider docValuesProvider, Loading normsLoading,
                               @Nullable Settings fieldDataSettings, Settings indexSettings) {
    super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), dateTimeFormatter,
            Defaults.PRECISION_STEP, Defaults.BOOST, fieldType, docValues,
            Defaults.NULL_VALUE, TimeUnit.MILLISECONDS /*always milliseconds*/,
            roundCeil, ignoreMalformed, coerce, postingsProvider, docValuesProvider, null, normsLoading, fieldDataSettings,
            indexSettings, MultiFields.empty(), null);
    this.enabledState = enabledState;
    this.path = path;
}
@Override
public FieldType defaultFieldType() {
    return Defaults.FIELD_TYPE;
}

/** @return true when the _timestamp field is enabled for this mapping */
public boolean enabled() {
    return this.enabledState.enabled;
}

/** @return the configured source path for the timestamp, or null when none was set */
public String path() {
    return this.path;
}

/** @return the formatter used to parse/print timestamp values */
public FormatDateTimeFormatter dateTimeFormatter() {
    return this.dateTimeFormatter;
}
/**
 * Override the default behavior to return a timestamp
 */
@Override
public Object valueForSearch(Object value) {
    return value(value);
}

// No mapping-time validation is required for this meta field.
@Override
public void validate(ParseContext context) throws MapperParsingException {
}

// The timestamp is parsed up-front, before the document body (see parse() below).
@Override
public void preParse(ParseContext context) throws IOException {
    super.parse(context);
}

@Override
public void postParse(ParseContext context) throws IOException {
}

@Override
public void parse(ParseContext context) throws IOException {
    // nothing to do here, we call the parent in preParse
}

// NOTE(review): returning true presumably allows _timestamp to appear inside the
// document source as well — confirm against DocumentParser usage.
@Override
public boolean includeInObject() {
    return true;
}
@Override
protected void innerParseCreateField(ParseContext context, List<Field> fields) throws IOException {
    // Nothing to emit when the _timestamp field is disabled.
    if (!enabledState.enabled) {
        return;
    }
    long timestamp = context.sourceToParse().timestamp();
    // Not indexed, not stored and no doc values: record the value as ignored.
    if (!fieldType.indexed() && !fieldType.stored() && !hasDocValues()) {
        context.ignoredValue(names.indexName(), String.valueOf(timestamp));
    }
    if (fieldType.indexed() || fieldType.stored()) {
        fields.add(new LongFieldMapper.CustomLongNumericField(this, timestamp, fieldType));
    }
    if (hasDocValues()) {
        fields.add(new NumericDocValuesField(names.indexName(), timestamp));
    }
}
// Identifier for this mapper type in mappings (CONTENT_TYPE is declared on the enclosing class).
@Override
protected String contentType() {
    return CONTENT_TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
    // if all are defaults, no sense to write it at all
    // NOTE: `path == Defaults.PATH` is a reference comparison; it is correct only
    // because Defaults.PATH is null, making this effectively a null check.
    if (!includeDefaults && fieldType.indexed() == Defaults.FIELD_TYPE.indexed() && customFieldDataSettings == null &&
            fieldType.stored() == Defaults.FIELD_TYPE.stored() && enabledState == Defaults.ENABLED && path == Defaults.PATH
            && dateTimeFormatter.format().equals(Defaults.DATE_TIME_FORMATTER.format())) {
        return builder;
    }
    builder.startObject(CONTENT_TYPE);
    if (includeDefaults || enabledState != Defaults.ENABLED) {
        builder.field("enabled", enabledState.enabled);
    }
    // The remaining attributes are only meaningful when the field is enabled.
    if (enabledState.enabled) {
        if (includeDefaults || fieldType.indexed() != Defaults.FIELD_TYPE.indexed()) {
            builder.field("index", indexTokenizeOptionToString(fieldType.indexed(), fieldType.tokenized()));
        }
        if (includeDefaults || fieldType.stored() != Defaults.FIELD_TYPE.stored()) {
            builder.field("store", fieldType.stored());
        }
        if (includeDefaults || path != Defaults.PATH) {
            builder.field("path", path);
        }
        if (includeDefaults || !dateTimeFormatter.format().equals(Defaults.DATE_TIME_FORMATTER.format())) {
            builder.field("format", dateTimeFormatter.format());
        }
        if (customFieldDataSettings != null) {
            builder.field("fielddata", (Map) customFieldDataSettings.getAsMap());
        } else if (includeDefaults) {
            builder.field("fielddata", (Map) fieldDataType.getSettings().getAsMap());
        }
    }
    builder.endObject();
    return builder;
}
// Only the enabled flag can change on merge, and only when not simulating.
@Override
public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
    TimestampFieldMapper other = (TimestampFieldMapper) mergeWith;
    if (!mergeContext.mergeFlags().simulate()
            && other.enabledState != enabledState
            && !other.enabledState.unset()) {
        this.enabledState = other.enabledState;
    }
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_mapper_internal_TimestampFieldMapper.java
|
4,665 |
/**
 * Percolator type that runs query-based percolation (see queryBasedPercolating)
 * and returns the matched query ids; shard-level reduction is delegated to the
 * plain match percolator.
 */
private final PercolatorType queryPercolator = new PercolatorType() {

    // Wire identifier for this percolator type.
    @Override
    public byte id() {
        return 0x04;
    }

    @Override
    public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
        // Responses have the same shape as the match percolator's, so reuse its reduce.
        return matchPercolator.reduce(shardResults);
    }

    @Override
    public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
        // Acquire a point-in-time searcher over the shard; released in the finally block.
        Engine.Searcher percolatorSearcher = context.indexShard().acquireSearcher("percolate");
        try {
            Match match = match(logger, context, highlightPhase);
            queryBasedPercolating(percolatorSearcher, context, match);
            List<BytesRef> matches = match.matches();
            List<Map<String, HighlightField>> hls = match.hls();
            long count = match.counter();
            BytesRef[] finalMatches = matches.toArray(new BytesRef[matches.size()]);
            return new PercolateShardResponse(finalMatches, hls, count, context, request.index(), request.shardId());
        } catch (Throwable e) {
            // Log at debug and rethrow wrapped with the shard id for the caller.
            logger.debug("failed to execute", e);
            throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
        } finally {
            percolatorSearcher.release();
        }
    }
};
| 1no label
|
src_main_java_org_elasticsearch_percolator_PercolatorService.java
|
917 |
/**
 * Controls how indices expressions on a request are resolved: whether unavailable
 * concrete indices are ignored, whether wildcard expressions may resolve to no
 * indices, and whether wildcards expand to open and/or closed indices.
 * Instances are interned: there are exactly 16 combinations, pre-built in VALUES.
 */
public class IndicesOptions {

    // All 2^4 option combinations, indexed by their bit-set id.
    private static final IndicesOptions[] VALUES;

    static {
        byte max = 1 << 4;
        VALUES = new IndicesOptions[max];
        for (byte id = 0; id < max; id++) {
            VALUES[id] = new IndicesOptions(id);
        }
    }

    // Bit set: 1 = ignoreUnavailable, 2 = allowNoIndices, 4 = expand open, 8 = expand closed.
    private final byte id;

    private IndicesOptions(byte id) {
        this.id = id;
    }

    /**
     * @return Whether specified concrete indices should be ignored when unavailable (missing or closed)
     */
    public boolean ignoreUnavailable() {
        return (id & 1) != 0;
    }

    /**
     * @return Whether to ignore if a wildcard indices expression resolves into no concrete indices.
     * The `_all` string or when no indices have been specified also count as wildcard expressions.
     */
    public boolean allowNoIndices() {
        return (id & 2) != 0;
    }

    /**
     * @return Whether wildcard indices expressions should be expanded into open indices
     */
    public boolean expandWildcardsOpen() {
        return (id & 4) != 0;
    }

    /**
     * @return Whether wildcard indices expressions should be expanded into closed indices
     */
    public boolean expandWildcardsClosed() {
        return (id & 8) != 0;
    }

    /** Serializes these options as their single-byte id. */
    public void writeIndicesOptions(StreamOutput out) throws IOException {
        out.write(id);
    }

    /**
     * Reads options previously written by {@link #writeIndicesOptions}.
     *
     * @throws ElasticsearchIllegalArgumentException if the id is out of range
     */
    public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException {
        byte id = in.readByte();
        // A byte is signed: also reject negative values, otherwise a corrupted stream
        // would surface as an ArrayIndexOutOfBoundsException instead of this error.
        if (id < 0 || id >= VALUES.length) {
            throw new ElasticsearchIllegalArgumentException("No valid missing index type id: " + id);
        }
        return VALUES[id];
    }

    /** Returns the interned options instance for the given flag combination. */
    public static IndicesOptions fromOptions(boolean ignoreUnavailable, boolean allowNoIndices, boolean expandToOpenIndices, boolean expandToClosedIndices) {
        byte id = toByte(ignoreUnavailable, allowNoIndices, expandToOpenIndices, expandToClosedIndices);
        return VALUES[id];
    }

    /**
     * Derives options from the request parameters {@code expand_wildcards},
     * {@code ignore_unavailable} and {@code allow_no_indices}, falling back to
     * {@code defaultSettings} for anything not specified.
     *
     * @throws ElasticsearchIllegalArgumentException on an unknown expand_wildcards value
     */
    public static IndicesOptions fromRequest(RestRequest request, IndicesOptions defaultSettings) {
        String sWildcards = request.param("expand_wildcards");
        String sIgnoreUnavailable = request.param("ignore_unavailable");
        String sAllowNoIndices = request.param("allow_no_indices");
        if (sWildcards == null && sIgnoreUnavailable == null && sAllowNoIndices == null) {
            return defaultSettings;
        }
        boolean expandWildcardsOpen = defaultSettings.expandWildcardsOpen();
        boolean expandWildcardsClosed = defaultSettings.expandWildcardsClosed();
        if (sWildcards != null) {
            String[] wildcards = Strings.splitStringByCommaToArray(sWildcards);
            for (String wildcard : wildcards) {
                if ("open".equals(wildcard)) {
                    expandWildcardsOpen = true;
                } else if ("closed".equals(wildcard)) {
                    expandWildcardsClosed = true;
                } else {
                    throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
                }
            }
        }
        return fromOptions(
                toBool(sIgnoreUnavailable, defaultSettings.ignoreUnavailable()),
                toBool(sAllowNoIndices, defaultSettings.allowNoIndices()),
                expandWildcardsOpen,
                expandWildcardsClosed
        );
    }

    /**
     * @return indices options that requires any specified index to exists, expands wildcards only to open indices and
     * allow that no indices are resolved from wildcard expressions (not returning an error).
     */
    public static IndicesOptions strict() {
        return VALUES[6]; // allowNoIndices | expandWildcardsOpen
    }

    /**
     * @return indices options that ignore unavailable indices, expand wildcards only to open indices and
     * allow that no indices are resolved from wildcard expressions (not returning an error).
     */
    public static IndicesOptions lenient() {
        return VALUES[7]; // ignoreUnavailable | allowNoIndices | expandWildcardsOpen
    }

    // Packs the four flags into the bit-set id used to index VALUES.
    private static byte toByte(boolean ignoreUnavailable, boolean allowNoIndices, boolean wildcardExpandToOpen, boolean wildcardExpandToClosed) {
        byte id = 0;
        if (ignoreUnavailable) {
            id |= 1;
        }
        if (allowNoIndices) {
            id |= 2;
        }
        if (wildcardExpandToOpen) {
            id |= 4;
        }
        if (wildcardExpandToClosed) {
            id |= 8;
        }
        return id;
    }

    // Parses a request parameter as a boolean; anything other than "false"/"0"/"off" is true.
    private static boolean toBool(String sValue, boolean defaultValue) {
        if (sValue == null) {
            return defaultValue;
        }
        return !(sValue.equals("false") || sValue.equals("0") || sValue.equals("off"));
    }
}
| 1no label
|
src_main_java_org_elasticsearch_action_support_IndicesOptions.java
|
507 |
/** Engine backed entirely by memory; registered under the "memory" URL scheme. */
public class OEngineMemory extends OEngineAbstract {
    public static final String NAME = "memory";

    public OEngineMemory() {
    }

    /**
     * Creates an in-memory storage for the given URL. On failure the error is
     * routed through OLogManager (which raises ODatabaseException) and null is
     * returned.
     */
    public OStorage createStorage(String iURL, Map<String, String> iConfiguration) {
        OStorage storage = null;
        try {
            storage = new OStorageMemory(iURL);
        } catch (Throwable t) {
            OLogManager.instance().error(this, "Error on opening in memory storage: " + iURL, t, ODatabaseException.class);
        }
        return storage;
    }

    public String getName() {
        return NAME;
    }

    // Memory storages are shared between database instances.
    public boolean isShared() {
        return true;
    }
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_engine_memory_OEngineMemory.java
|
1 |
/**
 * Base class for text-protocol commands: holds the command type, the reader/writer
 * pair bound via {@link #init}, and the request id (-1 until initialized).
 */
public abstract class AbstractTextCommand implements TextCommand {

    protected final TextCommandType type;
    private SocketTextReader reader;
    private SocketTextWriter writer;
    private long requestId = -1;

    protected AbstractTextCommand(TextCommandType type) {
        this.type = type;
    }

    @Override
    public TextCommandType getType() {
        return type;
    }

    @Override
    public SocketTextReader getSocketTextReader() {
        return reader;
    }

    @Override
    public SocketTextWriter getSocketTextWriter() {
        return writer;
    }

    @Override
    public long getRequestId() {
        return requestId;
    }

    @Override
    public void init(SocketTextReader socketTextReader, long requestId) {
        // The writer is always taken from the reader it is paired with.
        this.reader = socketTextReader;
        this.requestId = requestId;
        this.writer = socketTextReader.getSocketTextWriter();
    }

    @Override
    public boolean isUrgent() {
        return false;
    }

    @Override
    public boolean shouldReply() {
        return true;
    }

    @Override
    public String toString() {
        return "AbstractTextCommand[" + type + "]{requestId=" + requestId + '}';
    }
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_ascii_AbstractTextCommand.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.