Unnamed: 0 (int64, 0–6.45k) | func (string, lengths 37–143k) | target (class label, 2 classes) | project (string, lengths 33–157)
---|---|---|---|
2,528 |
public class XContentMapValues {
/**
* Extracts raw values (string, int, and so on) for the provided path, returning all of them
* as a single list.
*/
public static List<Object> extractRawValues(String path, Map<String, Object> map) {
List<Object> values = Lists.newArrayList();
String[] pathElements = Strings.splitStringToArray(path, '.');
if (pathElements.length == 0) {
return values;
}
extractRawValues(values, map, pathElements, 0);
return values;
}
@SuppressWarnings({"unchecked"})
private static void extractRawValues(List values, Map<String, Object> part, String[] pathElements, int index) {
if (index == pathElements.length) {
return;
}
String key = pathElements[index];
Object currentValue = part.get(key);
int nextIndex = index + 1;
while (currentValue == null && nextIndex != pathElements.length) {
key += "." + pathElements[nextIndex];
currentValue = part.get(key);
nextIndex++;
}
if (currentValue == null) {
return;
}
if (currentValue instanceof Map) {
extractRawValues(values, (Map<String, Object>) currentValue, pathElements, nextIndex);
} else if (currentValue instanceof List) {
extractRawValues(values, (List) currentValue, pathElements, nextIndex);
} else {
values.add(currentValue);
}
}
@SuppressWarnings({"unchecked"})
private static void extractRawValues(List values, List<Object> part, String[] pathElements, int index) {
for (Object value : part) {
if (value == null) {
continue;
}
if (value instanceof Map) {
extractRawValues(values, (Map<String, Object>) value, pathElements, index);
} else if (value instanceof List) {
extractRawValues(values, (List) value, pathElements, index);
} else {
values.add(value);
}
}
}
public static Object extractValue(String path, Map<String, Object> map) {
String[] pathElements = Strings.splitStringToArray(path, '.');
if (pathElements.length == 0) {
return null;
}
return extractValue(pathElements, 0, map);
}
@SuppressWarnings({"unchecked"})
private static Object extractValue(String[] pathElements, int index, Object currentValue) {
if (index == pathElements.length) {
return currentValue;
}
if (currentValue == null) {
return null;
}
if (currentValue instanceof Map) {
Map map = (Map) currentValue;
String key = pathElements[index];
Object mapValue = map.get(key);
int nextIndex = index + 1;
while (mapValue == null && nextIndex != pathElements.length) {
key += "." + pathElements[nextIndex];
mapValue = map.get(key);
nextIndex++;
}
return extractValue(pathElements, nextIndex, mapValue);
}
if (currentValue instanceof List) {
List valueList = (List) currentValue;
List newList = new ArrayList(valueList.size());
for (Object o : valueList) {
Object listValue = extractValue(pathElements, index, o);
if (listValue != null) {
newList.add(listValue);
}
}
return newList;
}
return null;
}
public static Map<String, Object> filter(Map<String, Object> map, String[] includes, String[] excludes) {
Map<String, Object> result = Maps.newHashMap();
filter(map, result, includes == null ? Strings.EMPTY_ARRAY : includes, excludes == null ? Strings.EMPTY_ARRAY : excludes, new StringBuilder());
return result;
}
private static void filter(Map<String, Object> map, Map<String, Object> into, String[] includes, String[] excludes, StringBuilder sb) {
if (includes.length == 0 && excludes.length == 0) {
into.putAll(map);
return;
}
for (Map.Entry<String, Object> entry : map.entrySet()) {
String key = entry.getKey();
int mark = sb.length();
if (sb.length() > 0) {
sb.append('.');
}
sb.append(key);
String path = sb.toString();
if (Regex.simpleMatch(excludes, path)) {
sb.setLength(mark);
continue;
}
boolean exactIncludeMatch = false; // true if the current position was specifically mentioned
boolean pathIsPrefixOfAnInclude = false; // true if potentially a sub scope can be included
if (includes.length == 0) {
// implied match anything
exactIncludeMatch = true;
} else {
for (String include : includes) {
// check for prefix matches as well to see if we need to zero in, something like: obj1.arr1.* or *.field
// note, this does not work well with middle matches, like obj1.*.obj3
if (include.charAt(0) == '*') {
if (Regex.simpleMatch(include, path)) {
exactIncludeMatch = true;
break;
}
pathIsPrefixOfAnInclude = true;
break;
}
if (include.startsWith(path)) {
if (include.length() == path.length()) {
exactIncludeMatch = true;
break;
} else if (include.length() > path.length() && include.charAt(path.length()) == '.') {
// the include might match deeper paths, so dive deeper.
pathIsPrefixOfAnInclude = true;
break;
}
}
if (Regex.simpleMatch(include, path)) {
exactIncludeMatch = true;
break;
}
}
}
if (!(pathIsPrefixOfAnInclude || exactIncludeMatch)) {
// skip subkeys, not interesting.
sb.setLength(mark);
continue;
}
if (entry.getValue() instanceof Map) {
Map<String, Object> innerInto = Maps.newHashMap();
// if we had an exact match, we still want to give deeper excludes their chance
filter((Map<String, Object>) entry.getValue(), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb);
if (exactIncludeMatch || !innerInto.isEmpty()) {
into.put(entry.getKey(), innerInto);
}
} else if (entry.getValue() instanceof List) {
List<Object> list = (List<Object>) entry.getValue();
List<Object> innerInto = new ArrayList<Object>(list.size());
// if we had an exact match, we still want to give deeper excludes their chance
filter(list, innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb);
into.put(entry.getKey(), innerInto);
} else if (exactIncludeMatch) {
into.put(entry.getKey(), entry.getValue());
}
sb.setLength(mark);
}
}
private static void filter(List<Object> from, List<Object> to, String[] includes, String[] excludes, StringBuilder sb) {
if (includes.length == 0 && excludes.length == 0) {
to.addAll(from);
return;
}
for (Object o : from) {
if (o instanceof Map) {
Map<String, Object> innerInto = Maps.newHashMap();
filter((Map<String, Object>) o, innerInto, includes, excludes, sb);
if (!innerInto.isEmpty()) {
to.add(innerInto);
}
} else if (o instanceof List) {
List<Object> innerInto = new ArrayList<Object>();
filter((List<Object>) o, innerInto, includes, excludes, sb);
if (!innerInto.isEmpty()) {
to.add(innerInto);
}
} else {
to.add(o);
}
}
}
public static boolean isObject(Object node) {
return node instanceof Map;
}
public static boolean isArray(Object node) {
return node instanceof List;
}
public static String nodeStringValue(Object node, String defaultValue) {
if (node == null) {
return defaultValue;
}
return node.toString();
}
public static float nodeFloatValue(Object node, float defaultValue) {
if (node == null) {
return defaultValue;
}
return nodeFloatValue(node);
}
public static float nodeFloatValue(Object node) {
if (node instanceof Number) {
return ((Number) node).floatValue();
}
return Float.parseFloat(node.toString());
}
public static double nodeDoubleValue(Object node, double defaultValue) {
if (node == null) {
return defaultValue;
}
return nodeDoubleValue(node);
}
public static double nodeDoubleValue(Object node) {
if (node instanceof Number) {
return ((Number) node).doubleValue();
}
return Double.parseDouble(node.toString());
}
public static int nodeIntegerValue(Object node) {
if (node instanceof Number) {
return ((Number) node).intValue();
}
return Integer.parseInt(node.toString());
}
public static int nodeIntegerValue(Object node, int defaultValue) {
if (node == null) {
return defaultValue;
}
if (node instanceof Number) {
return ((Number) node).intValue();
}
return Integer.parseInt(node.toString());
}
public static short nodeShortValue(Object node, short defaultValue) {
if (node == null) {
return defaultValue;
}
return nodeShortValue(node);
}
public static short nodeShortValue(Object node) {
if (node instanceof Number) {
return ((Number) node).shortValue();
}
return Short.parseShort(node.toString());
}
public static byte nodeByteValue(Object node, byte defaultValue) {
if (node == null) {
return defaultValue;
}
return nodeByteValue(node);
}
public static byte nodeByteValue(Object node) {
if (node instanceof Number) {
return ((Number) node).byteValue();
}
return Byte.parseByte(node.toString());
}
public static long nodeLongValue(Object node, long defaultValue) {
if (node == null) {
return defaultValue;
}
return nodeLongValue(node);
}
public static long nodeLongValue(Object node) {
if (node instanceof Number) {
return ((Number) node).longValue();
}
return Long.parseLong(node.toString());
}
public static boolean nodeBooleanValue(Object node, boolean defaultValue) {
if (node == null) {
return defaultValue;
}
return nodeBooleanValue(node);
}
public static boolean nodeBooleanValue(Object node) {
if (node instanceof Boolean) {
return (Boolean) node;
}
if (node instanceof Number) {
return ((Number) node).intValue() != 0;
}
String value = node.toString();
return !(value.equals("false") || value.equals("0") || value.equals("off"));
}
public static TimeValue nodeTimeValue(Object node, TimeValue defaultValue) {
if (node == null) {
return defaultValue;
}
return nodeTimeValue(node);
}
public static TimeValue nodeTimeValue(Object node) {
if (node instanceof Number) {
return TimeValue.timeValueMillis(((Number) node).longValue());
}
return TimeValue.parseTimeValue(node.toString(), null);
}
public static Map<String, Object> nodeMapValue(Object node, String desc) {
if (node instanceof Map) {
return (Map<String, Object>) node;
} else {
throw new ElasticsearchParseException(desc + " should be a hash but was of type: " + node.getClass());
}
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_common_xcontent_support_XContentMapValues.java
|
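The XContentMapValues sample above resolves dot-separated paths through nested maps, also retrying progressively longer compound keys when a lookup misses. A minimal standalone sketch of the core idea, using only java.util (the names here are illustrative, not the Elasticsearch API; list handling and the dotted-key retries are omitted):

```java
import java.util.HashMap;
import java.util.Map;

public class DotPathDemo {
    // Simplified dot-path lookup in the spirit of extractValue() above:
    // descend one path element at a time through nested Maps.
    @SuppressWarnings("unchecked")
    static Object extract(String path, Map<String, Object> map) {
        Object current = map;
        for (String element : path.split("\\.")) {
            if (!(current instanceof Map)) {
                return null; // path runs deeper than the structure
            }
            current = ((Map<String, Object>) current).get(element);
        }
        return current;
    }

    public static void main(String[] args) {
        Map<String, Object> source = new HashMap<>();
        Map<String, Object> obj1 = new HashMap<>();
        obj1.put("field", 42);
        source.put("obj1", obj1);

        System.out.println(extract("obj1.field", source));   // 42
        System.out.println(extract("obj1.missing", source)); // null
    }
}
```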
116 |
public class NullPageDTO extends PageDTO {
}
| 0 (true)
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_dto_NullPageDTO.java
|
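NullPageDTO is a textbook Null Object: an empty subtype that can be returned instead of null so callers skip null checks. A generic sketch of the pattern (all names hypothetical, not the Broadleaf API):

```java
// Hypothetical sketch of the Null Object pattern used by NullPageDTO.
class Page {
    String title() { return "real page"; }
}

class NullPage extends Page {
    @Override
    String title() { return ""; } // safe default instead of null
}

class PageLookup {
    static final Page NOT_FOUND = new NullPage();

    static Page find(String url) {
        // a real lookup would consult a cache or database first
        return NOT_FOUND;
    }

    public static void main(String[] args) {
        // no null check needed at the call site
        System.out.println(PageLookup.find("/missing").title().isEmpty()); // true
    }
}
```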
283 |
public class OCommandScriptException extends OException {
private String text;
private int position;
private static final long serialVersionUID = -7430575036316163711L;
public OCommandScriptException(String iMessage) {
super(iMessage, null);
}
public OCommandScriptException(String iMessage, Throwable cause) {
super(iMessage, cause);
}
public OCommandScriptException(String iMessage, String iText, int iPosition, Throwable cause) {
super(iMessage, cause);
text = iText;
position = iPosition < 0 ? 0 : iPosition;
}
public OCommandScriptException(String iMessage, String iText, int iPosition) {
super(iMessage);
text = iText;
position = iPosition < 0 ? 0 : iPosition;
}
@Override
public String getMessage() {
if (text == null)
return super.getMessage();
final StringBuilder buffer = new StringBuilder();
buffer.append("Error on parsing script at position #");
buffer.append(position);
buffer.append(": " + super.getMessage());
buffer.append("\nScript: ");
buffer.append(text);
buffer.append("\n------");
for (int i = 0; i < position - 1; ++i)
buffer.append("-");
buffer.append("^");
return buffer.toString();
}
}
| 0 (true)
|
core_src_main_java_com_orientechnologies_orient_core_command_script_OCommandScriptException.java
|
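The getMessage() override above draws a dashed line with a caret under the failing script position. That formatting, reproduced as a self-contained sketch (simplified; not the OrientDB class itself):

```java
public class ScriptErrorDemo {
    // Mirrors the position-marker formatting in getMessage() above.
    static String render(String message, String script, int position) {
        StringBuilder buffer = new StringBuilder();
        buffer.append("Error on parsing script at position #")
              .append(position)
              .append(": ").append(message)
              .append("\nScript: ").append(script)
              .append("\n------");
        for (int i = 0; i < position - 1; ++i) {
            buffer.append('-');
        }
        return buffer.append('^').toString();
    }

    public static void main(String[] args) {
        System.out.println(render("unexpected token", "SELECT FORM V", 8));
    }
}
```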
70 |
@Repository("blStaticAssetDao")
public class StaticAssetDaoImpl implements StaticAssetDao {
private static SandBox DUMMY_SANDBOX = new SandBoxImpl();
{
DUMMY_SANDBOX.setId(-1L);
}
@PersistenceContext(unitName = "blPU")
protected EntityManager em;
@Resource(name="blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
@Override
public StaticAsset readStaticAssetById(Long id) {
return em.find(StaticAssetImpl.class, id);
}
public List<StaticAsset> readAllStaticAssets() {
CriteriaBuilder builder = em.getCriteriaBuilder();
CriteriaQuery<StaticAsset> criteria = builder.createQuery(StaticAsset.class);
Root<StaticAssetImpl> handler = criteria.from(StaticAssetImpl.class);
criteria.select(handler);
try {
return em.createQuery(criteria).getResultList();
} catch (NoResultException e) {
return new ArrayList<StaticAsset>();
}
}
@Override
public StaticAsset readStaticAssetByFullUrl(String fullUrl, SandBox targetSandBox) {
TypedQuery<StaticAsset> query;
if (targetSandBox == null) {
query = em.createNamedQuery("BC_READ_STATIC_ASSET_BY_FULL_URL_AND_TARGET_SANDBOX_NULL", StaticAsset.class);
query.setParameter("fullUrl", fullUrl);
} else {
query = em.createNamedQuery("BC_READ_STATIC_ASSET_BY_FULL_URL", StaticAsset.class);
query.setParameter("targetSandbox", targetSandBox);
query.setParameter("fullUrl", fullUrl);
}
query.setHint(QueryHints.HINT_CACHEABLE, true);
List<StaticAsset> results = query.getResultList();
if (CollectionUtils.isEmpty(results)) {
return null;
} else {
return results.iterator().next();
}
}
@Override
public StaticAsset addOrUpdateStaticAsset(StaticAsset asset, boolean clearLevel1Cache) {
if (clearLevel1Cache) {
em.detach(asset);
}
return em.merge(asset);
}
@Override
public void delete(StaticAsset asset) {
if (!em.contains(asset)) {
asset = readStaticAssetById(asset.getId());
}
em.remove(asset);
}
}
| 0 (true)
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_dao_StaticAssetDaoImpl.java
|
162 |
public class StructuredContentRuleType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, StructuredContentRuleType> TYPES = new LinkedHashMap<String, StructuredContentRuleType>();
public static final StructuredContentRuleType REQUEST = new StructuredContentRuleType("REQUEST", "Request");
public static final StructuredContentRuleType TIME = new StructuredContentRuleType("TIME", "Time");
public static final StructuredContentRuleType PRODUCT = new StructuredContentRuleType("PRODUCT", "Product");
public static final StructuredContentRuleType CUSTOMER = new StructuredContentRuleType("CUSTOMER", "Customer");
/**
* Allows translation from the passed in String to a <code>StructuredContentRuleType</code>
* @param type
* @return The matching rule type
*/
public static StructuredContentRuleType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public StructuredContentRuleType() {
//do nothing
}
/**
* Initialize the type and friendlyType
* @param type
* @param friendlyType
*/
public StructuredContentRuleType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
/**
* Sets the type
* @param type
*/
public void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
/**
* Gets the type
* @return the type
*/
public String getType() {
return type;
}
/**
* Gets the name of the type
* @return the friendly name of the type
*/
public String getFriendlyType() {
return friendlyType;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
StructuredContentRuleType other = (StructuredContentRuleType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1 (no label)
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_service_type_StructuredContentRuleType.java
|
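StructuredContentRuleType follows Broadleaf's extensible-enum idiom: each constructor call registers the instance in a static map that getInstance() consults. Assuming the class above is on the classpath, usage looks roughly like this:

```java
public class RuleTypeDemo {
    public static void main(String[] args) {
        // Loading the class initializes the static fields, each of which
        // registered itself in the TYPES map via setType().
        StructuredContentRuleType time = StructuredContentRuleType.getInstance("TIME");
        System.out.println(time.getFriendlyType()); // Time

        // Modules can extend the "enum" at runtime by constructing a new value.
        new StructuredContentRuleType("GEO", "Geolocation");
        System.out.println(StructuredContentRuleType.getInstance("GEO").getFriendlyType());
    }
}
```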
173 |
public class SocketSimpleClient implements SimpleClient {
private final Node node;
final Socket socket = new Socket();
final ObjectDataInputStream in;
final ObjectDataOutputStream out;
public SocketSimpleClient(Node node) throws IOException {
this.node = node;
socket.connect(node.address.getInetSocketAddress());
OutputStream outputStream = socket.getOutputStream();
outputStream.write(Protocols.CLIENT_BINARY.getBytes());
outputStream.write(ClientTypes.JAVA.getBytes());
outputStream.flush();
SerializationService ss = getSerializationService();
in = ss.createObjectDataInputStream(new BufferedInputStream(socket.getInputStream()));
out = ss.createObjectDataOutputStream(new BufferedOutputStream(outputStream));
}
public void auth() throws IOException {
AuthenticationRequest auth = new AuthenticationRequest(new UsernamePasswordCredentials("dev", "dev-pass"));
send(auth);
receive();
}
public void send(Object o) throws IOException {
final Data data = getSerializationService().toData(o);
data.writeData(out);
out.flush();
}
public Object receive() throws IOException {
Data responseData = new Data();
responseData.readData(in);
ClientResponse clientResponse = getSerializationService().toObject(responseData);
return getSerializationService().toObject(clientResponse.getResponse());
}
public void close() throws IOException {
socket.close();
}
@Override
public SerializationService getSerializationService() {
return node.getSerializationService();
}
}
| 0 (true)
|
hazelcast_src_test_java_com_hazelcast_client_SocketSimpleClient.java
|
151 |
class ObjectClassDefinitionGenerator extends DefinitionGenerator {
private final String brokenName;
private final MemberOrTypeExpression node;
private final CompilationUnit rootNode;
private final String desc;
private final Image image;
private final ProducedType returnType;
private final LinkedHashMap<String, ProducedType> parameters;
@Override
String getBrokenName() {
return brokenName;
}
@Override
ProducedType getReturnType() {
return returnType;
}
@Override
LinkedHashMap<String, ProducedType> getParameters() {
return parameters;
}
@Override
String getDescription() {
return desc;
}
@Override
Image getImage() {
return image;
}
@Override
Tree.CompilationUnit getRootNode() {
return rootNode;
}
@Override
Node getNode() {
return node;
}
private ObjectClassDefinitionGenerator(String brokenName,
Tree.MemberOrTypeExpression node,
Tree.CompilationUnit rootNode,
String desc,
Image image,
ProducedType returnType,
LinkedHashMap<String, ProducedType> paramTypes) {
this.brokenName = brokenName;
this.node = node;
this.rootNode = rootNode;
this.desc = desc;
this.image = image;
this.returnType = returnType;
this.parameters = paramTypes;
}
String generateShared(String indent, String delim) {
return "shared " + generate(indent, delim);
}
String generate(String indent, String delim) {
StringBuffer def = new StringBuffer();
boolean isUpperCase =
Character.isUpperCase(brokenName.charAt(0));
boolean isVoid = returnType==null;
if (isUpperCase && parameters!=null) {
List<TypeParameter> typeParams = new ArrayList<TypeParameter>();
StringBuilder typeParamDef = new StringBuilder();
StringBuilder typeParamConstDef = new StringBuilder();
appendTypeParams(typeParams, typeParamDef, typeParamConstDef, returnType);
appendTypeParams(typeParams, typeParamDef, typeParamConstDef, parameters.values());
if (typeParamDef.length() > 0) {
typeParamDef.insert(0, "<");
typeParamDef.setLength(typeParamDef.length() - 1);
typeParamDef.append(">");
}
String defIndent = getDefaultIndent();
String supertype = isVoid ?
null : supertypeDeclaration(returnType);
def.append("class ").append(brokenName).append(typeParamDef);
appendParameters(parameters, def);
if (supertype!=null) {
def.append(delim).append(indent).append(defIndent).append(defIndent)
.append(supertype);
}
def.append(typeParamConstDef);
def.append(" {").append(delim);
if (!isVoid) {
appendMembers(indent, delim, def, defIndent);
}
def.append(indent).append("}");
}
else if (!isUpperCase && parameters==null) {
String defIndent = getDefaultIndent();
String supertype = isVoid ?
null : supertypeDeclaration(returnType);
def.append("object ").append(brokenName);
if (supertype!=null) {
def.append(delim).append(indent).append(defIndent).append(defIndent)
.append(supertype);
}
def.append(" {").append(delim);
if (!isVoid) {
appendMembers(indent, delim, def, defIndent);
}
def.append(indent).append("}");
}
else {
return null;
}
return def.toString();
}
Set<Declaration> getImports() {
Set<Declaration> imports = new HashSet<Declaration>();
importType(imports, returnType, rootNode);
if (parameters!=null) {
importTypes(imports, parameters.values(), rootNode);
}
if (returnType!=null) {
importMembers(imports);
}
return imports;
}
private void importMembers(Set<Declaration> imports) {
//TODO: this is a major copy/paste from appendMembers() below
TypeDeclaration td = getDefaultedSupertype();
Set<String> ambiguousNames = new HashSet<String>();
Collection<DeclarationWithProximity> members =
td.getMatchingMemberDeclarations(rootNode.getUnit(),
null, "", 0).values();
for (DeclarationWithProximity dwp: members) {
Declaration dec = dwp.getDeclaration();
for (Declaration d: overloads(dec)) {
if (d.isFormal() /*&& td.isInheritedFromSupertype(d)*/) {
importSignatureTypes(d, rootNode, imports);
ambiguousNames.add(d.getName());
}
}
}
for (TypeDeclaration superType: td.getSupertypeDeclarations()) {
for (Declaration m: superType.getMembers()) {
if (m.isShared()) {
Declaration r = td.getMember(m.getName(), null, false);
if (r==null ||
!r.refines(m) &&
// !r.getContainer().equals(ut) &&
!ambiguousNames.add(m.getName())) {
importSignatureTypes(m, rootNode, imports);
}
}
}
}
}
private void appendMembers(String indent, String delim, StringBuffer def,
String defIndent) {
TypeDeclaration td = getDefaultedSupertype();
Set<String> ambiguousNames = new HashSet<String>();
Collection<DeclarationWithProximity> members =
td.getMatchingMemberDeclarations(rootNode.getUnit(),
null, "", 0).values();
for (DeclarationWithProximity dwp: members) {
Declaration dec = dwp.getDeclaration();
for (Declaration d: overloads(dec)) {
if (d.isFormal() /*&& td.isInheritedFromSupertype(d)*/) {
if (ambiguousNames.add(d.getName())) {
appendRefinementText(indent, delim, def,
defIndent, d);
}
}
}
}
for (TypeDeclaration superType: td.getSupertypeDeclarations()) {
for (Declaration m: superType.getMembers()) {
if (m.isShared()) {
Declaration r = td.getMember(m.getName(), null, false);
if ((r==null ||
!r.refines(m)) &&
// !r.getContainer().equals(ut)) &&
ambiguousNames.add(m.getName())) {
appendRefinementText(indent, delim, def,
defIndent, m);
}
}
}
}
}
private TypeDeclaration getDefaultedSupertype() {
if (isNotBasic(returnType)) {
return returnType.getDeclaration();
}
else {
Unit unit = rootNode.getUnit();
return intersectionType(returnType,
unit.getBasicDeclaration().getType(),
unit).getDeclaration();
}
}
private void appendRefinementText(String indent, String delim,
StringBuffer def, String defIndent, Declaration d) {
ProducedReference pr =
getRefinedProducedReference(returnType, d);
String text = getRefinementTextFor(d, pr, node.getUnit(),
false, null, "", false);
def.append(indent).append(defIndent).append(text).append(delim);
}
static ObjectClassDefinitionGenerator create(String brokenName,
Tree.MemberOrTypeExpression node,
Tree.CompilationUnit rootNode) {
boolean isUpperCase = Character.isUpperCase(brokenName.charAt(0));
FindArgumentsVisitor fav = new FindArgumentsVisitor(node);
rootNode.visit(fav);
Unit unit = node.getUnit();
ProducedType returnType = unit.denotableType(fav.expectedType);
StringBuilder params = new StringBuilder();
LinkedHashMap<String, ProducedType> paramTypes = getParameters(fav);
if (returnType!=null) {
if(unit.isOptionalType(returnType)){
returnType = returnType.eliminateNull();
}
TypeDeclaration rtd = returnType.getDeclaration();
if ( (rtd instanceof Class) && (
rtd.equals(unit.getObjectDeclaration()) ||
rtd.equals(unit.getAnythingDeclaration()))
) {
returnType = null;
}
}
if (!isValidSupertype(returnType)) {
return null;
}
if (paramTypes!=null && isUpperCase) {
String supertype = supertypeDeclaration(returnType);
if (supertype==null) supertype = "";
String desc = "class '" + brokenName + params + supertype + "'";
return new ObjectClassDefinitionGenerator(brokenName, node, rootNode,
desc, LOCAL_CLASS, returnType, paramTypes);
}
else if (paramTypes==null && !isUpperCase) {
String desc = "object '" + brokenName + "'";
return new ObjectClassDefinitionGenerator(brokenName, node, rootNode,
desc, LOCAL_ATTRIBUTE, returnType, null);
}
else {
return null;
}
}
private static String supertypeDeclaration(ProducedType returnType) {
if (isTypeUnknown(returnType)) {
return null;
}
else {
TypeDeclaration rtd = returnType.getDeclaration();
if (rtd instanceof Class) {
return " extends " + returnType.getProducedTypeName() + "()"; //TODO: supertype arguments!
}
else if (rtd instanceof Interface) {
return " satisfies " + returnType.getProducedTypeName();
}
else if (rtd instanceof IntersectionType) {
String extendsClause = "";
StringBuilder satisfiesClause = new StringBuilder();
for (ProducedType st: rtd.getSatisfiedTypes()) {
if (st.getDeclaration() instanceof Class) {
extendsClause = " extends " + st.getProducedTypeName() + "()"; //TODO: supertype arguments!
}
else if (st.getDeclaration() instanceof Interface) {
if (satisfiesClause.length()==0) {
satisfiesClause.append(" satisfies ");
}
else {
satisfiesClause.append(" & ");
}
satisfiesClause.append(st.getProducedTypeName());
}
}
return extendsClause+satisfiesClause;
}
else {
return null;
}
}
}
private static boolean isValidSupertype(ProducedType returnType) {
if (isTypeUnknown(returnType)) {
return true;
}
else {
TypeDeclaration rtd = returnType.getDeclaration();
if (rtd.getCaseTypes()!=null) {
return false;
}
if (rtd instanceof Class) {
return !rtd.isFinal();
}
else if (rtd instanceof Interface) {
return !rtd.equals(rtd.getUnit().getCallableDeclaration());
}
else if (rtd instanceof IntersectionType) {
for (ProducedType st: rtd.getSatisfiedTypes()) {
if (!isValidSupertype(st)) return false;
}
return true;
}
else {
return false;
}
}
}
private static boolean isNotBasic(ProducedType returnType) {
if (isTypeUnknown(returnType)) {
return false;
}
else {
TypeDeclaration rtd = returnType.getDeclaration();
if (rtd instanceof Class) {
return returnType.getSupertype(rtd.getUnit().getBasicDeclaration())==null;
}
else if (rtd instanceof Interface) {
return false;
}
else if (rtd instanceof IntersectionType) {
for (ProducedType st: rtd.getSatisfiedTypes()) {
if (st.getDeclaration() instanceof Class) {
return returnType.getSupertype(rtd.getUnit().getBasicDeclaration())==null;
}
}
return false;
}
else {
return false;
}
}
}
}
| 1 (no label)
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_ObjectClassDefinitionGenerator.java
|
611 |
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine,
String valueContainerAlgorithm) {
super(type, algorithm, indexEngine, valueContainerAlgorithm);
}
public Set<OIdentifiable> get(Object key) {
checkForRebuild();
key = getCollatingValue(key);
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return Collections.emptySet();
return new HashSet<OIdentifiable>(values);
} finally {
releaseSharedLock();
}
}
public long count(Object key) {
checkForRebuild();
key = getCollatingValue(key);
acquireSharedLock();
try {
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
return 0;
return values.size();
} finally {
releaseSharedLock();
}
}
public OIndexMultiValues put(Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
key = getCollatingValue(key);
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
checkForKeyType(key);
Set<OIdentifiable> values = indexEngine.get(key);
if (values == null) {
if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm)) {
values = new OIndexRIDContainer(getName());
} else {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
}
if (!iSingleValue.getIdentity().isValid())
((ORecord<?>) iSingleValue).save();
values.add(iSingleValue.getIdentity());
indexEngine.put(key, values);
return this;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
@Override
protected void putInSnapshot(Object key, OIdentifiable value, final Map<Object, Object> snapshot) {
key = getCollatingValue(key);
Object snapshotValue = snapshot.get(key);
Set<OIdentifiable> values;
if (snapshotValue == null)
values = indexEngine.get(key);
else if (snapshotValue.equals(RemovedValue.INSTANCE))
values = null;
else
values = (Set<OIdentifiable>) snapshotValue;
if (values == null) {
if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm)) {
values = new OIndexRIDContainer(getName());
} else {
values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
}
snapshot.put(key, values);
}
values.add(value.getIdentity());
if (values instanceof OIndexRIDContainer && ((OIndexRIDContainer) values).isEmbedded())
snapshot.put(key, values);
}
@Override
public boolean remove(Object key, final OIdentifiable value) {
checkForRebuild();
key = getCollatingValue(key);
modificationLock.requestModificationLock();
try {
acquireExclusiveLock();
try {
Set<OIdentifiable> recs = indexEngine.get(key);
if (recs == null)
return false;
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(key);
else
indexEngine.put(key, recs);
return true;
}
return false;
} finally {
releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
@Override
protected void removeFromSnapshot(Object key, final OIdentifiable value, final Map<Object, Object> snapshot) {
key = getCollatingValue(key);
final Object snapshotValue = snapshot.get(key);
Set<OIdentifiable> values;
if (snapshotValue == null)
values = indexEngine.get(key);
else if (snapshotValue.equals(RemovedValue.INSTANCE))
values = null;
else
values = (Set<OIdentifiable>) snapshotValue;
if (values == null)
return;
if (values.remove(value)) {
if (values.isEmpty())
snapshot.put(key, RemovedValue.INSTANCE);
else
snapshot.put(key, values);
}
}
@Override
protected void commitSnapshot(Map<Object, Object> snapshot) {
for (Map.Entry<Object, Object> snapshotEntry : snapshot.entrySet()) {
Object key = snapshotEntry.getKey();
Object value = snapshotEntry.getValue();
checkForKeyType(key);
if (value.equals(RemovedValue.INSTANCE))
indexEngine.remove(key);
else
indexEngine.put(key, (Set<OIdentifiable>) value);
}
}
public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
final OStreamSerializer serializer;
if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm))
serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
else
serializer = OStreamSerializerListRID.INSTANCE;
return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
serializer);
}
public void getValuesBetween(Object iRangeFrom, final boolean fromInclusive, Object iRangeTo, final boolean toInclusive,
final IndexValuesResultListener resultListener) {
checkForRebuild();
iRangeFrom = getCollatingValue(iRangeFrom);
iRangeTo = getCollatingValue(iRangeTo);
acquireSharedLock();
try {
indexEngine.getValuesBetween(iRangeFrom, fromInclusive, iRangeTo, toInclusive, MultiValuesTransformer.INSTANCE,
new OIndexEngine.ValuesResultListener() {
@Override
public boolean addResult(OIdentifiable identifiable) {
return resultListener.addResult(identifiable);
}
});
} finally {
releaseSharedLock();
}
}
public void getValuesMajor(Object iRangeFrom, final boolean isInclusive, final IndexValuesResultListener valuesResultListener) {
checkForRebuild();
iRangeFrom = getCollatingValue(iRangeFrom);
acquireSharedLock();
try {
indexEngine.getValuesMajor(iRangeFrom, isInclusive, MultiValuesTransformer.INSTANCE, new OIndexEngine.ValuesResultListener() {
@Override
public boolean addResult(OIdentifiable identifiable) {
return valuesResultListener.addResult(identifiable);
}
});
} finally {
releaseSharedLock();
}
}
public void getValuesMinor(Object iRangeTo, final boolean isInclusive, final IndexValuesResultListener resultListener) {
checkForRebuild();
iRangeTo = getCollatingValue(iRangeTo);
acquireSharedLock();
try {
indexEngine.getValuesMinor(iRangeTo, isInclusive, MultiValuesTransformer.INSTANCE, new OIndexEngine.ValuesResultListener() {
@Override
public boolean addResult(OIdentifiable identifiable) {
return resultListener.addResult(identifiable);
}
});
} finally {
releaseSharedLock();
}
}
public void getValues(final Collection<?> iKeys, final IndexValuesResultListener resultListener) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
for (Object key : sortedKeys) {
key = getCollatingValue(key);
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
if (!resultListener.addResult(value))
return;
}
}
}
} finally {
releaseSharedLock();
}
}
public void getEntriesMajor(Object iRangeFrom, final boolean isInclusive, final IndexEntriesResultListener entriesResultListener) {
checkForRebuild();
iRangeFrom = getCollatingValue(iRangeFrom);
acquireSharedLock();
try {
indexEngine.getEntriesMajor(iRangeFrom, isInclusive, MultiValuesTransformer.INSTANCE,
new OIndexEngine.EntriesResultListener() {
@Override
public boolean addResult(ODocument entry) {
return entriesResultListener.addResult(entry);
}
});
} finally {
releaseSharedLock();
}
}
public void getEntriesMinor(Object iRangeTo, boolean isInclusive, final IndexEntriesResultListener entriesResultListener) {
checkForRebuild();
iRangeTo = getCollatingValue(iRangeTo);
acquireSharedLock();
try {
indexEngine.getEntriesMinor(iRangeTo, isInclusive, MultiValuesTransformer.INSTANCE, new OIndexEngine.EntriesResultListener() {
@Override
public boolean addResult(ODocument entry) {
return entriesResultListener.addResult(entry);
}
});
} finally {
releaseSharedLock();
}
}
public void getEntriesBetween(Object iRangeFrom, Object iRangeTo, boolean inclusive,
final IndexEntriesResultListener indexEntriesResultListener) {
checkForRebuild();
iRangeFrom = getCollatingValue(iRangeFrom);
iRangeTo = getCollatingValue(iRangeTo);
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
iRangeFrom = OType.convert(iRangeFrom, types[0].getDefaultJavaType());
iRangeTo = OType.convert(iRangeTo, types[0].getDefaultJavaType());
}
acquireSharedLock();
try {
indexEngine.getEntriesBetween(iRangeFrom, iRangeTo, inclusive, MultiValuesTransformer.INSTANCE,
new OIndexEngine.EntriesResultListener() {
@Override
public boolean addResult(ODocument entry) {
return indexEntriesResultListener.addResult(entry);
}
});
} finally {
releaseSharedLock();
}
}
public long count(Object iRangeFrom, final boolean fromInclusive, Object iRangeTo, final boolean toInclusive,
final int maxValuesToFetch) {
checkForRebuild();
iRangeFrom = getCollatingValue(iRangeFrom);
iRangeTo = getCollatingValue(iRangeTo);
final OType[] types = getDefinition().getTypes();
if (types.length == 1) {
iRangeFrom = OType.convert(iRangeFrom, types[0].getDefaultJavaType());
iRangeTo = OType.convert(iRangeTo, types[0].getDefaultJavaType());
}
if (iRangeFrom != null && iRangeTo != null && iRangeFrom.getClass() != iRangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
acquireSharedLock();
try {
return indexEngine.count(iRangeFrom, fromInclusive, iRangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public void getEntries(Collection<?> iKeys, IndexEntriesResultListener resultListener) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
for (Object key : sortedKeys) {
key = getCollatingValue(key);
final Set<OIdentifiable> values = indexEngine.get(key);
if (values == null)
continue;
if (!values.isEmpty()) {
for (final OIdentifiable value : values) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", value.getIdentity());
document.unsetDirty();
if (!resultListener.addResult(document))
return;
}
}
}
} finally {
releaseSharedLock();
}
}
public long getSize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(MultiValuesTransformer.INSTANCE);
} finally {
releaseSharedLock();
}
}
public long getKeySize() {
checkForRebuild();
acquireSharedLock();
try {
return indexEngine.size(null);
} finally {
releaseSharedLock();
}
}
public Iterator<OIdentifiable> valuesIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
indexEngine.valuesIterator()));
} finally {
releaseSharedLock();
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterator<OIdentifiable> valuesInverseIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
} finally {
releaseSharedLock();
}
}
private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();
@Override
public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
return value;
}
@Override
public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
return (Set<OIdentifiable>) collection;
}
}
}
| 1 (no label)
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexMultiValues.java
|
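OIndexMultiValues maps each key to a set of record identities and drops the key once its last value is removed. A plain-Java sketch of that multi-value contract (java.util only; no locking, collating, or rebuild checks):

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class MultiValueIndexDemo {
    private final Map<String, Set<Integer>> index = new HashMap<>();

    // put: create the value set lazily, as OIndexMultiValues.put() does.
    void put(String key, int rid) {
        index.computeIfAbsent(key, k -> new HashSet<>()).add(rid);
    }

    // remove: drop the key entirely when its set becomes empty,
    // mirroring the remove() logic above.
    boolean remove(String key, int rid) {
        Set<Integer> rids = index.get(key);
        if (rids == null || !rids.remove(rid)) {
            return false;
        }
        if (rids.isEmpty()) {
            index.remove(key);
        }
        return true;
    }

    public static void main(String[] args) {
        MultiValueIndexDemo demo = new MultiValueIndexDemo();
        demo.put("name", 1);
        demo.put("name", 2);
        demo.remove("name", 1);
        demo.remove("name", 2);
        System.out.println(demo.index.containsKey("name")); // false
    }
}
```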
178 |
static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
final Throwable ex;
ExceptionNode next;
final long thrower; // use id not ref to avoid weak cycles
final int hashCode; // store task hashCode before weak ref disappears
ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
super(task, exceptionTableRefQueue);
this.ex = ex;
this.next = next;
this.thrower = Thread.currentThread().getId();
this.hashCode = System.identityHashCode(task);
}
}
| 0 (true)
|
src_main_java_jsr166y_ForkJoinTask.java
|
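ExceptionNode deliberately stores the task's identityHashCode and the thrower's thread id instead of strong references, so the table entry stays usable after the task is collected. A small standalone demo of the same WeakReference-plus-ReferenceQueue technique (not the jsr166y code; GC behavior is best-effort):

```java
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;

public class WeakEntryDemo {
    static final ReferenceQueue<Object> QUEUE = new ReferenceQueue<>();

    // Like ExceptionNode: capture the hash before the referent can vanish.
    static final class Entry extends WeakReference<Object> {
        final int hashCode;
        Entry(Object referent) {
            super(referent, QUEUE);
            this.hashCode = System.identityHashCode(referent);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Entry entry = new Entry(new Object()); // referent immediately unreachable
        System.gc(); // only a hint; the output below may vary by JVM
        Thread.sleep(100);
        // After collection, get() is null but the stored hash survives.
        System.out.println(entry.get() + " / hash=" + entry.hashCode);
        System.out.println("enqueued: " + (QUEUE.poll() != null));
    }
}
```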
199 |
public class TrackingConcurrentMergeScheduler extends ConcurrentMergeScheduler {
protected final ESLogger logger;
private final MeanMetric totalMerges = new MeanMetric();
private final CounterMetric totalMergesNumDocs = new CounterMetric();
private final CounterMetric totalMergesSizeInBytes = new CounterMetric();
private final CounterMetric currentMerges = new CounterMetric();
private final CounterMetric currentMergesNumDocs = new CounterMetric();
private final CounterMetric currentMergesSizeInBytes = new CounterMetric();
private final Set<OnGoingMerge> onGoingMerges = ConcurrentCollections.newConcurrentSet();
private final Set<OnGoingMerge> readOnlyOnGoingMerges = Collections.unmodifiableSet(onGoingMerges);
public TrackingConcurrentMergeScheduler(ESLogger logger) {
super();
this.logger = logger;
}
public long totalMerges() {
return totalMerges.count();
}
public long totalMergeTime() {
return totalMerges.sum();
}
public long totalMergeNumDocs() {
return totalMergesNumDocs.count();
}
public long totalMergeSizeInBytes() {
return totalMergesSizeInBytes.count();
}
public long currentMerges() {
return currentMerges.count();
}
public long currentMergesNumDocs() {
return currentMergesNumDocs.count();
}
public long currentMergesSizeInBytes() {
return currentMergesSizeInBytes.count();
}
public Set<OnGoingMerge> onGoingMerges() {
return readOnlyOnGoingMerges;
}
@Override
protected void doMerge(MergePolicy.OneMerge merge) throws IOException {
int totalNumDocs = merge.totalNumDocs();
// don't use #totalBytesSize() since it needs to be executed under the IW lock; might be fixed in a future Lucene version
long totalSizeInBytes = merge.estimatedMergeBytes;
long time = System.currentTimeMillis();
currentMerges.inc();
currentMergesNumDocs.inc(totalNumDocs);
currentMergesSizeInBytes.inc(totalSizeInBytes);
OnGoingMerge onGoingMerge = new OnGoingMerge(merge);
onGoingMerges.add(onGoingMerge);
if (logger.isTraceEnabled()) {
logger.trace("merge [{}] starting..., merging [{}] segments, [{}] docs, [{}] size, into [{}] estimated_size", merge.info == null ? "_na_" : merge.info.info.name, merge.segments.size(), totalNumDocs, new ByteSizeValue(totalSizeInBytes), new ByteSizeValue(merge.estimatedMergeBytes));
}
try {
beforeMerge(onGoingMerge);
super.doMerge(merge);
} finally {
long took = System.currentTimeMillis() - time;
onGoingMerges.remove(onGoingMerge);
afterMerge(onGoingMerge);
currentMerges.dec();
currentMergesNumDocs.dec(totalNumDocs);
currentMergesSizeInBytes.dec(totalSizeInBytes);
totalMergesNumDocs.inc(totalNumDocs);
totalMergesSizeInBytes.inc(totalSizeInBytes);
totalMerges.inc(took);
if (took > 20000) { // if more than 20 seconds, DEBUG log it
logger.debug("merge [{}] done, took [{}]", merge.info == null ? "_na_" : merge.info.info.name, TimeValue.timeValueMillis(took));
} else if (logger.isTraceEnabled()) {
logger.trace("merge [{}] done, took [{}]", merge.info == null ? "_na_" : merge.info.info.name, TimeValue.timeValueMillis(took));
}
}
}
/**
* A callback allowing for custom logic before an actual merge starts.
*/
protected void beforeMerge(OnGoingMerge merge) {
}
/**
* A callback allowing for custom logic after an actual merge has finished.
*/
protected void afterMerge(OnGoingMerge merge) {
}
@Override
public MergeScheduler clone() {
// Lucene IW makes a clone internally but since we hold on to this instance
// the clone will just be the identity.
return this;
}
}
| 0 (true)
|
src_main_java_org_apache_lucene_index_TrackingConcurrentMergeScheduler.java
|
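The scheduler brackets every merge with current/total counters and a timer, decrementing in a finally block so failed merges are still accounted for. The same bracketing pattern in a dependency-free sketch (AtomicLong standing in for the CounterMetric and MeanMetric classes):

```java
import java.util.concurrent.atomic.AtomicLong;

public class TrackedWorkDemo {
    private final AtomicLong currentOps = new AtomicLong();
    private final AtomicLong totalOps = new AtomicLong();
    private final AtomicLong totalMillis = new AtomicLong();

    // Mirrors doMerge(): increment before, decrement and total in finally.
    void run(Runnable work) {
        long start = System.currentTimeMillis();
        currentOps.incrementAndGet();
        try {
            work.run();
        } finally {
            currentOps.decrementAndGet();
            totalOps.incrementAndGet();
            totalMillis.addAndGet(System.currentTimeMillis() - start);
        }
    }

    public static void main(String[] args) {
        TrackedWorkDemo demo = new TrackedWorkDemo();
        demo.run(() -> { /* simulated merge */ });
        System.out.println("total ops: " + demo.totalOps.get());
    }
}
```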
1,079 |
public class CacheProperty extends AbstractProperty {
public CacheProperty(long id, PropertyKey key, InternalVertex start, Object value, Entry data) {
super(id, key, start.it(), value);
this.data = data;
}
// ############## Similar code to CacheEdge; be careful when copying #############################
private final Entry data;
@Override
public InternalRelation it() {
InternalRelation it = null;
InternalVertex startVertex = getVertex(0);
if (startVertex.hasAddedRelations() && startVertex.hasRemovedRelations()) {
//Test whether this relation has been replaced
final long id = super.getLongId();
it = Iterables.getOnlyElement(startVertex.getAddedRelations(new Predicate<InternalRelation>() {
@Override
public boolean apply(@Nullable InternalRelation internalRelation) {
return (internalRelation instanceof StandardProperty) && ((StandardProperty) internalRelation).getPreviousID() == id;
}
}), null);
}
return (it != null) ? it : super.it();
}
private void copyProperties(InternalRelation to) {
for (LongObjectCursor<Object> entry : getPropertyMap()) {
to.setPropertyDirect(tx().getExistingRelationType(entry.key), entry.value);
}
}
private synchronized InternalRelation update() {
StandardProperty copy = new StandardProperty(super.getLongId(), getPropertyKey(), getVertex(0), getValue(), ElementLifeCycle.Loaded);
copyProperties(copy);
copy.remove();
StandardProperty u = (StandardProperty) tx().addProperty(getVertex(0), getPropertyKey(), getValue());
if (type.getConsistencyModifier()!= ConsistencyModifier.FORK) u.setId(super.getLongId());
u.setPreviousID(super.getLongId());
copyProperties(u);
return u;
}
@Override
public long getLongId() {
InternalRelation it = it();
return (it == this) ? super.getLongId() : it.getLongId();
}
private RelationCache getPropertyMap() {
RelationCache map = data.getCache();
if (map == null || !map.hasProperties()) {
map = RelationConstructor.readRelationCache(data, tx());
}
return map;
}
@Override
public <O> O getPropertyDirect(RelationType type) {
return getPropertyMap().get(type.getLongId());
}
@Override
public Iterable<RelationType> getPropertyKeysDirect() {
RelationCache map = getPropertyMap();
List<RelationType> types = new ArrayList<RelationType>(map.numProperties());
for (LongObjectCursor<Object> entry : map) {
types.add(tx().getExistingRelationType(entry.key));
}
return types;
}
@Override
public void setPropertyDirect(RelationType type, Object value) {
update().setPropertyDirect(type, value);
}
@Override
public <O> O removePropertyDirect(RelationType type) {
return update().removePropertyDirect(type);
}
@Override
public byte getLifeCycle() {
if ((getVertex(0).hasRemovedRelations() || getVertex(0).isRemoved()) && tx().isRemovedRelation(super.getLongId()))
return ElementLifeCycle.Removed;
else return ElementLifeCycle.Loaded;
}
@Override
public void remove() {
if (!tx().isRemovedRelation(super.getLongId())) {
tx().removeRelation(this);
}// else throw InvalidElementException.removedException(this);
}
}
| 1 (no label)
|
titan-core_src_main_java_com_thinkaurelius_titan_graphdb_relations_CacheProperty.java
|
1,155 |
public class HazelcastInstanceNotActiveException extends IllegalStateException {
public HazelcastInstanceNotActiveException() {
super("Hazelcast instance is not active!");
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_core_HazelcastInstanceNotActiveException.java
|
589 |
public class TransportRefreshAction extends TransportBroadcastOperationAction<RefreshRequest, RefreshResponse, ShardRefreshRequest, ShardRefreshResponse> {
private final IndicesService indicesService;
@Inject
public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService) {
super(settings, threadPool, clusterService, transportService);
this.indicesService = indicesService;
}
@Override
protected String executor() {
return ThreadPool.Names.REFRESH;
}
@Override
protected String transportAction() {
return RefreshAction.NAME;
}
@Override
protected RefreshRequest newRequest() {
return new RefreshRequest();
}
@Override
protected RefreshResponse newResponse(RefreshRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
int successfulShards = 0;
int failedShards = 0;
List<ShardOperationFailedException> shardFailures = null;
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// non active shard, ignore
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = newArrayList();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
successfulShards++;
}
}
return new RefreshResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
}
@Override
protected ShardRefreshRequest newShardRequest() {
return new ShardRefreshRequest();
}
@Override
protected ShardRefreshRequest newShardRequest(ShardRouting shard, RefreshRequest request) {
return new ShardRefreshRequest(shard.index(), shard.id(), request);
}
@Override
protected ShardRefreshResponse newShardResponse() {
return new ShardRefreshResponse();
}
@Override
protected ShardRefreshResponse shardOperation(ShardRefreshRequest request) throws ElasticsearchException {
IndexShard indexShard = indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
indexShard.refresh(new Engine.Refresh("api").force(request.force()));
logger.trace("{} refresh request executed, force: [{}]", indexShard.shardId(), request.force());
return new ShardRefreshResponse(request.index(), request.shardId());
}
/**
* The refresh request works against *all* shards.
*/
@Override
protected GroupShardsIterator shards(ClusterState clusterState, RefreshRequest request, String[] concreteIndices) {
return clusterState.routingTable().allAssignedShardsGrouped(concreteIndices, true);
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, RefreshRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, RefreshRequest countRequest, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
}
}
| 1 (no label)
|
src_main_java_org_elasticsearch_action_admin_indices_refresh_TransportRefreshAction.java
|
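newResponse() above walks an AtomicReferenceArray and tallies nulls (inactive shards), failure markers, and successes. A standalone sketch of that aggregation, with RuntimeException standing in for BroadcastShardOperationFailedException:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

public class ShardTallyDemo {
    public static void main(String[] args) {
        AtomicReferenceArray<Object> responses = new AtomicReferenceArray<>(3);
        responses.set(0, "ok");                   // successful shard
        responses.set(1, new RuntimeException()); // failed shard
        // index 2 stays null: non-active shard, ignored

        int successful = 0;
        int failed = 0;
        List<Throwable> failures = new ArrayList<>();
        for (int i = 0; i < responses.length(); i++) {
            Object r = responses.get(i);
            if (r == null) {
                continue; // non-active shard, ignore
            } else if (r instanceof Throwable) {
                failed++;
                failures.add((Throwable) r);
            } else {
                successful++;
            }
        }
        System.out.println(successful + " ok, " + failed + " failed, "
                + failures.size() + " failure(s) collected");
    }
}
```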
2,351 |
private static class CollectingCombinerFactory<KeyIn, ValueIn>
implements CombinerFactory<KeyIn, ValueIn, List<ValueIn>> {
@Override
public Combiner<KeyIn, ValueIn, List<ValueIn>> newCombiner(KeyIn key) {
return new Combiner<KeyIn, ValueIn, List<ValueIn>>() {
private final List<ValueIn> values = new ArrayList<ValueIn>();
@Override
public void combine(KeyIn key, ValueIn value) {
values.add(value);
}
@Override
public List<ValueIn> finalizeChunk() {
List<ValueIn> values = new ArrayList<ValueIn>(this.values);
this.values.clear();
return values;
}
};
}
}
| 1 (no label)
|
hazelcast_src_main_java_com_hazelcast_mapreduce_impl_task_DefaultContext.java
|
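The combiner buffers values per key and, on finalizeChunk(), hands back a copy while clearing its own buffer for the next chunk. A self-contained rendering of that contract:

```java
import java.util.ArrayList;
import java.util.List;

public class CombinerDemo {
    // Same shape as the anonymous Combiner above: accumulate, then
    // snapshot-and-reset when a chunk is finalized.
    static final class ListCombiner<V> {
        private final List<V> values = new ArrayList<>();

        void combine(V value) {
            values.add(value);
        }

        List<V> finalizeChunk() {
            List<V> chunk = new ArrayList<>(values);
            values.clear(); // ready for the next chunk
            return chunk;
        }
    }

    public static void main(String[] args) {
        ListCombiner<String> combiner = new ListCombiner<>();
        combiner.combine("a");
        combiner.combine("b");
        System.out.println(combiner.finalizeChunk()); // [a, b]
        System.out.println(combiner.finalizeChunk()); // []
    }
}
```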
180 |
@Component("blURLHandlerFilter")
public class URLHandlerFilter extends OncePerRequestFilter {
@Resource(name = "blURLHandlerService")
private URLHandlerService urlHandlerService;
@Override
protected void doFilterInternal(HttpServletRequest request,
HttpServletResponse response, FilterChain filterChain)
throws ServletException, IOException {
String contextPath = request.getContextPath();
String requestURIWithoutContext;
if (request.getContextPath() != null) {
requestURIWithoutContext = request.getRequestURI().substring(request.getContextPath().length());
} else {
requestURIWithoutContext = request.getRequestURI();
}
URLHandler handler = urlHandlerService.findURLHandlerByURI(requestURIWithoutContext);
if (handler != null) {
if (URLRedirectType.FORWARD == handler.getUrlRedirectType()) {
request.getRequestDispatcher(handler.getNewURL()).forward(request, response);
} else if (URLRedirectType.REDIRECT_PERM == handler.getUrlRedirectType()) {
String url = UrlUtil.fixRedirectUrl(contextPath, handler.getNewURL());
response.setStatus(HttpServletResponse.SC_MOVED_PERMANENTLY);
response.setHeader( "Location", url);
response.setHeader( "Connection", "close" );
} else if (URLRedirectType.REDIRECT_TEMP == handler.getUrlRedirectType()) {
String url = UrlUtil.fixRedirectUrl(contextPath, handler.getNewURL());
response.sendRedirect(url);
}
} else {
filterChain.doFilter(request, response);
}
}
/**
* If the url does not include "//" then the system will ensure that the application context
* is added to the start of the URL.
*
* @param contextPath the servlet context path, possibly empty
* @param url the redirect target to fix
* @return the url, prefixed with the context path when the url is relative
*/
protected String fixRedirectUrl(String contextPath, String url) {
if (url.indexOf("//") < 0) {
if (contextPath != null && (! "".equals(contextPath))) {
if (! url.startsWith("/")) {
url = "/" + url;
}
if (! url.startsWith(contextPath)) {
url = contextPath + url;
}
}
}
return url;
}
}
| 0 (true)
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_web_URLHandlerFilter.java
|
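The filter's fixRedirectUrl() helper prepends the servlet context path to relative redirect targets while leaving anything containing "//" untouched. Its behavior, reproduced standalone:

```java
public class RedirectUrlDemo {
    // Same logic as the fixRedirectUrl() helper above, flattened slightly.
    static String fixRedirectUrl(String contextPath, String url) {
        if (url.indexOf("//") < 0 && contextPath != null && !"".equals(contextPath)) {
            if (!url.startsWith("/")) {
                url = "/" + url;
            }
            if (!url.startsWith(contextPath)) {
                url = contextPath + url;
            }
        }
        return url;
    }

    public static void main(String[] args) {
        System.out.println(fixRedirectUrl("/store", "checkout"));      // /store/checkout
        System.out.println(fixRedirectUrl("/store", "/store/cart"));   // /store/cart (unchanged)
        System.out.println(fixRedirectUrl("/store", "http://x.test")); // absolute, untouched
    }
}
```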
1,122 |
public class OSQLFunctionEncode extends OSQLFunctionAbstract {
public static final String NAME = "encode";
public static final String FORMAT_BASE64 = "base64";
/**
* Creates the "encode" function definition, which takes exactly two parameters.
*/
public OSQLFunctionEncode() {
super(NAME, 2, 2);
}
public Object execute(OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters, OCommandContext iContext) {
final Object candidate = iParameters[0];
final String format = iParameters[1].toString();
byte[] data = null;
if (candidate instanceof byte[]) {
data = (byte[]) candidate;
} else if (candidate instanceof ORecordId) {
final ORecord rec = ((ORecordId) candidate).getRecord();
if (rec instanceof ORecordBytes) {
data = ((ORecordBytes) rec).toStream();
}
} else if (candidate instanceof OSerializableStream) {
data = ((OSerializableStream) candidate).toStream();
}
if(data == null){
return null;
}
if(FORMAT_BASE64.equalsIgnoreCase(format)){
return OBase64Utils.encodeBytes(data);
}else{
throw new OException("unknowned format :"+format);
}
}
@Override
public String getSyntax() {
return "Syntax error: encode(<binaryfield>, <format>)";
}
}
| 1 (no label)
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_misc_OSQLFunctionEncode.java
|
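The function above delegates to OBase64Utils for its single supported format. Since Java 8 the standard library covers the same operation via java.util.Base64, as this sketch shows:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class EncodeDemo {
    public static void main(String[] args) {
        byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
        // java.util.Base64 provides the "base64" format handled above.
        String encoded = Base64.getEncoder().encodeToString(data);
        System.out.println(encoded); // aGVsbG8=
        byte[] roundTrip = Base64.getDecoder().decode(encoded);
        System.out.println(new String(roundTrip, StandardCharsets.UTF_8)); // hello
    }
}
```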
243 |
service.submitToKeyOwner(runnable, "key", new ExecutionCallback() {
public void onResponse(Object response) {
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
});
| 0 (true)
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java
|
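The test snippet counts down a latch from an asynchronous callback so the test thread can await completion. The same pattern with plain java.util.concurrent (CompletableFuture standing in for Hazelcast's ExecutionCallback):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CallbackLatchDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        CountDownLatch responseLatch = new CountDownLatch(1);

        CompletableFuture.runAsync(() -> { /* the submitted runnable */ }, pool)
                .whenComplete((ignored, failure) -> {
                    if (failure == null) {
                        responseLatch.countDown(); // onResponse equivalent
                    } // an onFailure equivalent would handle `failure` here
                });

        System.out.println("completed: " + responseLatch.await(5, TimeUnit.SECONDS));
        pool.shutdown();
    }
}
```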
1,968 |
public final class EvictionHelper {
private static final int ONE_HUNDRED_PERCENT = 100;
private static final int EVICTION_START_THRESHOLD_PERCENTAGE = 95;
private static final int ONE_KILOBYTE = 1024;
private EvictionHelper() {
}
public static boolean checkEvictable(MapContainer mapContainer) {
final MaxSizeConfig maxSizeConfig = mapContainer.getMapConfig().getMaxSizeConfig();
final MaxSizeConfig.MaxSizePolicy maxSizePolicy = maxSizeConfig.getMaxSizePolicy();
boolean result;
switch (maxSizePolicy) {
case PER_NODE:
result = isEvictablePerNode(mapContainer);
break;
case PER_PARTITION:
result = isEvictablePerPartition(mapContainer);
break;
case USED_HEAP_PERCENTAGE:
result = isEvictableHeapPercentage(mapContainer);
break;
case USED_HEAP_SIZE:
result = isEvictableHeapSize(mapContainer);
break;
default:
throw new IllegalArgumentException("Not an appropriate max size policy [" + maxSizePolicy + ']');
}
return result;
}
public static void removeEvictableRecords(final RecordStore recordStore, final MapConfig mapConfig,
final MapService mapService) {
final int partitionSize = recordStore.size();
if (partitionSize < 1) {
return;
}
final int evictableSize = getEvictableSize(partitionSize, mapConfig, mapService);
if (evictableSize < 1) {
return;
}
final MapConfig.EvictionPolicy evictionPolicy = mapConfig.getEvictionPolicy();
final Map<Data, Record> entries = recordStore.getReadonlyRecordMap();
final int size = entries.size();
// size has a tendency to change here, so check again.
if (entries.isEmpty()) {
return;
}
// criteria is a long value, like last access times or hits,
// used for calculating LFU or LRU.
final long[] criterias = new long[size];
int index = 0;
for (final Record record : entries.values()) {
criterias[index] = getEvictionCriteriaValue(record, evictionPolicy);
index++;
//in case size may change (increase or decrease) when iterating.
if (index == size) {
break;
}
}
if (criterias.length == 0) {
return;
}
// just in case there are unassigned indexes in the criterias array due to size variances,
// assign them Long.MAX_VALUE so that, when sorting in ascending order, they end up
// in the upper array indexes and we won't care about them.
if (index < criterias.length) {
for (int i = index; i < criterias.length; i++) {
criterias[i] = Long.MAX_VALUE;
}
}
Arrays.sort(criterias);
// check in case record store size may be smaller than evictable size.
final int evictableBaseIndex = index == 0 ? index : Math.min(evictableSize, index - 1);
final long criteriaValue = criterias[evictableBaseIndex];
int evictedRecordCounter = 0;
for (final Map.Entry<Data, Record> entry : entries.entrySet()) {
final Record record = entry.getValue();
final long value = getEvictionCriteriaValue(record, evictionPolicy);
if (value <= criteriaValue) {
final Data tmpKey = record.getKey();
final Object tmpValue = record.getValue();
if (evictIfNotLocked(tmpKey, recordStore)) {
evictedRecordCounter++;
final String mapName = mapConfig.getName();
mapService.interceptAfterRemove(mapName, value);
if (mapService.isNearCacheAndInvalidationEnabled(mapName)) {
mapService.invalidateAllNearCaches(mapName, tmpKey);
}
fireEvent(tmpKey, tmpValue, mapName, mapService);
}
}
if (evictedRecordCounter >= evictableSize) {
break;
}
}
}
public static void fireEvent(Data key, Object value, String mapName, MapService mapService) {
final NodeEngine nodeEngine = mapService.getNodeEngine();
mapService.publishEvent(nodeEngine.getThisAddress(), mapName, EntryEventType.EVICTED,
key, mapService.toData(value), null);
}
public static boolean evictIfNotLocked(Data key, RecordStore recordStore) {
if (recordStore.isLocked(key)) {
return false;
}
recordStore.evict(key);
return true;
}
public static int getEvictableSize(int currentPartitionSize, MapConfig mapConfig, MapService mapService) {
int evictableSize;
final MaxSizeConfig.MaxSizePolicy maxSizePolicy = mapConfig.getMaxSizeConfig().getMaxSizePolicy();
final int evictionPercentage = mapConfig.getEvictionPercentage();
switch (maxSizePolicy) {
case PER_PARTITION:
int maxSize = mapConfig.getMaxSizeConfig().getSize();
int targetSizePerPartition = Double.valueOf(maxSize
* ((ONE_HUNDRED_PERCENT - evictionPercentage) / (1D * ONE_HUNDRED_PERCENT))).intValue();
int diffFromTargetSize = currentPartitionSize - targetSizePerPartition;
int prunedSize = currentPartitionSize * evictionPercentage / ONE_HUNDRED_PERCENT + 1;
evictableSize = Math.max(diffFromTargetSize, prunedSize);
break;
case PER_NODE:
maxSize = mapConfig.getMaxSizeConfig().getSize();
int memberCount = mapService.getNodeEngine().getClusterService().getMembers().size();
int maxPartitionSize = (maxSize
* memberCount / mapService.getNodeEngine().getPartitionService().getPartitionCount());
targetSizePerPartition = Double.valueOf(maxPartitionSize
* ((ONE_HUNDRED_PERCENT - evictionPercentage) / (1D * ONE_HUNDRED_PERCENT))).intValue();
diffFromTargetSize = currentPartitionSize - targetSizePerPartition;
prunedSize = currentPartitionSize * evictionPercentage / ONE_HUNDRED_PERCENT + 1;
evictableSize = Math.max(diffFromTargetSize, prunedSize);
break;
case USED_HEAP_PERCENTAGE:
case USED_HEAP_SIZE:
evictableSize = currentPartitionSize * evictionPercentage / ONE_HUNDRED_PERCENT;
break;
default:
throw new IllegalArgumentException("Max size policy is not defined [" + maxSizePolicy + "]");
}
return evictableSize;
}
private static long getEvictionCriteriaValue(Record record, MapConfig.EvictionPolicy evictionPolicy) {
long value;
switch (evictionPolicy) {
case LRU:
case LFU:
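// both LRU and LFU read the same criteria number on the record; per the policy it
// holds either the last access time (LRU) or the hit count (LFU).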
value = record.getEvictionCriteriaNumber();
break;
default:
throw new IllegalArgumentException("Not an appropriate eviction policy [" + evictionPolicy + ']');
}
return value;
}
private static boolean isEvictablePerNode(MapContainer mapContainer) {
int nodeTotalSize = 0;
final MapService mapService = mapContainer.getMapService();
final MaxSizeConfig maxSizeConfig = mapContainer.getMapConfig().getMaxSizeConfig();
final int maxSize = getApproximateMaxSize(maxSizeConfig.getSize());
final String mapName = mapContainer.getName();
final NodeEngine nodeEngine = mapService.getNodeEngine();
final InternalPartitionService partitionService = nodeEngine.getPartitionService();
final int partitionCount = partitionService.getPartitionCount();
for (int i = 0; i < partitionCount; i++) {
final Address owner = partitionService.getPartitionOwner(i);
if (nodeEngine.getThisAddress().equals(owner)) {
final PartitionContainer container = mapService.getPartitionContainer(i);
if (container == null) {
return false;
}
nodeTotalSize += getRecordStoreSize(mapName, container);
if (nodeTotalSize >= maxSize) {
return true;
}
}
}
return false;
}
private static int getRecordStoreSize(String mapName, PartitionContainer partitionContainer) {
final RecordStore existingRecordStore = partitionContainer.getExistingRecordStore(mapName);
if (existingRecordStore == null) {
return 0;
}
return existingRecordStore.size();
}
private static long getRecordStoreHeapCost(String mapName, PartitionContainer partitionContainer) {
final RecordStore existingRecordStore = partitionContainer.getExistingRecordStore(mapName);
if (existingRecordStore == null) {
return 0L;
}
return existingRecordStore.getHeapCost();
}
/**
* Used when deciding whether the map is evictable.
*/
private static int getApproximateMaxSize(int maxSizeFromConfig) {
// start eviction early so the configured max size is not exceeded by much;
// scale the max size down by the ratio .95 below.
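// e.g. with a threshold of 95%, a configured max size of 1000 becomes 950.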
return maxSizeFromConfig * EVICTION_START_THRESHOLD_PERCENTAGE / ONE_HUNDRED_PERCENT;
}
private static boolean isEvictablePerPartition(final MapContainer mapContainer) {
final MapService mapService = mapContainer.getMapService();
final MaxSizeConfig maxSizeConfig = mapContainer.getMapConfig().getMaxSizeConfig();
final int maxSize = getApproximateMaxSize(maxSizeConfig.getSize());
final String mapName = mapContainer.getName();
final NodeEngine nodeEngine = mapService.getNodeEngine();
final InternalPartitionService partitionService = nodeEngine.getPartitionService();
for (int i = 0; i < partitionService.getPartitionCount(); i++) {
final Address owner = partitionService.getPartitionOwner(i);
if (nodeEngine.getThisAddress().equals(owner)) {
final PartitionContainer container = mapService.getPartitionContainer(i);
if (container == null) {
return false;
}
final int size = getRecordStoreSize(mapName, container);
if (size >= maxSize) {
return true;
}
}
}
return false;
}
private static boolean isEvictableHeapSize(final MapContainer mapContainer) {
final long usedHeapSize = getUsedHeapSize(mapContainer);
if (usedHeapSize == -1L) {
return false;
}
final MaxSizeConfig maxSizeConfig = mapContainer.getMapConfig().getMaxSizeConfig();
final int maxSize = getApproximateMaxSize(maxSizeConfig.getSize());
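// for USED_HEAP_SIZE the max size is read in megabytes, so convert the used heap from bytes.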
return maxSize < (usedHeapSize / ONE_KILOBYTE / ONE_KILOBYTE);
}
private static boolean isEvictableHeapPercentage(final MapContainer mapContainer) {
final long usedHeapSize = getUsedHeapSize(mapContainer);
if (usedHeapSize == -1L) {
return false;
}
final MaxSizeConfig maxSizeConfig = mapContainer.getMapConfig().getMaxSizeConfig();
final int maxSize = getApproximateMaxSize(maxSizeConfig.getSize());
final long total = Runtime.getRuntime().totalMemory();
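// here maxSize is interpreted as a percentage: compare it against the share of
// the total heap occupied by this map.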
return maxSize < (1D * ONE_HUNDRED_PERCENT * usedHeapSize / total);
}
private static long getUsedHeapSize(final MapContainer mapContainer) {
long heapCost = 0L;
final MapService mapService = mapContainer.getMapService();
final String mapName = mapContainer.getName();
final NodeEngine nodeEngine = mapService.getNodeEngine();
final Address thisAddress = nodeEngine.getThisAddress();
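// sum the heap cost over every partition this node owns or backs up;
// a missing partition container means stats are unavailable, signalled by -1.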
for (int i = 0; i < nodeEngine.getPartitionService().getPartitionCount(); i++) {
if (nodeEngine.getPartitionService().getPartition(i).isOwnerOrBackup(thisAddress)) {
final PartitionContainer container = mapService.getPartitionContainer(i);
if (container == null) {
return -1L;
}
heapCost += getRecordStoreHeapCost(mapName, container);
}
}
heapCost += mapContainer.getNearCacheSizeEstimator().getSize();
return heapCost;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_eviction_EvictionHelper.java
|
430 |
EventHandler<PortableItemEvent> eventHandler = new EventHandler<PortableItemEvent>() {
public void handle(PortableItemEvent portableItemEvent) {
E item = includeValue ? (E) getContext().getSerializationService().toObject(portableItemEvent.getItem()) : null;
Member member = getContext().getClusterService().getMember(portableItemEvent.getUuid());
ItemEvent<E> itemEvent = new ItemEvent<E>(name, portableItemEvent.getEventType(), item, member);
if (portableItemEvent.getEventType() == ItemEventType.ADDED) {
listener.itemAdded(itemEvent);
} else {
listener.itemRemoved(itemEvent);
}
}
@Override
public void onListenerRegister() {
}
};
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientQueueProxy.java
|
1,146 |
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SE_BAD_FIELD")
public class EntryEvent<K, V> extends EventObject {
private static final long serialVersionUID = -2296203982913729851L;
protected final EntryEventType entryEventType;
protected K key;
protected V oldValue;
protected V value;
protected final Member member;
protected final String name;
public EntryEvent(Object source, Member member, int eventType, K key, V value) {
this(source, member, eventType, key, null, value);
}
public EntryEvent(Object source, Member member, int eventType, K key, V oldValue, V value) {
super(source);
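// by convention the event source is the map name, which makes the cast below safe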
this.name = (String) source;
this.member = member;
this.key = key;
this.oldValue = oldValue;
this.value = value;
this.entryEventType = EntryEventType.getByType(eventType);
}
@Override
public Object getSource() {
return name;
}
/**
* Returns the key of the entry event
*
* @return the key
*/
public K getKey() {
return key;
}
/**
* Returns the old value of the entry event
*
* @return the old value of the entry event
*/
public V getOldValue() {
return this.oldValue;
}
/**
* Returns the value of the entry event
*
* @return the value of the entry event
*/
public V getValue() {
return value;
}
/**
* Returns the member that fired this event.
*
* @return the member that fired this event
*/
public Member getMember() {
return member;
}
/**
* Returns the event type.
*
* @return the event type
*/
public EntryEventType getEventType() {
return entryEventType;
}
/**
* Returns the name of the map for this event.
*
* @return name of the map.
*/
public String getName() {
return name;
}
@Override
public String toString() {
return "EntryEvent {" + getSource()
+ "} key=" + getKey()
+ ", oldValue=" + getOldValue()
+ ", value=" + getValue()
+ ", event=" + entryEventType
+ ", by " + member;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_core_EntryEvent.java
|
1,301 |
public class ClusterState implements ToXContent {
public interface Custom {
interface Factory<T extends Custom> {
String type();
T readFrom(StreamInput in) throws IOException;
void writeTo(T customState, StreamOutput out) throws IOException;
void toXContent(T customState, XContentBuilder builder, ToXContent.Params params);
}
}
public static Map<String, Custom.Factory> customFactories = new HashMap<String, Custom.Factory>();
/**
* Registers a custom cluster state factory. Make sure to call it from a static block.
*/
public static void registerFactory(String type, Custom.Factory factory) {
customFactories.put(type, factory);
}
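// Typical usage (illustrative): register from a static initializer so the factory is
// available before any cluster state is deserialized, e.g.
//   static { ClusterState.registerFactory(SnapshotMetaData.TYPE, SnapshotMetaData.FACTORY); }
// (SnapshotMetaData.FACTORY is a hypothetical constant; the exact field name may differ.)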
@Nullable
public static <T extends Custom> Custom.Factory<T> lookupFactory(String type) {
return customFactories.get(type);
}
public static <T extends Custom> Custom.Factory<T> lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException {
Custom.Factory<T> factory = customFactories.get(type);
if (factory == null) {
throw new ElasticsearchIllegalArgumentException("No custom state factory registered for type [" + type + "]");
}
return factory;
}
private final long version;
private final RoutingTable routingTable;
private final DiscoveryNodes nodes;
private final MetaData metaData;
private final ClusterBlocks blocks;
private final AllocationExplanation allocationExplanation;
private final ImmutableOpenMap<String, Custom> customs;
// built on demand
private volatile RoutingNodes routingNodes;
private SettingsFilter settingsFilter;
public ClusterState(long version, ClusterState state) {
this(version, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.allocationExplanation(), state.customs());
}
public ClusterState(long version, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, AllocationExplanation allocationExplanation, ImmutableOpenMap<String, Custom> customs) {
this.version = version;
this.metaData = metaData;
this.routingTable = routingTable;
this.nodes = nodes;
this.blocks = blocks;
this.allocationExplanation = allocationExplanation;
this.customs = customs;
}
public long version() {
return this.version;
}
public long getVersion() {
return version();
}
public DiscoveryNodes nodes() {
return this.nodes;
}
public DiscoveryNodes getNodes() {
return nodes();
}
public MetaData metaData() {
return this.metaData;
}
public MetaData getMetaData() {
return metaData();
}
public RoutingTable routingTable() {
return routingTable;
}
public RoutingTable getRoutingTable() {
return routingTable();
}
public RoutingNodes routingNodes() {
return routingTable.routingNodes(this);
}
public RoutingNodes getRoutingNodes() {
return readOnlyRoutingNodes();
}
public ClusterBlocks blocks() {
return this.blocks;
}
public ClusterBlocks getBlocks() {
return blocks;
}
public AllocationExplanation allocationExplanation() {
return this.allocationExplanation;
}
public AllocationExplanation getAllocationExplanation() {
return allocationExplanation();
}
public ImmutableOpenMap<String, Custom> customs() {
return this.customs;
}
public ImmutableOpenMap<String, Custom> getCustoms() {
return this.customs;
}
/**
* Returns a built (on demand) routing nodes view of the routing table. <b>NOTE: the routing nodes
* are mutable; use them only for read operations.</b>
*/
public RoutingNodes readOnlyRoutingNodes() {
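// benign race: routingNodes is volatile, so concurrent callers may build the view
// twice, but each always observes a fully constructed instance.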
if (routingNodes != null) {
return routingNodes;
}
routingNodes = routingTable.routingNodes(this);
return routingNodes;
}
public ClusterState settingsFilter(SettingsFilter settingsFilter) {
this.settingsFilter = settingsFilter;
return this;
}
public String prettyPrint() {
StringBuilder sb = new StringBuilder();
sb.append(nodes().prettyPrint());
sb.append(routingTable().prettyPrint());
sb.append(readOnlyRoutingNodes().prettyPrint());
return sb.toString();
}
@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
Set<String> metrics = Strings.splitStringByCommaToSet(params.param("metric", "_all"));
boolean isAllMetricsOnly = metrics.size() == 1 && metrics.contains("_all");
if (isAllMetricsOnly || metrics.contains("nodes")) {
builder.field("master_node", nodes().masterNodeId());
}
if (isAllMetricsOnly || metrics.contains("blocks")) {
builder.startObject("blocks");
if (!blocks().global().isEmpty()) {
builder.startObject("global");
for (ClusterBlock block : blocks().global()) {
block.toXContent(builder, params);
}
builder.endObject();
}
if (!blocks().indices().isEmpty()) {
builder.startObject("indices");
for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks().indices().entrySet()) {
builder.startObject(entry.getKey());
for (ClusterBlock block : entry.getValue()) {
block.toXContent(builder, params);
}
builder.endObject();
}
builder.endObject();
}
builder.endObject();
}
// nodes
if (isAllMetricsOnly || metrics.contains("nodes")) {
builder.startObject("nodes");
for (DiscoveryNode node : nodes()) {
builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("name", node.name());
builder.field("transport_address", node.address().toString());
builder.startObject("attributes");
for (Map.Entry<String, String> attr : node.attributes().entrySet()) {
builder.field(attr.getKey(), attr.getValue());
}
builder.endObject();
builder.endObject();
}
builder.endObject();
}
// meta data
if (isAllMetricsOnly || metrics.contains("metadata")) {
builder.startObject("metadata");
builder.startObject("templates");
for (ObjectCursor<IndexTemplateMetaData> cursor : metaData().templates().values()) {
IndexTemplateMetaData templateMetaData = cursor.value;
builder.startObject(templateMetaData.name(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("template", templateMetaData.template());
builder.field("order", templateMetaData.order());
builder.startObject("settings");
Settings settings = templateMetaData.settings();
if (settingsFilter != null) {
settings = settingsFilter.filterSettings(settings);
}
settings.toXContent(builder, params);
builder.endObject();
builder.startObject("mappings");
for (ObjectObjectCursor<String, CompressedString> cursor1 : templateMetaData.mappings()) {
byte[] mappingSource = cursor1.value.uncompressed();
XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
Map<String, Object> mapping = parser.map();
if (mapping.size() == 1 && mapping.containsKey(cursor1.key)) {
// the type name is the root value, reduce it
mapping = (Map<String, Object>) mapping.get(cursor1.key);
}
builder.field(cursor1.key);
builder.map(mapping);
}
builder.endObject();
builder.endObject();
}
builder.endObject();
builder.startObject("indices");
for (IndexMetaData indexMetaData : metaData()) {
builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH));
builder.startObject("settings");
Settings settings = indexMetaData.settings();
if (settingsFilter != null) {
settings = settingsFilter.filterSettings(settings);
}
settings.toXContent(builder, params);
builder.endObject();
builder.startObject("mappings");
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
byte[] mappingSource = cursor.value.source().uncompressed();
XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
Map<String, Object> mapping = parser.map();
if (mapping.size() == 1 && mapping.containsKey(cursor.key)) {
// the type name is the root value, reduce it
mapping = (Map<String, Object>) mapping.get(cursor.key);
}
builder.field(cursor.key);
builder.map(mapping);
}
builder.endObject();
builder.startArray("aliases");
for (ObjectCursor<String> cursor : indexMetaData.aliases().keys()) {
builder.value(cursor.value);
}
builder.endArray();
builder.endObject();
}
builder.endObject();
for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
builder.startObject(cursor.key);
MetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
builder.endObject();
}
// routing table
if (isAllMetricsOnly || metrics.contains("routing_table")) {
builder.startObject("routing_table");
builder.startObject("indices");
for (IndexRoutingTable indexRoutingTable : routingTable()) {
builder.startObject(indexRoutingTable.index(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject("shards");
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
builder.startArray(Integer.toString(indexShardRoutingTable.shardId().id()));
for (ShardRouting shardRouting : indexShardRoutingTable) {
shardRouting.toXContent(builder, params);
}
builder.endArray();
}
builder.endObject();
builder.endObject();
}
builder.endObject();
builder.endObject();
}
// routing nodes
if (isAllMetricsOnly || metrics.contains("routing_table")) {
builder.startObject("routing_nodes");
builder.startArray("unassigned");
for (ShardRouting shardRouting : readOnlyRoutingNodes().unassigned()) {
shardRouting.toXContent(builder, params);
}
builder.endArray();
builder.startObject("nodes");
for (RoutingNode routingNode : readOnlyRoutingNodes()) {
builder.startArray(routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE);
for (ShardRouting shardRouting : routingNode) {
shardRouting.toXContent(builder, params);
}
builder.endArray();
}
builder.endObject();
builder.endObject();
}
if (isAllMetricsOnly || metrics.contains("routing_table")) {
builder.startArray("allocations");
for (Map.Entry<ShardId, List<AllocationExplanation.NodeExplanation>> entry : allocationExplanation().explanations().entrySet()) {
builder.startObject();
builder.field("index", entry.getKey().index().name());
builder.field("shard", entry.getKey().id());
builder.startArray("explanations");
for (AllocationExplanation.NodeExplanation nodeExplanation : entry.getValue()) {
builder.field("desc", nodeExplanation.description());
if (nodeExplanation.node() != null) {
builder.startObject("node");
builder.field("id", nodeExplanation.node().id());
builder.field("name", nodeExplanation.node().name());
builder.endObject();
}
}
builder.endArray();
builder.endObject();
}
builder.endArray();
}
if (isAllMetricsOnly || metrics.contains("customs")) {
for (ObjectObjectCursor<String, Custom> cursor : customs) {
builder.startObject(cursor.key);
lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
}
return builder;
}
public static Builder builder() {
return new Builder();
}
public static Builder builder(ClusterState state) {
return new Builder(state);
}
public static class Builder {
private long version = 0;
private MetaData metaData = MetaData.EMPTY_META_DATA;
private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE;
private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES;
private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK;
private AllocationExplanation allocationExplanation = AllocationExplanation.EMPTY;
private final ImmutableOpenMap.Builder<String, Custom> customs;
public Builder() {
customs = ImmutableOpenMap.builder();
}
public Builder(ClusterState state) {
this.version = state.version();
this.nodes = state.nodes();
this.routingTable = state.routingTable();
this.metaData = state.metaData();
this.blocks = state.blocks();
this.allocationExplanation = state.allocationExplanation();
this.customs = ImmutableOpenMap.builder(state.customs());
}
public Builder nodes(DiscoveryNodes.Builder nodesBuilder) {
return nodes(nodesBuilder.build());
}
public Builder nodes(DiscoveryNodes nodes) {
this.nodes = nodes;
return this;
}
public Builder routingTable(RoutingTable.Builder routingTable) {
return routingTable(routingTable.build());
}
public Builder routingResult(RoutingAllocation.Result routingResult) {
this.routingTable = routingResult.routingTable();
this.allocationExplanation = routingResult.explanation();
return this;
}
public Builder routingTable(RoutingTable routingTable) {
this.routingTable = routingTable;
return this;
}
public Builder metaData(MetaData.Builder metaDataBuilder) {
return metaData(metaDataBuilder.build());
}
public Builder metaData(MetaData metaData) {
this.metaData = metaData;
return this;
}
public Builder blocks(ClusterBlocks.Builder blocksBuilder) {
return blocks(blocksBuilder.build());
}
public Builder blocks(ClusterBlocks block) {
this.blocks = block;
return this;
}
public Builder allocationExplanation(AllocationExplanation allocationExplanation) {
this.allocationExplanation = allocationExplanation;
return this;
}
public Builder version(long version) {
this.version = version;
return this;
}
public Custom getCustom(String type) {
return customs.get(type);
}
public Builder putCustom(String type, Custom custom) {
customs.put(type, custom);
return this;
}
public Builder removeCustom(String type) {
customs.remove(type);
return this;
}
public ClusterState build() {
return new ClusterState(version, metaData, routingTable, nodes, blocks, allocationExplanation, customs.build());
}
public static byte[] toBytes(ClusterState state) throws IOException {
BytesStreamOutput os = new BytesStreamOutput();
writeTo(state, os);
return os.bytes().toBytes();
}
public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
return readFrom(new BytesStreamInput(data, false), localNode);
}
public static void writeTo(ClusterState state, StreamOutput out) throws IOException {
out.writeLong(state.version());
MetaData.Builder.writeTo(state.metaData(), out);
RoutingTable.Builder.writeTo(state.routingTable(), out);
DiscoveryNodes.Builder.writeTo(state.nodes(), out);
ClusterBlocks.Builder.writeClusterBlocks(state.blocks(), out);
state.allocationExplanation().writeTo(out);
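// customs are serialized as a vInt count followed by (type, payload) pairs;
// readFrom below consumes them in the same order.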
out.writeVInt(state.customs().size());
for (ObjectObjectCursor<String, Custom> cursor : state.customs()) {
out.writeString(cursor.key);
lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
}
}
public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
Builder builder = new Builder();
builder.version = in.readLong();
builder.metaData = MetaData.Builder.readFrom(in);
builder.routingTable = RoutingTable.Builder.readFrom(in);
builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode);
builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in);
builder.allocationExplanation = AllocationExplanation.readAllocationExplanation(in);
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
return builder.build();
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_ClusterState.java
|
252 |
public enum STRATEGY {
POP_RECORD, COPY_RECORD
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_cache_OLevel2RecordCache.java
|
5,865 |
public class SourceLookup implements Map {
private AtomicReader reader;
private int docId = -1;
private BytesReference sourceAsBytes;
private Map<String, Object> source;
private XContentType sourceContentType;
public Map<String, Object> source() {
return source;
}
public XContentType sourceContentType() {
return sourceContentType;
}
private Map<String, Object> loadSourceIfNeeded() {
if (source != null) {
return source;
}
if (sourceAsBytes != null) {
Tuple<XContentType, Map<String, Object>> tuple = sourceAsMapAndType(sourceAsBytes);
sourceContentType = tuple.v1();
source = tuple.v2();
return source;
}
try {
JustSourceFieldsVisitor sourceFieldVisitor = new JustSourceFieldsVisitor();
reader.document(docId, sourceFieldVisitor);
BytesReference source = sourceFieldVisitor.source();
if (source == null) {
this.source = ImmutableMap.of();
this.sourceContentType = null;
} else {
Tuple<XContentType, Map<String, Object>> tuple = sourceAsMapAndType(source);
this.sourceContentType = tuple.v1();
this.source = tuple.v2();
}
} catch (Exception e) {
throw new ElasticsearchParseException("failed to parse / load source", e);
}
return this.source;
}
public static Tuple<XContentType, Map<String, Object>> sourceAsMapAndType(BytesReference source) throws ElasticsearchParseException {
return XContentHelper.convertToMap(source, false);
}
public static Map<String, Object> sourceAsMap(BytesReference source) throws ElasticsearchParseException {
return sourceAsMapAndType(source).v2();
}
public static Tuple<XContentType, Map<String, Object>> sourceAsMapAndType(byte[] bytes, int offset, int length) throws ElasticsearchParseException {
return XContentHelper.convertToMap(bytes, offset, length, false);
}
public static Map<String, Object> sourceAsMap(byte[] bytes, int offset, int length) throws ElasticsearchParseException {
return sourceAsMapAndType(bytes, offset, length).v2();
}
public void setNextReader(AtomicReaderContext context) {
if (this.reader == context.reader()) { // if we are called with the same reader, don't invalidate source
return;
}
this.reader = context.reader();
this.source = null;
this.sourceAsBytes = null;
this.docId = -1;
}
public void setNextDocId(int docId) {
if (this.docId == docId) { // if we are called with the same docId, don't invalidate source
return;
}
this.docId = docId;
this.sourceAsBytes = null;
this.source = null;
}
public void setNextSource(BytesReference source) {
this.sourceAsBytes = source;
}
public void setNextSource(Map<String, Object> source) {
this.source = source;
}
/**
* Internal source representation, might be compressed.
*/
public BytesReference internalSourceRef() {
return sourceAsBytes;
}
/**
* Returns the values associated with the path. Those are "low level" values, and the
* lookup can handle path expressions that navigate into arrays/lists.
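* For example, {@code extractRawValues("address.city")} would also collect city values
* nested inside arrays of address objects (illustrative path).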
*/
public List<Object> extractRawValues(String path) {
return XContentMapValues.extractRawValues(path, loadSourceIfNeeded());
}
public Object filter(String[] includes, String[] excludes) {
return XContentMapValues.filter(loadSourceIfNeeded(), includes, excludes);
}
public Object extractValue(String path) {
return XContentMapValues.extractValue(path, loadSourceIfNeeded());
}
@Override
public Object get(Object key) {
return loadSourceIfNeeded().get(key);
}
@Override
public int size() {
return loadSourceIfNeeded().size();
}
@Override
public boolean isEmpty() {
return loadSourceIfNeeded().isEmpty();
}
@Override
public boolean containsKey(Object key) {
return loadSourceIfNeeded().containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return loadSourceIfNeeded().containsValue(value);
}
@Override
public Set keySet() {
return loadSourceIfNeeded().keySet();
}
@Override
public Collection values() {
return loadSourceIfNeeded().values();
}
@Override
public Set entrySet() {
return loadSourceIfNeeded().entrySet();
}
@Override
public Object put(Object key, Object value) {
throw new UnsupportedOperationException();
}
@Override
public Object remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_lookup_SourceLookup.java
|
640 |
public class ShardStatus extends BroadcastShardOperationResponse {
private ShardRouting shardRouting;
IndexShardState state;
ByteSizeValue storeSize;
long translogId = -1;
long translogOperations = -1;
DocsStatus docs;
MergeStats mergeStats;
RefreshStats refreshStats;
FlushStats flushStats;
PeerRecoveryStatus peerRecoveryStatus;
GatewayRecoveryStatus gatewayRecoveryStatus;
GatewaySnapshotStatus gatewaySnapshotStatus;
ShardStatus() {
}
ShardStatus(ShardRouting shardRouting) {
super(shardRouting.index(), shardRouting.id());
this.shardRouting = shardRouting;
}
/**
* The shard routing information (cluster wide shard state).
*/
public ShardRouting getShardRouting() {
return this.shardRouting;
}
/**
* The shard state (index/local state).
*/
public IndexShardState getState() {
return state;
}
/**
* The current size of the shard index storage.
*/
public ByteSizeValue getStoreSize() {
return storeSize;
}
/**
* The transaction log id.
*/
public long getTranslogId() {
return translogId;
}
/**
* The number of transaction operations in the transaction log.
*/
public long getTranslogOperations() {
return translogOperations;
}
/**
* Docs level information for the shard index, <tt>null</tt> if not applicable.
*/
public DocsStatus getDocs() {
return docs;
}
/**
* Index merge statistics.
*/
public MergeStats getMergeStats() {
return this.mergeStats;
}
/**
* Refresh stats.
*/
public RefreshStats getRefreshStats() {
return this.refreshStats;
}
public FlushStats getFlushStats() {
return this.flushStats;
}
/**
* Peer recovery status (<tt>null</tt> if not applicable). Real-time while a recovery
* is in progress, and a summary once it is done.
*/
public PeerRecoveryStatus getPeerRecoveryStatus() {
return peerRecoveryStatus;
}
/**
* Gateway recovery status (<tt>null</tt> if not applicable). Real-time while a recovery
* is in progress, and a summary once it is done.
*/
public GatewayRecoveryStatus getGatewayRecoveryStatus() {
return gatewayRecoveryStatus;
}
/**
* The current ongoing snapshot to the gateway, or the last one if none is ongoing.
*/
public GatewaySnapshotStatus getGatewaySnapshotStatus() {
return gatewaySnapshotStatus;
}
public static ShardStatus readIndexShardStatus(StreamInput in) throws IOException {
ShardStatus shardStatus = new ShardStatus();
shardStatus.readFrom(in);
return shardStatus;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
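// optional sub-structures are written as a boolean presence flag followed by the
// payload; readFrom mirrors this exact order.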
super.writeTo(out);
shardRouting.writeTo(out);
out.writeByte(state.id());
if (storeSize == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
storeSize.writeTo(out);
}
out.writeLong(translogId);
out.writeLong(translogOperations);
if (docs == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeLong(docs.getNumDocs());
out.writeLong(docs.getMaxDoc());
out.writeLong(docs.getDeletedDocs());
}
if (peerRecoveryStatus == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(peerRecoveryStatus.stage.value());
out.writeVLong(peerRecoveryStatus.startTime);
out.writeVLong(peerRecoveryStatus.time);
out.writeVLong(peerRecoveryStatus.indexSize);
out.writeVLong(peerRecoveryStatus.reusedIndexSize);
out.writeVLong(peerRecoveryStatus.recoveredIndexSize);
out.writeVLong(peerRecoveryStatus.recoveredTranslogOperations);
}
if (gatewayRecoveryStatus == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(gatewayRecoveryStatus.stage.value());
out.writeVLong(gatewayRecoveryStatus.startTime);
out.writeVLong(gatewayRecoveryStatus.time);
out.writeVLong(gatewayRecoveryStatus.indexSize);
out.writeVLong(gatewayRecoveryStatus.reusedIndexSize);
out.writeVLong(gatewayRecoveryStatus.recoveredIndexSize);
out.writeVLong(gatewayRecoveryStatus.recoveredTranslogOperations);
}
if (gatewaySnapshotStatus == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeByte(gatewaySnapshotStatus.stage.value());
out.writeVLong(gatewaySnapshotStatus.startTime);
out.writeVLong(gatewaySnapshotStatus.time);
out.writeVLong(gatewaySnapshotStatus.indexSize);
out.writeVInt(gatewaySnapshotStatus.getExpectedNumberOfOperations());
}
if (mergeStats == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
mergeStats.writeTo(out);
}
if (refreshStats == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
refreshStats.writeTo(out);
}
if (flushStats == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
flushStats.writeTo(out);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardRouting = readShardRoutingEntry(in);
state = IndexShardState.fromId(in.readByte());
if (in.readBoolean()) {
storeSize = readBytesSizeValue(in);
}
translogId = in.readLong();
translogOperations = in.readLong();
if (in.readBoolean()) {
docs = new DocsStatus();
docs.numDocs = in.readLong();
docs.maxDoc = in.readLong();
docs.deletedDocs = in.readLong();
}
if (in.readBoolean()) {
peerRecoveryStatus = new PeerRecoveryStatus(PeerRecoveryStatus.Stage.fromValue(in.readByte()),
in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong());
}
if (in.readBoolean()) {
gatewayRecoveryStatus = new GatewayRecoveryStatus(GatewayRecoveryStatus.Stage.fromValue(in.readByte()),
in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong());
}
if (in.readBoolean()) {
gatewaySnapshotStatus = new GatewaySnapshotStatus(GatewaySnapshotStatus.Stage.fromValue(in.readByte()),
in.readVLong(), in.readVLong(), in.readVLong(), in.readVInt());
}
if (in.readBoolean()) {
mergeStats = MergeStats.readMergeStats(in);
}
if (in.readBoolean()) {
refreshStats = RefreshStats.readRefreshStats(in);
}
if (in.readBoolean()) {
flushStats = FlushStats.readFlushStats(in);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_status_ShardStatus.java
|
365 |
public class TranslatedEntity implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, TranslatedEntity> TYPES = new LinkedHashMap<String, TranslatedEntity>();
public static final TranslatedEntity PRODUCT = new TranslatedEntity("org.broadleafcommerce.core.catalog.domain.Product", "Product");
public static final TranslatedEntity SKU = new TranslatedEntity("org.broadleafcommerce.core.catalog.domain.Sku", "Sku");
public static final TranslatedEntity CATEGORY = new TranslatedEntity("org.broadleafcommerce.core.catalog.domain.Category", "Category");
public static final TranslatedEntity PRODUCT_OPTION = new TranslatedEntity("org.broadleafcommerce.core.catalog.domain.ProductOption", "ProdOption");
public static final TranslatedEntity PRODUCT_OPTION_VALUE = new TranslatedEntity("org.broadleafcommerce.core.catalog.domain.ProductOptionValue", "ProdOptionVal");
public static final TranslatedEntity STATIC_ASSET = new TranslatedEntity("org.broadleafcommerce.cms.file.domain.StaticAsset", "StaticAsset");
public static final TranslatedEntity SEARCH_FACET = new TranslatedEntity("org.broadleafcommerce.core.search.domain.SearchFacet", "SearchFacet");
public static final TranslatedEntity FULFILLMENT_OPTION = new TranslatedEntity("org.broadleafcommerce.core.order.domain.FulfillmentOption", "FulfillmentOption");
public static final TranslatedEntity OFFER = new TranslatedEntity("org.broadleafcommerce.core.offer.domain.Offer", "Offer");
public static TranslatedEntity getInstance(final String type) {
return TYPES.get(type);
}
public static TranslatedEntity getInstanceFromFriendlyType(final String friendlyType) {
for (Entry<String, TranslatedEntity> entry : TYPES.entrySet()) {
if (entry.getValue().getFriendlyType().equals(friendlyType)) {
return entry.getValue();
}
}
return null;
}
private String type;
private String friendlyType;
public TranslatedEntity() {
//do nothing
}
public TranslatedEntity(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
public static Map<String, TranslatedEntity> getTypes() {
return TYPES;
}
private void setType(final String type) {
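// instances self-register in the shared TYPES map on construction, extending the
// enumeration at runtime (the BroadleafEnumerationType pattern).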
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
TranslatedEntity other = (TranslatedEntity) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
common_src_main_java_org_broadleafcommerce_common_i18n_domain_TranslatedEntity.java
|
6,422 |
targetTransport.threadPool().generic().execute(new Runnable() {
@Override
public void run() {
targetTransport.messageReceived(data, action, sourceTransport, version, null);
}
});
| 1no label
|
src_main_java_org_elasticsearch_transport_local_LocalTransportChannel.java
|
224 |
public static class Echo extends HazelcastInstanceAwareObject implements Callable<String>, DataSerializable {
String input;
public Echo() {
}
public Echo(String input) {
this.input = input;
}
@Override
public String call() {
getHazelcastInstance().getCountDownLatch("latch").countDown();
return getHazelcastInstance().getCluster().getLocalMember().toString() + ":" + input;
}
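// writeData/readData must serialize fields in the same order; here the single
// field is written and read back as a UTF string.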
@Override
public void writeData(ObjectDataOutput out) throws IOException {
out.writeUTF(input);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
input = in.readUTF();
}
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_examples_ClientTestApp.java
|
4,460 |
static class ShardIndexingStatus {
long translogId = -1;
int translogNumberOfOperations = -1;
boolean inactiveIndexing = false;
long time = -1; // contains the first time we saw this shard with no operations done on it
}
| 1no label
|
src_main_java_org_elasticsearch_indices_memory_IndexingMemoryController.java
|
44 |
public enum OccurrenceLocation {
EXISTS(false),
NONEMPTY(false),
IS(false),
EXTENDS(false),
SATISFIES(false),
CLASS_ALIAS(false),
OF(false),
UPPER_BOUND(false),
TYPE_ALIAS(false),
CASE(false),
CATCH(false),
IMPORT(false),
EXPRESSION(false),
PARAMETER_LIST(false),
TYPE_PARAMETER_LIST(false),
TYPE_ARGUMENT_LIST(false),
META(false),
PACKAGE_REF(true),
MODULE_REF(true),
INTERFACE_REF(true),
CLASS_REF(true),
ALIAS_REF(true),
TYPE_PARAMETER_REF(true),
VALUE_REF(true),
FUNCTION_REF(true),
DOCLINK(false);
public final boolean reference;
OccurrenceLocation(boolean reference) {
this.reference = reference;
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_OccurrenceLocation.java
|
6,109 |
public class SnapshotsService extends AbstractComponent implements ClusterStateListener {
private final ClusterService clusterService;
private final RepositoriesService repositoriesService;
private final ThreadPool threadPool;
private final IndicesService indicesService;
private final TransportService transportService;
private volatile ImmutableMap<SnapshotId, SnapshotShards> shardSnapshots = ImmutableMap.of();
private final CopyOnWriteArrayList<SnapshotCompletionListener> snapshotCompletionListeners = new CopyOnWriteArrayList<SnapshotCompletionListener>();
@Inject
public SnapshotsService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, ThreadPool threadPool,
IndicesService indicesService, TransportService transportService) {
super(settings);
this.clusterService = clusterService;
this.repositoriesService = repositoriesService;
this.threadPool = threadPool;
this.indicesService = indicesService;
this.transportService = transportService;
transportService.registerHandler(UpdateSnapshotStateRequestHandler.ACTION, new UpdateSnapshotStateRequestHandler());
// addLast to make sure that the repository is created before the snapshot starts
clusterService.addLast(this);
}
/**
* Retrieves snapshot from repository
*
* @param snapshotId snapshot id
* @return snapshot
* @throws SnapshotMissingException if snapshot is not found
*/
public Snapshot snapshot(SnapshotId snapshotId) {
return repositoriesService.repository(snapshotId.getRepository()).readSnapshot(snapshotId);
}
/**
* Returns a list of snapshots from repository sorted by snapshot creation date
*
* @param repositoryName repository name
* @return list of snapshots
*/
public ImmutableList<Snapshot> snapshots(String repositoryName) {
ArrayList<Snapshot> snapshotList = newArrayList();
Repository repository = repositoriesService.repository(repositoryName);
ImmutableList<SnapshotId> snapshotIds = repository.snapshots();
for (SnapshotId snapshotId : snapshotIds) {
snapshotList.add(repository.readSnapshot(snapshotId));
}
CollectionUtil.timSort(snapshotList);
return ImmutableList.copyOf(snapshotList);
}
/**
* Initializes the snapshotting process.
* <p/>
* This method is used by clients to start a snapshot. It makes sure that no snapshots are currently running and
* creates a snapshot record in cluster state metadata.
*
* @param request snapshot request
* @param listener snapshot creation listener
*/
public void createSnapshot(final SnapshotRequest request, final CreateSnapshotListener listener) {
final SnapshotId snapshotId = new SnapshotId(request.repository(), request.name());
clusterService.submitStateUpdateTask(request.cause(), new TimeoutClusterStateUpdateTask() {
private SnapshotMetaData.Entry newSnapshot = null;
@Override
public ClusterState execute(ClusterState currentState) {
validate(request, currentState);
MetaData metaData = currentState.metaData();
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
if (snapshots == null || snapshots.entries().isEmpty()) {
// Store newSnapshot here to be processed in clusterStateProcessed
ImmutableList<String> indices = ImmutableList.copyOf(metaData.concreteIndices(request.indices(), request.indicesOptions()));
logger.trace("[{}][{}] creating snapshot for indices [{}]", request.repository(), request.name(), indices);
newSnapshot = new SnapshotMetaData.Entry(snapshotId, request.includeGlobalState(), State.INIT, indices, null);
snapshots = new SnapshotMetaData(newSnapshot);
} else {
// TODO: What should we do if a snapshot is already running?
throw new ConcurrentSnapshotExecutionException(snapshotId, "a snapshot is already running");
}
mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("[{}][{}] failed to create snapshot", t, request.repository(), request.name());
newSnapshot = null;
listener.onFailure(t);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) {
if (newSnapshot != null) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
@Override
public void run() {
beginSnapshot(newState, newSnapshot, request.partial, listener);
}
});
}
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
});
}
/**
* Validates snapshot request
*
* @param request snapshot request
* @param state current cluster state
* @throws org.elasticsearch.ElasticsearchException
*/
private void validate(SnapshotRequest request, ClusterState state) throws ElasticsearchException {
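// snapshot names must be non-empty, lowercase, valid file names, contain no
// whitespace, ',' or '#', and must not start with '_'.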
RepositoriesMetaData repositoriesMetaData = state.getMetaData().custom(RepositoriesMetaData.TYPE);
if (repositoriesMetaData == null || repositoriesMetaData.repository(request.repository()) == null) {
throw new RepositoryMissingException(request.repository());
}
if (!Strings.hasLength(request.name())) {
throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "cannot be empty");
}
if (request.name().contains(" ")) {
throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not contain whitespace");
}
if (request.name().contains(",")) {
throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not contain ','");
}
if (request.name().contains("#")) {
throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not contain '#'");
}
if (request.name().charAt(0) == '_') {
throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not start with '_'");
}
if (!request.name().toLowerCase(Locale.ROOT).equals(request.name())) {
throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must be lowercase");
}
if (!Strings.validFileName(request.name())) {
throw new InvalidSnapshotNameException(new SnapshotId(request.repository(), request.name()), "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
}
}
/**
* Starts snapshot.
* <p/>
* Creates snapshot in repository and updates snapshot metadata record with list of shards that needs to be processed.
*
* @param clusterState cluster state
* @param snapshot snapshot meta data
* @param partial allow partial snapshots
* @param userCreateSnapshotListener listener
*/
private void beginSnapshot(ClusterState clusterState, final SnapshotMetaData.Entry snapshot, final boolean partial, final CreateSnapshotListener userCreateSnapshotListener) {
boolean snapshotCreated = false;
try {
Repository repository = repositoriesService.repository(snapshot.snapshotId().getRepository());
MetaData metaData = clusterState.metaData();
if (!snapshot.includeGlobalState()) {
// Remove global state from the cluster state
MetaData.Builder builder = MetaData.builder();
for (String index : snapshot.indices()) {
builder.put(metaData.index(index), false);
}
metaData = builder.build();
}
repository.initializeSnapshot(snapshot.snapshotId(), snapshot.indices(), metaData);
snapshotCreated = true;
if (snapshot.indices().isEmpty()) {
// No indices in this snapshot - we are done
userCreateSnapshotListener.onResponse();
endSnapshot(snapshot);
return;
}
clusterService.submitStateUpdateTask("update_snapshot [" + snapshot + "]", new ProcessedClusterStateUpdateTask() {
boolean accepted = false;
SnapshotMetaData.Entry updatedSnapshot;
String failure = null;
@Override
public ClusterState execute(ClusterState currentState) {
MetaData metaData = currentState.metaData();
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
ImmutableList.Builder<SnapshotMetaData.Entry> entries = ImmutableList.builder();
for (SnapshotMetaData.Entry entry : snapshots.entries()) {
if (entry.snapshotId().equals(snapshot.snapshotId())) {
// Replace the snapshot that was just created
ImmutableMap<ShardId, SnapshotMetaData.ShardSnapshotStatus> shards = shards(snapshot.snapshotId(), currentState, snapshot.indices());
if (!partial) {
Set<String> indicesWithMissingShards = indicesWithMissingShards(shards);
if (indicesWithMissingShards != null) {
updatedSnapshot = new SnapshotMetaData.Entry(snapshot.snapshotId(), snapshot.includeGlobalState(), State.FAILED, snapshot.indices(), shards);
entries.add(updatedSnapshot);
failure = "Indices don't have primary shards +[" + indicesWithMissingShards + "]";
continue;
}
}
updatedSnapshot = new SnapshotMetaData.Entry(snapshot.snapshotId(), snapshot.includeGlobalState(), State.STARTED, snapshot.indices(), shards);
entries.add(updatedSnapshot);
if (!completed(shards.values())) {
accepted = true;
}
} else {
entries.add(entry);
}
}
mdBuilder.putCustom(SnapshotMetaData.TYPE, new SnapshotMetaData(entries.build()));
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("[{}] failed to create snapshot", t, snapshot.snapshotId());
userCreateSnapshotListener.onFailure(t);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
// The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted
// for processing. If client wants to wait for the snapshot completion, it can register snapshot
// completion listener in this method. For the snapshot completion to work properly, the snapshot
// should still exist when listener is registered.
userCreateSnapshotListener.onResponse();
// Now that snapshot completion listener is registered we can end the snapshot if needed
// We should end snapshot only if 1) we didn't accept it for processing (which happens when there
// is nothing to do) and 2) there was a snapshot in metadata that we should end. Otherwise we should
// go ahead and continue working on this snapshot rather then end here.
if (!accepted && updatedSnapshot != null) {
endSnapshot(updatedSnapshot, failure);
}
}
});
} catch (Throwable t) {
logger.warn("failed to create snapshot [{}]", t, snapshot.snapshotId());
clusterService.submitStateUpdateTask("fail_snapshot [" + snapshot.snapshotId() + "]", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
MetaData metaData = currentState.metaData();
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
ImmutableList.Builder<SnapshotMetaData.Entry> entries = ImmutableList.builder();
for (SnapshotMetaData.Entry entry : snapshots.entries()) {
if (!entry.snapshotId().equals(snapshot.snapshotId())) {
entries.add(entry);
}
}
mdBuilder.putCustom(SnapshotMetaData.TYPE, new SnapshotMetaData(entries.build()));
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("[{}] failed to delete snapshot", t, snapshot.snapshotId());
}
});
if (snapshotCreated) {
try {
repositoriesService.repository(snapshot.snapshotId().getRepository()).finalizeSnapshot(snapshot.snapshotId(), ExceptionsHelper.detailedMessage(t), 0, ImmutableList.<SnapshotShardFailure>of());
} catch (Throwable t2) {
logger.warn("[{}] failed to close snapshot in repository", snapshot.snapshotId());
}
}
userCreateSnapshotListener.onFailure(t);
}
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
try {
if (event.localNodeMaster()) {
if (event.nodesRemoved()) {
processSnapshotsOnRemovedNodes(event);
}
}
SnapshotMetaData prev = event.previousState().metaData().custom(SnapshotMetaData.TYPE);
SnapshotMetaData curr = event.state().metaData().custom(SnapshotMetaData.TYPE);
if (prev == null) {
if (curr != null) {
processIndexShardSnapshots(curr);
}
} else {
if (!prev.equals(curr)) {
processIndexShardSnapshots(curr);
}
}
} catch (Throwable t) {
logger.warn("Failed to update snapshot state ", t);
}
}
/**
* Cleans up shard snapshots that were running on removed nodes
*
* @param event cluster changed event
*/
private void processSnapshotsOnRemovedNodes(ClusterChangedEvent event) {
if (removedNodesCleanupNeeded(event)) {
// Check if we just became the master
final boolean newMaster = !event.previousState().nodes().localNodeMaster();
clusterService.submitStateUpdateTask("update snapshot state after node removal", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
DiscoveryNodes nodes = currentState.nodes();
MetaData metaData = currentState.metaData();
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
if (snapshots == null) {
return currentState;
}
boolean changed = false;
ArrayList<SnapshotMetaData.Entry> entries = newArrayList();
for (final SnapshotMetaData.Entry snapshot : snapshots.entries()) {
SnapshotMetaData.Entry updatedSnapshot = snapshot;
boolean snapshotChanged = false;
if (snapshot.state() == State.STARTED) {
ImmutableMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableMap.builder();
for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> shardEntry : snapshot.shards().entrySet()) {
ShardSnapshotStatus shardStatus = shardEntry.getValue();
if (!shardStatus.state().completed() && shardStatus.nodeId() != null) {
if (nodes.nodeExists(shardStatus.nodeId())) {
shards.put(shardEntry);
} else {
// TODO: Restart snapshot on another node?
snapshotChanged = true;
logger.warn("failing snapshot of shard [{}] on closed node [{}]", shardEntry.getKey(), shardStatus.nodeId());
shards.put(shardEntry.getKey(), new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown"));
}
}
}
if (snapshotChanged) {
changed = true;
ImmutableMap<ShardId, ShardSnapshotStatus> shardsMap = shards.build();
if (!snapshot.state().completed() && completed(shardsMap.values())) {
updatedSnapshot = new SnapshotMetaData.Entry(snapshot.snapshotId(), snapshot.includeGlobalState(), State.SUCCESS, snapshot.indices(), shardsMap);
endSnapshot(updatedSnapshot);
} else {
updatedSnapshot = new SnapshotMetaData.Entry(snapshot.snapshotId(), snapshot.includeGlobalState(), snapshot.state(), snapshot.indices(), shardsMap);
}
}
entries.add(updatedSnapshot);
} else if (snapshot.state() == State.INIT && newMaster) {
// Clean up the snapshot that failed to start from the old master
deleteSnapshot(snapshot.snapshotId(), new DeleteSnapshotListener() {
@Override
public void onResponse() {
logger.debug("cleaned up abandoned snapshot {} in INIT state", snapshot.snapshotId());
}
@Override
public void onFailure(Throwable t) {
logger.warn("failed to clean up abandoned snapshot {} in INIT state", snapshot.snapshotId());
}
});
} else if (snapshot.state() == State.SUCCESS && newMaster) {
// Finalize the snapshot
endSnapshot(snapshot);
}
}
if (changed) {
snapshots = new SnapshotMetaData(entries.toArray(new SnapshotMetaData.Entry[entries.size()]));
mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("failed to update snapshot state after node removal");
}
});
}
}
private boolean removedNodesCleanupNeeded(ClusterChangedEvent event) {
// Check if we just became the master
boolean newMaster = !event.previousState().nodes().localNodeMaster();
SnapshotMetaData snapshotMetaData = event.state().getMetaData().custom(SnapshotMetaData.TYPE);
if (snapshotMetaData == null) {
return false;
}
for (SnapshotMetaData.Entry snapshot : snapshotMetaData.entries()) {
if (newMaster && (snapshot.state() == State.SUCCESS || snapshot.state() == State.INIT)) {
// We just replaced old master and snapshots in intermediate states needs to be cleaned
return true;
}
for (DiscoveryNode node : event.nodesDelta().removedNodes()) {
for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> shardEntry : snapshot.shards().entrySet()) {
ShardSnapshotStatus shardStatus = shardEntry.getValue();
if (!shardStatus.state().completed() && node.getId().equals(shardStatus.nodeId())) {
// At least one shard was running on the removed node - we need to fail it
return true;
}
}
}
}
return false;
}
/**
* Checks if any new shards should be snapshotted on this node
*
* @param snapshotMetaData snapshot metadata to be processed
*/
private void processIndexShardSnapshots(SnapshotMetaData snapshotMetaData) {
Map<SnapshotId, SnapshotShards> survivors = newHashMap();
// First, remove snapshots that are no longer there
for (Map.Entry<SnapshotId, SnapshotShards> entry : shardSnapshots.entrySet()) {
if (snapshotMetaData != null && snapshotMetaData.snapshot(entry.getKey()) != null) {
survivors.put(entry.getKey(), entry.getValue());
}
}
// For now we will be mostly dealing with a single snapshot at a time but might have multiple simultaneously running
// snapshots in the future
HashMap<SnapshotId, SnapshotShards> newSnapshots = null;
// Now go through all snapshots and update existing or create missing
final String localNodeId = clusterService.localNode().id();
for (SnapshotMetaData.Entry entry : snapshotMetaData.entries()) {
HashMap<ShardId, IndexShardSnapshotStatus> startedShards = null;
for (Map.Entry<ShardId, SnapshotMetaData.ShardSnapshotStatus> shard : entry.shards().entrySet()) {
// Check if we have new shards to start processing on
if (localNodeId.equals(shard.getValue().nodeId())) {
if (entry.state() == State.STARTED) {
if (startedShards == null) {
startedShards = newHashMap();
}
startedShards.put(shard.getKey(), new IndexShardSnapshotStatus());
} else if (entry.state() == State.ABORTED) {
SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshotId());
if (snapshotShards != null) {
IndexShardSnapshotStatus snapshotStatus = snapshotShards.shards.get(shard.getKey());
if (snapshotStatus != null) {
snapshotStatus.abort();
}
}
}
}
}
if (startedShards != null) {
if (!survivors.containsKey(entry.snapshotId())) {
if (newSnapshots == null) {
newSnapshots = newHashMapWithExpectedSize(2);
}
newSnapshots.put(entry.snapshotId(), new SnapshotShards(ImmutableMap.copyOf(startedShards)));
}
}
}
if (newSnapshots != null) {
survivors.putAll(newSnapshots);
}
// Update the list of snapshots that we saw and tried to start.
// If startup of these shards fails later, we don't want to try starting these shards again.
shardSnapshots = ImmutableMap.copyOf(survivors);
// We have new snapshots to process - start them
if (newSnapshots != null) {
for (final Map.Entry<SnapshotId, SnapshotShards> entry : newSnapshots.entrySet()) {
for (final Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : entry.getValue().shards.entrySet()) {
try {
final IndexShardSnapshotAndRestoreService shardSnapshotService = indicesService.indexServiceSafe(shardEntry.getKey().getIndex()).shardInjectorSafe(shardEntry.getKey().id())
.getInstance(IndexShardSnapshotAndRestoreService.class);
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
@Override
public void run() {
try {
shardSnapshotService.snapshot(entry.getKey(), shardEntry.getValue());
updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(entry.getKey(), shardEntry.getKey(), new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.SUCCESS)));
} catch (Throwable t) {
logger.warn("[{}] [{}] failed to create snapshot", t, shardEntry.getKey(), entry.getKey());
updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(entry.getKey(), shardEntry.getKey(), new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.FAILED, ExceptionsHelper.detailedMessage(t))));
}
}
});
} catch (Throwable t) {
updateIndexShardSnapshotStatus(new UpdateIndexShardSnapshotStatusRequest(entry.getKey(), shardEntry.getKey(), new ShardSnapshotStatus(localNodeId, SnapshotMetaData.State.FAILED, ExceptionsHelper.detailedMessage(t))));
}
}
}
}
}
/**
* Updates the shard status
*
* @param request update shard status request
*/
private void updateIndexShardSnapshotStatus(UpdateIndexShardSnapshotStatusRequest request) {
try {
if (clusterService.state().nodes().localNodeMaster()) {
innerUpdateSnapshotState(request);
} else {
transportService.sendRequest(clusterService.state().nodes().masterNode(),
UpdateSnapshotStateRequestHandler.ACTION, request, EmptyTransportResponseHandler.INSTANCE_SAME);
}
} catch (Throwable t) {
logger.warn("[{}] [{}] failed to update snapshot state", t, request.snapshotId(), request.status());
}
}
/**
* Checks if all shards in the list have completed
*
* @param shards list of shard statuses
* @return true if all shards have completed (either successfully or failed), false otherwise
*/
private boolean completed(Collection<SnapshotMetaData.ShardSnapshotStatus> shards) {
for (ShardSnapshotStatus status : shards) {
if (!status.state().completed()) {
return false;
}
}
return true;
}
/**
* Returns the set of indices with missing shards
*
* @param shards list of shard statuses
* @return set of indices with missing shards, or null if there are none
*/
private Set<String> indicesWithMissingShards(ImmutableMap<ShardId, SnapshotMetaData.ShardSnapshotStatus> shards) {
Set<String> indices = null;
for (ImmutableMap.Entry<ShardId, SnapshotMetaData.ShardSnapshotStatus> entry : shards.entrySet()) {
if (entry.getValue().state() == State.MISSING) {
if (indices == null) {
indices = newHashSet();
}
indices.add(entry.getKey().getIndex());
}
}
return indices;
}
/**
* Updates the shard status on master node
*
* @param request update shard status request
*/
private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request) {
clusterService.submitStateUpdateTask("update snapshot state", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
MetaData metaData = currentState.metaData();
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
if (snapshots != null) {
boolean changed = false;
ArrayList<SnapshotMetaData.Entry> entries = newArrayList();
for (SnapshotMetaData.Entry entry : snapshots.entries()) {
if (entry.snapshotId().equals(request.snapshotId())) {
HashMap<ShardId, ShardSnapshotStatus> shards = newHashMap(entry.shards());
logger.trace("[{}] Updating shard [{}] with status [{}]", request.snapshotId(), request.shardId(), request.status().state());
shards.put(request.shardId(), request.status());
if (!completed(shards.values())) {
entries.add(new SnapshotMetaData.Entry(entry.snapshotId(), entry.includeGlobalState(), entry.state(), entry.indices(), ImmutableMap.copyOf(shards)));
} else {
// Snapshot is finished - mark it as done
// TODO: Add PARTIAL_SUCCESS status?
SnapshotMetaData.Entry updatedEntry = new SnapshotMetaData.Entry(entry.snapshotId(), entry.includeGlobalState(), State.SUCCESS, entry.indices(), ImmutableMap.copyOf(shards));
entries.add(updatedEntry);
// Finalize snapshot in the repository
endSnapshot(updatedEntry);
logger.info("snapshot [{}] is done", updatedEntry.snapshotId());
}
changed = true;
} else {
entries.add(entry);
}
}
if (changed) {
snapshots = new SnapshotMetaData(entries.toArray(new SnapshotMetaData.Entry[entries.size()]));
mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
}
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("[{}][{}] failed to update snapshot status to [{}]", t, request.snapshotId(), request.shardId(), request.status());
}
});
}
/**
* Finalizes the snapshot in the repository and then removes it from the cluster state
* <p/>
* This is a non-blocking method that runs on a thread from the SNAPSHOT thread pool
*
* @param entry snapshot
*/
private void endSnapshot(SnapshotMetaData.Entry entry) {
endSnapshot(entry, null);
}
/**
* Finalizes the snapshot in the repository and then removes it from the cluster state
* <p/>
* This is a non-blocking method that runs on a thread from the SNAPSHOT thread pool
*
* @param entry snapshot
* @param failure failure reason or null if snapshot was successful
*/
private void endSnapshot(final SnapshotMetaData.Entry entry, final String failure) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
@Override
public void run() {
SnapshotId snapshotId = entry.snapshotId();
try {
final Repository repository = repositoriesService.repository(snapshotId.getRepository());
logger.trace("[{}] finalizing snapshot in repository, state: [{}], failure[{}]", snapshotId, entry.state(), failure);
ArrayList<ShardSearchFailure> failures = newArrayList();
ArrayList<SnapshotShardFailure> shardFailures = newArrayList();
for (Map.Entry<ShardId, ShardSnapshotStatus> shardStatus : entry.shards().entrySet()) {
ShardId shardId = shardStatus.getKey();
ShardSnapshotStatus status = shardStatus.getValue();
if (status.state().failed()) {
failures.add(new ShardSearchFailure(status.reason(), new SearchShardTarget(status.nodeId(), shardId.getIndex(), shardId.id())));
shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId.getIndex(), shardId.id(), status.reason()));
}
}
Snapshot snapshot = repository.finalizeSnapshot(snapshotId, failure, entry.shards().size(), ImmutableList.copyOf(shardFailures));
removeSnapshotFromClusterState(snapshotId, new SnapshotInfo(snapshot), null);
} catch (Throwable t) {
logger.warn("[{}] failed to finalize snapshot", t, snapshotId);
removeSnapshotFromClusterState(snapshotId, null, t);
}
}
});
}
/**
* Removes record of running snapshot from cluster state
*
* @param snapshotId snapshot id
* @param snapshot snapshot info if snapshot was successful
* @param t exception if snapshot failed
*/
private void removeSnapshotFromClusterState(final SnapshotId snapshotId, final SnapshotInfo snapshot, final Throwable t) {
clusterService.submitStateUpdateTask("remove snapshot metadata", new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
MetaData metaData = currentState.metaData();
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
if (snapshots != null) {
boolean changed = false;
ArrayList<SnapshotMetaData.Entry> entries = newArrayList();
for (SnapshotMetaData.Entry entry : snapshots.entries()) {
if (entry.snapshotId().equals(snapshotId)) {
changed = true;
} else {
entries.add(entry);
}
}
if (changed) {
snapshots = new SnapshotMetaData(entries.toArray(new SnapshotMetaData.Entry[entries.size()]));
mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
}
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
logger.warn("[{}][{}] failed to remove snapshot metadata", t, snapshotId);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
for (SnapshotCompletionListener listener : snapshotCompletionListeners) {
try {
if (snapshot != null) {
listener.onSnapshotCompletion(snapshotId, snapshot);
} else {
listener.onSnapshotFailure(snapshotId, t);
}
} catch (Throwable t) {
logger.warn("failed to refresh settings for [{}]", t, listener);
}
}
}
});
}
/**
* Deletes snapshot from repository.
* <p/>
* If the snapshot is still running cancels the snapshot first and then deletes it from the repository.
*
* @param snapshotId snapshot id
* @param listener listener
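* <p/>
* A minimal usage sketch (repository and snapshot names are hypothetical):
* <pre>
* snapshotsService.deleteSnapshot(new SnapshotId("my_repository", "my_snapshot"), new DeleteSnapshotListener() {
*     public void onResponse() { logger.info("snapshot deleted"); }
*     public void onFailure(Throwable t) { logger.warn("failed to delete snapshot", t); }
* });
* </pre>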
*/
public void deleteSnapshot(final SnapshotId snapshotId, final DeleteSnapshotListener listener) {
clusterService.submitStateUpdateTask("delete snapshot", new ProcessedClusterStateUpdateTask() {
boolean waitForSnapshot = false;
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
MetaData metaData = currentState.metaData();
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
if (snapshots == null) {
// No snapshots running - we can continue
return currentState;
}
SnapshotMetaData.Entry snapshot = snapshots.snapshot(snapshotId);
if (snapshot == null) {
// This snapshot is not running - continue
if (!snapshots.entries().isEmpty()) {
// However other snapshots are running - cannot continue
throw new ConcurrentSnapshotExecutionException(snapshotId, "another snapshot is currently running, cannot delete");
}
return currentState;
} else {
// This snapshot is currently running - stopping shards first
waitForSnapshot = true;
ImmutableMap<ShardId, ShardSnapshotStatus> shards;
if (snapshot.state() == State.STARTED && snapshot.shards() != null) {
// snapshot is currently running - stop started shards
ImmutableMap.Builder<ShardId, ShardSnapshotStatus> shardsBuilder = ImmutableMap.builder();
for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> shardEntry : snapshot.shards().entrySet()) {
ShardSnapshotStatus status = shardEntry.getValue();
if (!status.state().completed()) {
shardsBuilder.put(shardEntry.getKey(), new ShardSnapshotStatus(status.nodeId(), State.ABORTED));
} else {
shardsBuilder.put(shardEntry.getKey(), status);
}
}
shards = shardsBuilder.build();
} else if (snapshot.state() == State.INIT) {
// snapshot hasn't started yet - end it
shards = snapshot.shards();
endSnapshot(snapshot);
} else {
// snapshot is being finalized - wait for it
logger.trace("trying to delete completed snapshot - save to delete");
return currentState;
}
SnapshotMetaData.Entry newSnapshot = new SnapshotMetaData.Entry(snapshotId, snapshot.includeGlobalState(), State.ABORTED, snapshot.indices(), shards);
snapshots = new SnapshotMetaData(newSnapshot);
mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots);
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
}
@Override
public void onFailure(String source, Throwable t) {
listener.onFailure(t);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
if (waitForSnapshot) {
logger.trace("adding snapshot completion listener to wait for deleted snapshot to finish");
addListener(new SnapshotCompletionListener() {
@Override
public void onSnapshotCompletion(SnapshotId snapshotId, SnapshotInfo snapshot) {
logger.trace("deleted snapshot completed - deleting files");
removeListener(this);
deleteSnapshotFromRepository(snapshotId, listener);
}
@Override
public void onSnapshotFailure(SnapshotId snapshotId, Throwable t) {
logger.trace("deleted snapshot failed - deleting files", t);
removeListener(this);
deleteSnapshotFromRepository(snapshotId, listener);
}
});
} else {
logger.trace("deleted snapshot is not running - deleting files");
deleteSnapshotFromRepository(snapshotId, listener);
}
}
});
}
/**
* Checks if a repository is currently in use by one of the snapshots
*
* @param clusterState cluster state
* @param repository repository id
* @return true if repository is currently in use by one of the running snapshots
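* <p/>
* A minimal usage sketch (the repository name is hypothetical):
* <pre>
* if (SnapshotsService.isRepositoryInUse(clusterService.state(), "my_repository")) {
*     // refuse to delete or modify the repository while a snapshot is using it
* }
* </pre>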
*/
public static boolean isRepositoryInUse(ClusterState clusterState, String repository) {
MetaData metaData = clusterState.metaData();
SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE);
if (snapshots != null) {
for (SnapshotMetaData.Entry snapshot : snapshots.entries()) {
if (repository.equals(snapshot.snapshotId().getRepository())) {
return true;
}
}
}
return false;
}
/**
* Deletes snapshot from repository
*
* @param snapshotId snapshot id
* @param listener listener
*/
private void deleteSnapshotFromRepository(final SnapshotId snapshotId, final DeleteSnapshotListener listener) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new Runnable() {
@Override
public void run() {
try {
Repository repository = repositoriesService.repository(snapshotId.getRepository());
repository.deleteSnapshot(snapshotId);
listener.onResponse();
} catch (Throwable t) {
listener.onFailure(t);
}
}
});
}
/**
* Calculates the set of shards that should be included in the current snapshot
*
* @param snapshotId snapshot id
* @param clusterState cluster state
* @param indices list of indices to be snapshotted
* @return map of shard ids to snapshot statuses for the shards to be included in the snapshot
*/
private ImmutableMap<ShardId, SnapshotMetaData.ShardSnapshotStatus> shards(SnapshotId snapshotId, ClusterState clusterState, ImmutableList<String> indices) {
ImmutableMap.Builder<ShardId, SnapshotMetaData.ShardSnapshotStatus> builder = ImmutableMap.builder();
MetaData metaData = clusterState.metaData();
for (String index : indices) {
IndexMetaData indexMetaData = metaData.index(index);
IndexRoutingTable indexRoutingTable = clusterState.getRoutingTable().index(index);
if (indexRoutingTable == null) {
throw new SnapshotCreationException(snapshotId, "Missing routing table for index [" + index + "]");
}
for (int i = 0; i < indexMetaData.numberOfShards(); i++) {
ShardId shardId = new ShardId(index, i);
ShardRouting primary = indexRoutingTable.shard(i).primaryShard();
if (primary == null || !primary.assignedToNode()) {
builder.put(shardId, new SnapshotMetaData.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated"));
} else if (!primary.started()) {
builder.put(shardId, new SnapshotMetaData.ShardSnapshotStatus(primary.currentNodeId(), State.MISSING, "primary shard hasn't been started yet"));
} else {
builder.put(shardId, new SnapshotMetaData.ShardSnapshotStatus(primary.currentNodeId()));
}
}
}
return builder.build();
}
/**
* Adds snapshot completion listener
*
* @param listener listener
*/
public void addListener(SnapshotCompletionListener listener) {
this.snapshotCompletionListeners.add(listener);
}
/**
* Removes snapshot completion listener
*
* @param listener listener
*/
public void removeListener(SnapshotCompletionListener listener) {
this.snapshotCompletionListeners.remove(listener);
}
/**
* Listener for create snapshot operation
*/
public static interface CreateSnapshotListener {
/**
* Called when snapshot has successfully started
*/
void onResponse();
/**
* Called if a snapshot operation couldn't start
*/
void onFailure(Throwable t);
}
/**
* Listener for delete snapshot operation
*/
public static interface DeleteSnapshotListener {
/**
* Called if delete operation was successful
*/
void onResponse();
/**
* Called if delete operation failed
*/
void onFailure(Throwable t);
}
public static interface SnapshotCompletionListener {
void onSnapshotCompletion(SnapshotId snapshotId, SnapshotInfo snapshot);
void onSnapshotFailure(SnapshotId snapshotId, Throwable t);
}
/**
* Snapshot creation request
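* <p/>
* A minimal construction sketch (all names and values are hypothetical); every setter returns this, so calls chain:
* <pre>
* SnapshotRequest request = new SnapshotRequest("api", "snapshot_1", "my_repository")
*         .indices(new String[]{"index_1", "index_2"})
*         .includeGlobalState(true)
*         .partial(false)
*         .masterNodeTimeout(TimeValue.timeValueSeconds(30));
* </pre>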
*/
public static class SnapshotRequest {
private String cause;
private String name;
private String repository;
private String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.strict();
private boolean partial;
private Settings settings;
private boolean includeGlobalState;
private TimeValue masterNodeTimeout;
/**
* Constructs new snapshot creation request
*
* @param cause cause for snapshot operation
* @param name name of the snapshot
* @param repository name of the repository
*/
public SnapshotRequest(String cause, String name, String repository) {
this.cause = cause;
this.name = name;
this.repository = repository;
}
/**
* Sets the list of indices to be snapshotted
*
* @param indices list of indices
* @return this request
*/
public SnapshotRequest indices(String[] indices) {
this.indices = indices;
return this;
}
/**
* Sets repository-specific snapshot settings
*
* @param settings snapshot settings
* @return this request
*/
public SnapshotRequest settings(Settings settings) {
this.settings = settings;
return this;
}
/**
* Set to true if global state should be stored as part of the snapshot
*
* @param includeGlobalState true if global state should be stored as part of the snapshot
* @return this request
*/
public SnapshotRequest includeGlobalState(boolean includeGlobalState) {
this.includeGlobalState = includeGlobalState;
return this;
}
/**
* Sets master node timeout
*
* @param masterNodeTimeout master node timeout
* @return this request
*/
public SnapshotRequest masterNodeTimeout(TimeValue masterNodeTimeout) {
this.masterNodeTimeout = masterNodeTimeout;
return this;
}
/**
* Sets the indices options
*
* @param indicesOptions indices options
* @return this request
*/
public SnapshotRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
/**
* Set to true if partial snapshot should be allowed
*
* @param partial true if partial snapshots should be allowed
* @return this request
*/
public SnapshotRequest partial(boolean partial) {
this.partial = partial;
return this;
}
/**
* Returns cause for snapshot operation
*
* @return cause for snapshot operation
*/
public String cause() {
return cause;
}
/**
* Returns snapshot name
*
* @return snapshot name
*/
public String name() {
return name;
}
/**
* Returns snapshot repository
*
* @return snapshot repository
*/
public String repository() {
return repository;
}
/**
* Returns the list of indices to be snapshotted
*
* @return the list of indices
*/
public String[] indices() {
return indices;
}
/**
* Returns indices options
*
* @return indices options
*/
public IndicesOptions indicesOptions() {
return indicesOptions;
}
/**
* Returns repository-specific settings for the snapshot operation
*
* @return repository-specific settings
*/
public Settings settings() {
return settings;
}
/**
* Returns true if global state should be stored as part of the snapshot
*
* @return true if global state should be stored as part of the snapshot
*/
public boolean includeGlobalState() {
return includeGlobalState;
}
/**
* Returns master node timeout
*
* @return master node timeout
*/
public TimeValue masterNodeTimeout() {
return masterNodeTimeout;
}
}
/**
* Stores the list of shards that have to be snapshotted on this node
*/
private static class SnapshotShards {
private final ImmutableMap<ShardId, IndexShardSnapshotStatus> shards;
private SnapshotShards(ImmutableMap<ShardId, IndexShardSnapshotStatus> shards) {
this.shards = shards;
}
}
/**
* Internal request that is used to send changes in snapshot status to master
*/
private static class UpdateIndexShardSnapshotStatusRequest extends TransportRequest {
private SnapshotId snapshotId;
private ShardId shardId;
private SnapshotMetaData.ShardSnapshotStatus status;
private UpdateIndexShardSnapshotStatusRequest() {
}
private UpdateIndexShardSnapshotStatusRequest(SnapshotId snapshotId, ShardId shardId, SnapshotMetaData.ShardSnapshotStatus status) {
this.snapshotId = snapshotId;
this.shardId = shardId;
this.status = status;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
snapshotId = SnapshotId.readSnapshotId(in);
shardId = ShardId.readShardId(in);
status = SnapshotMetaData.ShardSnapshotStatus.readShardSnapshotStatus(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
snapshotId.writeTo(out);
shardId.writeTo(out);
status.writeTo(out);
}
public SnapshotId snapshotId() {
return snapshotId;
}
public ShardId shardId() {
return shardId;
}
public SnapshotMetaData.ShardSnapshotStatus status() {
return status;
}
}
/**
* Transport request handler used on the master node to receive snapshot status updates from other nodes
*/
private class UpdateSnapshotStateRequestHandler extends BaseTransportRequestHandler<UpdateIndexShardSnapshotStatusRequest> {
static final String ACTION = "cluster/snapshot/update_snapshot";
@Override
public UpdateIndexShardSnapshotStatusRequest newInstance() {
return new UpdateIndexShardSnapshotStatusRequest();
}
@Override
public void messageReceived(UpdateIndexShardSnapshotStatusRequest request, final TransportChannel channel) throws Exception {
innerUpdateSnapshotState(request);
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_snapshots_SnapshotsService.java
|
332 |
public class TransportNodesInfoAction extends TransportNodesOperationAction<NodesInfoRequest, NodesInfoResponse, TransportNodesInfoAction.NodeInfoRequest, NodeInfo> {
private final NodeService nodeService;
@Inject
public TransportNodesInfoAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
NodeService nodeService) {
super(settings, clusterName, threadPool, clusterService, transportService);
this.nodeService = nodeService;
}
@Override
protected String executor() {
return ThreadPool.Names.MANAGEMENT;
}
@Override
protected String transportAction() {
return NodesInfoAction.NAME;
}
@Override
protected NodesInfoResponse newResponse(NodesInfoRequest nodesInfoRequest, AtomicReferenceArray responses) {
final List<NodeInfo> nodesInfos = new ArrayList<NodeInfo>();
for (int i = 0; i < responses.length(); i++) {
Object resp = responses.get(i);
if (resp instanceof NodeInfo) {
nodesInfos.add((NodeInfo) resp);
}
}
return new NodesInfoResponse(clusterName, nodesInfos.toArray(new NodeInfo[nodesInfos.size()]));
}
@Override
protected NodesInfoRequest newRequest() {
return new NodesInfoRequest();
}
@Override
protected NodeInfoRequest newNodeRequest() {
return new NodeInfoRequest();
}
@Override
protected NodeInfoRequest newNodeRequest(String nodeId, NodesInfoRequest request) {
return new NodeInfoRequest(nodeId, request);
}
@Override
protected NodeInfo newNodeResponse() {
return new NodeInfo();
}
@Override
protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) throws ElasticsearchException {
NodesInfoRequest request = nodeRequest.request;
return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
request.network(), request.transport(), request.http(), request.plugin());
}
@Override
protected boolean accumulateExceptions() {
return false;
}
static class NodeInfoRequest extends NodeOperationRequest {
NodesInfoRequest request;
NodeInfoRequest() {
}
NodeInfoRequest(String nodeId, NodesInfoRequest request) {
super(request, nodeId);
this.request = request;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
request = new NodesInfoRequest();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
request.writeTo(out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_info_TransportNodesInfoAction.java
|
5,830 |
public class SearchContextHighlight {
private final List<Field> fields;
public SearchContextHighlight(List<Field> fields) {
this.fields = fields;
}
public List<Field> fields() {
return fields;
}
public static class Field {
// Fields that default to null or -1 are often set to their real default in HighlighterParseElement#parse
private final String field;
private int fragmentCharSize = -1;
private int numberOfFragments = -1;
private int fragmentOffset = -1;
private String encoder;
private String[] preTags;
private String[] postTags;
private Boolean scoreOrdered;
private Boolean highlightFilter;
private Boolean requireFieldMatch;
private String highlighterType;
private Boolean forceSource;
private String fragmenter;
private int boundaryMaxScan = -1;
private Character[] boundaryChars = null;
private Query highlightQuery;
private int noMatchSize = -1;
private Set<String> matchedFields;
private Map<String, Object> options;
private int phraseLimit = -1;
public Field(String field) {
this.field = field;
}
public String field() {
return field;
}
public int fragmentCharSize() {
return fragmentCharSize;
}
public void fragmentCharSize(int fragmentCharSize) {
this.fragmentCharSize = fragmentCharSize;
}
public int numberOfFragments() {
return numberOfFragments;
}
public void numberOfFragments(int numberOfFragments) {
this.numberOfFragments = numberOfFragments;
}
public int fragmentOffset() {
return fragmentOffset;
}
public void fragmentOffset(int fragmentOffset) {
this.fragmentOffset = fragmentOffset;
}
public String encoder() {
return encoder;
}
public void encoder(String encoder) {
this.encoder = encoder;
}
public String[] preTags() {
return preTags;
}
public void preTags(String[] preTags) {
this.preTags = preTags;
}
public String[] postTags() {
return postTags;
}
public void postTags(String[] postTags) {
this.postTags = postTags;
}
public Boolean scoreOrdered() {
return scoreOrdered;
}
public void scoreOrdered(boolean scoreOrdered) {
this.scoreOrdered = scoreOrdered;
}
public Boolean highlightFilter() {
return highlightFilter;
}
public void highlightFilter(boolean highlightFilter) {
this.highlightFilter = highlightFilter;
}
public Boolean requireFieldMatch() {
return requireFieldMatch;
}
public void requireFieldMatch(boolean requireFieldMatch) {
this.requireFieldMatch = requireFieldMatch;
}
public String highlighterType() {
return highlighterType;
}
public void highlighterType(String type) {
this.highlighterType = type;
}
public Boolean forceSource() {
return forceSource;
}
public void forceSource(boolean forceSource) {
this.forceSource = forceSource;
}
public String fragmenter() {
return fragmenter;
}
public void fragmenter(String fragmenter) {
this.fragmenter = fragmenter;
}
public int boundaryMaxScan() {
return boundaryMaxScan;
}
public void boundaryMaxScan(int boundaryMaxScan) {
this.boundaryMaxScan = boundaryMaxScan;
}
public Character[] boundaryChars() {
return boundaryChars;
}
public void boundaryChars(Character[] boundaryChars) {
this.boundaryChars = boundaryChars;
}
public Query highlightQuery() {
return highlightQuery;
}
public void highlightQuery(Query highlightQuery) {
this.highlightQuery = highlightQuery;
}
public int noMatchSize() {
return noMatchSize;
}
public void noMatchSize(int noMatchSize) {
this.noMatchSize = noMatchSize;
}
public int phraseLimit() {
return phraseLimit;
}
public void phraseLimit(int phraseLimit) {
this.phraseLimit = phraseLimit;
}
public Set<String> matchedFields() {
return matchedFields;
}
public void matchedFields(Set<String> matchedFields) {
this.matchedFields = matchedFields;
}
public Map<String, Object> options() {
return options;
}
public void options(Map<String, Object> options) {
this.options = options;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_search_highlight_SearchContextHighlight.java
|
434 |
public static class ShardStats implements ToXContent, Streamable {
int indices;
int total;
int primaries;
// min/max
int minIndexShards = -1;
int maxIndexShards = -1;
int minIndexPrimaryShards = -1;
int maxIndexPrimaryShards = -1;
double minIndexReplication = -1;
double totalIndexReplication = 0;
double maxIndexReplication = -1;
public ShardStats() {
}
/**
* number of indices in the cluster
*/
public int getIndices() {
return this.indices;
}
/**
* total number of shards in the cluster
*/
public int getTotal() {
return this.total;
}
/**
* total number of primary shards in the cluster
*/
public int getPrimaries() {
return this.primaries;
}
/**
* returns how many *redundant* copies of the data the cluster holds - running with no replicas will return 0
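* <p/>
* e.g. 5 primaries with one replica each give total = 10 and a replication factor of (10 - 5) / 5 = 1.0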
*/
public double getReplication() {
if (primaries == 0) {
return 0;
}
return (((double) (total - primaries)) / primaries);
}
/**
* the maximum number of shards (primary+replicas) an index has
*/
public int getMaxIndexShards() {
return this.maxIndexShards;
}
/**
* the minimum number of shards (primary+replicas) an index has
*/
public int getMinIndexShards() {
return this.minIndexShards;
}
/**
* average number of shards (primary+replicas) across the indices
*/
public double getAvgIndexShards() {
if (this.indices == 0) {
return -1;
}
return ((double) this.total) / this.indices;
}
/**
* the maximum number of primary shards an index has
*/
public int getMaxIndexPrimaryShards() {
return this.maxIndexPrimaryShards;
}
/**
* the minimum number of primary shards an index has
*/
public int getMinIndexPrimaryShards() {
return this.minIndexPrimaryShards;
}
/**
* the average number primary shards across the indices
*/
public double getAvgIndexPrimaryShards() {
if (this.indices == 0) {
return -1;
}
return ((double) this.primaries) / this.indices;
}
/**
* minimum replication factor across the indices. See {@link #getReplication}
*/
public double getMinIndexReplication() {
return this.minIndexReplication;
}
/**
* average replication factor across the indices. See {@link #getReplication}
*/
public double getAvgIndexReplication() {
if (indices == 0) {
return -1;
}
return this.totalIndexReplication / this.indices;
}
/**
* maximum replication factor across the indices. See {@link #getReplication}
*/
public double getMaxIndexReplication() {
return this.maxIndexReplication;
}
public void addIndexShardCount(ShardStats indexShardCount) {
this.indices++;
this.primaries += indexShardCount.primaries;
this.total += indexShardCount.total;
this.totalIndexReplication += indexShardCount.getReplication();
if (this.indices == 1) {
// first index, uninitialized.
minIndexPrimaryShards = indexShardCount.primaries;
maxIndexPrimaryShards = indexShardCount.primaries;
minIndexShards = indexShardCount.total;
maxIndexShards = indexShardCount.total;
minIndexReplication = indexShardCount.getReplication();
maxIndexReplication = minIndexReplication;
} else {
minIndexShards = Math.min(minIndexShards, indexShardCount.total);
minIndexPrimaryShards = Math.min(minIndexPrimaryShards, indexShardCount.primaries);
minIndexReplication = Math.min(minIndexReplication, indexShardCount.getReplication());
maxIndexShards = Math.max(maxIndexShards, indexShardCount.total);
maxIndexPrimaryShards = Math.max(maxIndexPrimaryShards, indexShardCount.primaries);
maxIndexReplication = Math.max(maxIndexReplication, indexShardCount.getReplication());
}
}
public static ShardStats readShardStats(StreamInput in) throws IOException {
ShardStats c = new ShardStats();
c.readFrom(in);
return c;
}
@Override
public void readFrom(StreamInput in) throws IOException {
indices = in.readVInt();
total = in.readVInt();
primaries = in.readVInt();
minIndexShards = in.readVInt();
maxIndexShards = in.readVInt();
minIndexPrimaryShards = in.readVInt();
maxIndexPrimaryShards = in.readVInt();
minIndexReplication = in.readDouble();
totalIndexReplication = in.readDouble();
maxIndexReplication = in.readDouble();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(indices);
out.writeVInt(total);
out.writeVInt(primaries);
out.writeVInt(minIndexShards);
out.writeVInt(maxIndexShards);
out.writeVInt(minIndexPrimaryShards);
out.writeVInt(maxIndexPrimaryShards);
out.writeDouble(minIndexReplication);
out.writeDouble(totalIndexReplication);
out.writeDouble(maxIndexReplication);
}
static final class Fields {
static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString PRIMARIES = new XContentBuilderString("primaries");
static final XContentBuilderString REPLICATION = new XContentBuilderString("replication");
static final XContentBuilderString MIN = new XContentBuilderString("min");
static final XContentBuilderString MAX = new XContentBuilderString("max");
static final XContentBuilderString AVG = new XContentBuilderString("avg");
static final XContentBuilderString INDEX = new XContentBuilderString("index");
}
private void addIntMinMax(XContentBuilderString field, int min, int max, double avg, XContentBuilder builder) throws IOException {
builder.startObject(field);
builder.field(Fields.MIN, min);
builder.field(Fields.MAX, max);
builder.field(Fields.AVG, avg);
builder.endObject();
}
private void addDoubleMinMax(XContentBuilderString field, double min, double max, double avg, XContentBuilder builder) throws IOException {
builder.startObject(field);
builder.field(Fields.MIN, min);
builder.field(Fields.MAX, max);
builder.field(Fields.AVG, avg);
builder.endObject();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.SHARDS);
if (indices > 0) {
builder.field(Fields.TOTAL, total);
builder.field(Fields.PRIMARIES, primaries);
builder.field(Fields.REPLICATION, getReplication());
builder.startObject(Fields.INDEX);
addIntMinMax(Fields.SHARDS, minIndexShards, maxIndexShards, getAvgIndexShards(), builder);
addIntMinMax(Fields.PRIMARIES, minIndexPrimaryShards, maxIndexPrimaryShards, getAvgIndexPrimaryShards(), builder);
addDoubleMinMax(Fields.REPLICATION, minIndexReplication, maxIndexReplication, getAvgIndexReplication(), builder);
builder.endObject();
}
builder.endObject();
return builder;
}
@Override
public String toString() {
return "total [" + total + "] primaries [" + primaries + "]";
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsIndices.java
|
27 |
public class ClusterMembers
{
public static final Predicate<ClusterMember> ALIVE = new Predicate<ClusterMember>()
{
@Override
public boolean accept( ClusterMember item )
{
return item.isAlive();
}
};
private final InstanceId me;
public static Predicate<ClusterMember> inRole( final String role )
{
return new Predicate<ClusterMember>()
{
@Override
public boolean accept( ClusterMember item )
{
return item.hasRole( role );
}
};
}
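/*
 * A minimal usage sketch (the role name is hypothetical):
 *
 * for ( ClusterMember member : clusterMembers.getMembers() )
 * {
 *     if ( ClusterMembers.inRole( "master" ).accept( member ) )
 *     {
 *         // member currently holds the master role
 *     }
 * }
 */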
private final Map<InstanceId, ClusterMember> members = new CopyOnWriteHashMap<InstanceId, ClusterMember>();
public ClusterMembers( Cluster cluster, Heartbeat heartbeat, ClusterMemberEvents clusterMemberEvents,
InstanceId me )
{
this.me = me;
cluster.addClusterListener( new HAMClusterListener() );
heartbeat.addHeartbeatListener( new HAMHeartbeatListener() );
clusterMemberEvents.addClusterMemberListener( new HAMClusterMemberListener() );
}
public Iterable<ClusterMember> getMembers()
{
return members.values();
}
public ClusterMember getSelf()
{
for ( ClusterMember clusterMember : getMembers() )
{
if ( clusterMember.getMemberId().equals( me ) )
{
return clusterMember;
}
}
return null;
}
private ClusterMember getMember( InstanceId server )
{
ClusterMember clusterMember = members.get( server );
if ( clusterMember == null )
throw new IllegalStateException( "Member " + server + " not found in " + new HashMap(members) );
return clusterMember;
}
private class HAMClusterListener extends ClusterListener.Adapter
{
@Override
public void enteredCluster( ClusterConfiguration configuration )
{
Map<InstanceId, ClusterMember> newMembers = new HashMap<InstanceId, ClusterMember>();
for ( InstanceId memberClusterUri : configuration.getMembers().keySet() )
newMembers.put( memberClusterUri, new ClusterMember( memberClusterUri ) );
members.clear();
members.putAll( newMembers );
}
@Override
public void leftCluster()
{
members.clear();
}
@Override
public void joinedCluster( InstanceId member, URI memberUri )
{
members.put( member, new ClusterMember( member ) );
}
@Override
public void leftCluster( InstanceId member )
{
members.remove( member );
}
}
private class HAMClusterMemberListener extends ClusterMemberListener.Adapter
{
private InstanceId masterId = null;
@Override
public void coordinatorIsElected( InstanceId coordinatorId )
{
if ( coordinatorId.equals( this.masterId ) )
{
return;
}
this.masterId = coordinatorId;
Map<InstanceId, ClusterMember> newMembers = new CopyOnWriteHashMap<InstanceId, ClusterMember>();
for ( Map.Entry<InstanceId, ClusterMember> memberEntry : members.entrySet() )
{
newMembers.put( memberEntry.getKey(), memberEntry.getValue().unavailableAs(
HighAvailabilityModeSwitcher.MASTER ).unavailableAs( HighAvailabilityModeSwitcher.SLAVE ) );
}
members.clear();
members.putAll( newMembers );
}
@Override
public void memberIsAvailable( String role, InstanceId instanceId, URI roleUri )
{
members.put( instanceId, getMember( instanceId ).availableAs( role, roleUri ) );
}
@Override
public void memberIsUnavailable( String role, InstanceId unavailableId )
{
ClusterMember member = null;
try
{
member = getMember( unavailableId );
members.put( unavailableId, member.unavailableAs( role ) );
}
catch ( IllegalStateException e )
{
// Unknown member
}
}
}
private class HAMHeartbeatListener extends HeartbeatListener.Adapter
{
@Override
public void failed( InstanceId server )
{
if (members.containsKey( server ))
{
members.put( server, getMember( server ).failed() );
}
}
@Override
public void alive( InstanceId server )
{
if (members.containsKey( server ))
members.put( server, getMember( server ).alive() );
}
}
}
| 1no label
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_member_ClusterMembers.java
|
4,523 |
private class PurgerThread extends Thread {
volatile boolean running = true;
public PurgerThread(String name) {
super(name);
setDaemon(true);
}
public void doStop() {
running = false;
}
public void run() {
while (running) {
try {
List<IndexShard> shardsToPurge = getShardsToPurge();
purgeShards(shardsToPurge);
} catch (Throwable e) {
if (running) {
logger.warn("failed to execute ttl purge", e);
}
}
try {
Thread.sleep(interval.millis());
} catch (InterruptedException e) {
// ignore, if we are interrupted because we are shutting down, running will be false
}
}
}
/**
* Returns the shards to purge, i.e. the locally started primary shards that have TTL enabled and disable_purge set to false
*/
private List<IndexShard> getShardsToPurge() {
List<IndexShard> shardsToPurge = new ArrayList<IndexShard>();
MetaData metaData = clusterService.state().metaData();
for (IndexService indexService : indicesService) {
// check the value of disable_purge for this index
IndexMetaData indexMetaData = metaData.index(indexService.index().name());
if (indexMetaData == null) {
continue;
}
boolean disablePurge = indexMetaData.settings().getAsBoolean(INDEX_TTL_DISABLE_PURGE, false);
if (disablePurge) {
continue;
}
// should be optimized with the hasTTL flag
FieldMappers ttlFieldMappers = indexService.mapperService().name(TTLFieldMapper.NAME);
if (ttlFieldMappers == null) {
continue;
}
// check if ttl is enabled for at least one type of this index
boolean hasTTLEnabled = false;
for (FieldMapper ttlFieldMapper : ttlFieldMappers) {
if (((TTLFieldMapper) ttlFieldMapper).enabled()) {
hasTTLEnabled = true;
break;
}
}
if (hasTTLEnabled) {
for (IndexShard indexShard : indexService) {
if (indexShard.state() == IndexShardState.STARTED && indexShard.routingEntry().primary() && indexShard.routingEntry().started()) {
shardsToPurge.add(indexShard);
}
}
}
}
return shardsToPurge;
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_ttl_IndicesTTLService.java
|
526 |
public class FlushRequestBuilder extends BroadcastOperationRequestBuilder<FlushRequest, FlushResponse, FlushRequestBuilder> {
public FlushRequestBuilder(IndicesAdminClient indicesClient) {
super((InternalIndicesAdminClient) indicesClient, new FlushRequest());
}
public FlushRequestBuilder setFull(boolean full) {
request.full(full);
return this;
}
@Override
protected void doExecute(ActionListener<FlushResponse> listener) {
((IndicesAdminClient) client).flush(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_flush_FlushRequestBuilder.java
|
97 |
public interface PageDao {
public Page readPageById(Long id);
public PageTemplate readPageTemplateById(Long id);
/**
* Saves the given {@link PageTemplate}
*
* @param template the {@link PageTemplate} to save
* @return the database-saved {@link PageTemplate}
*/
public PageTemplate savePageTemplate(PageTemplate template);
public Map<String, PageField> readPageFieldsByPage(Page page);
public Page updatePage(Page page);
public void delete(Page page);
public Page addPage(Page clonedPage);
/**
* Returns all pages, regardless of any sandbox they are a part of
* @return all Pages configured in the system
*/
public List<Page> readAllPages();
/**
* Returns all page templates, regardless of any sandbox they are a part of
* @return all {@link PageTemplate}s configured in the system
*/
public List<PageTemplate> readAllPageTemplates();
public List<Page> findPageByURI(SandBox sandBox, Locale fullLocale, Locale languageOnlyLocale, String uri);
public List<Page> findPageByURI(SandBox sandBox, Locale locale, String uri);
public void detachPage(Page page);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_dao_PageDao.java
|
1,555 |
public class VertexMap {
public static final String IDS = Tokens.makeNamespace(VertexMap.class) + ".ids";
public enum Counters {
VERTICES_PROCESSED
}
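/**
 * Builds the Hadoop job configuration read back by {@link Map#setup}; a minimal usage
 * sketch (the vertex ids are hypothetical):
 * <pre>
 * Configuration conf = VertexMap.createConfiguration(1L, 2L, 3L);
 * </pre>
 */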
public static Configuration createConfiguration(final long... ids) {
final String[] idStrings = new String[ids.length];
for (int i = 0; i < ids.length; i++) {
idStrings[i] = String.valueOf(ids[i]);
}
final Configuration configuration = new EmptyConfiguration();
configuration.setStrings(IDS, idStrings);
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex> {
private Collection<Long> ids;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
//todo: make as list and double up repeats
this.ids = VertexMap.Map.getLongCollection(context.getConfiguration(), IDS, new HashSet<Long>());
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, NullWritable, FaunusVertex>.Context context) throws IOException, InterruptedException {
if (this.ids.contains(value.getLongId())) {
value.startPath();
DEFAULT_COMPAT.incrementContextCounter(context, Counters.VERTICES_PROCESSED, 1L);
} else {
value.clearPaths();
}
context.write(NullWritable.get(), value);
}
private static Collection<Long> getLongCollection(final Configuration conf, final String key, final Collection<Long> collection) {
for (final String value : conf.getStrings(key)) {
collection.add(Long.valueOf(value));
}
return collection;
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_transform_VertexMap.java
|
489 |
private final class ClusterListenerThread extends Thread {
private volatile ClientConnection conn;
private final List<MemberImpl> members = new LinkedList<MemberImpl>();
private final CountDownLatch latch = new CountDownLatch(1);
private ClusterListenerThread(ThreadGroup group, String name) {
super(group, name);
}
public void await() throws InterruptedException {
latch.await();
}
public void run() {
while (!Thread.currentThread().isInterrupted()) {
try {
if (conn == null) {
try {
conn = pickConnection();
} catch (Exception e) {
LOGGER.severe("Error while connecting to cluster!", e);
client.getLifecycleService().shutdown();
latch.countDown();
return;
}
}
getInvocationService().triggerFailedListeners();
loadInitialMemberList();
listenMembershipEvents();
} catch (Exception e) {
if (client.getLifecycleService().isRunning()) {
if (LOGGER.isFinestEnabled()) {
LOGGER.warning("Error while listening cluster events! -> " + conn, e);
} else {
LOGGER.warning("Error while listening cluster events! -> " + conn + ", Error: " + e.toString());
}
}
connectionManager.markOwnerConnectionAsClosed();
IOUtil.closeResource(conn);
conn = null;
fireConnectionEvent(true);
}
try {
Thread.sleep(SLEEP_TIME);
} catch (InterruptedException e) {
latch.countDown();
break;
}
}
}
private ClientInvocationServiceImpl getInvocationService() {
return (ClientInvocationServiceImpl) client.getInvocationService();
}
private ClientConnection pickConnection() throws Exception {
final List<InetSocketAddress> socketAddresses = new LinkedList<InetSocketAddress>();
if (!members.isEmpty()) {
for (MemberImpl member : members) {
socketAddresses.add(member.getInetSocketAddress());
}
Collections.shuffle(socketAddresses);
}
socketAddresses.addAll(getConfigAddresses());
return connectToOne(socketAddresses);
}
private void loadInitialMemberList() throws Exception {
final SerializationService serializationService = getSerializationService();
final AddMembershipListenerRequest request = new AddMembershipListenerRequest();
final SerializableCollection coll = (SerializableCollection) connectionManager.sendAndReceive(request, conn);
Map<String, MemberImpl> prevMembers = Collections.emptyMap();
if (!members.isEmpty()) {
prevMembers = new HashMap<String, MemberImpl>(members.size());
for (MemberImpl member : members) {
prevMembers.put(member.getUuid(), member);
}
members.clear();
}
for (Data data : coll) {
members.add((MemberImpl) serializationService.toObject(data));
}
updateMembersRef();
LOGGER.info(membersString());
final List<MembershipEvent> events = new LinkedList<MembershipEvent>();
final Set<Member> eventMembers = Collections.unmodifiableSet(new LinkedHashSet<Member>(members));
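// diff against the previous member list: new uuids become MEMBER_ADDED events, leftover uuids become MEMBER_REMOVED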
for (MemberImpl member : members) {
final MemberImpl former = prevMembers.remove(member.getUuid());
if (former == null) {
events.add(new MembershipEvent(client.getCluster(), member, MembershipEvent.MEMBER_ADDED, eventMembers));
}
}
for (MemberImpl member : prevMembers.values()) {
events.add(new MembershipEvent(client.getCluster(), member, MembershipEvent.MEMBER_REMOVED, eventMembers));
}
for (MembershipEvent event : events) {
fireMembershipEvent(event);
}
latch.countDown();
}
private void listenMembershipEvents() throws IOException {
final SerializationService serializationService = getSerializationService();
while (!Thread.currentThread().isInterrupted()) {
final Data clientResponseData = conn.read();
final ClientResponse clientResponse = serializationService.toObject(clientResponseData);
final Object eventObject = serializationService.toObject(clientResponse.getResponse());
final ClientMembershipEvent event = (ClientMembershipEvent) eventObject;
final MemberImpl member = (MemberImpl) event.getMember();
boolean membersUpdated = false;
if (event.getEventType() == MembershipEvent.MEMBER_ADDED) {
members.add(member);
membersUpdated = true;
} else if (event.getEventType() == ClientMembershipEvent.MEMBER_REMOVED) {
members.remove(member);
membersUpdated = true;
// getConnectionManager().removeConnectionPool(member.getAddress()); //TODO
} else if (event.getEventType() == ClientMembershipEvent.MEMBER_ATTRIBUTE_CHANGED) {
MemberAttributeChange memberAttributeChange = event.getMemberAttributeChange();
Map<Address, MemberImpl> memberMap = membersRef.get();
if (memberMap != null) {
for (MemberImpl target : memberMap.values()) {
if (target.getUuid().equals(memberAttributeChange.getUuid())) {
final MemberAttributeOperationType operationType = memberAttributeChange.getOperationType();
final String key = memberAttributeChange.getKey();
final Object value = memberAttributeChange.getValue();
target.updateAttribute(operationType, key, value);
MemberAttributeEvent memberAttributeEvent = new MemberAttributeEvent(
client.getCluster(), target, operationType, key, value);
fireMemberAttributeEvent(memberAttributeEvent);
break;
}
}
}
}
if (membersUpdated) {
((ClientPartitionServiceImpl) client.getClientPartitionService()).refreshPartitions();
updateMembersRef();
LOGGER.info(membersString());
fireMembershipEvent(new MembershipEvent(client.getCluster(), member, event.getEventType(),
Collections.unmodifiableSet(new LinkedHashSet<Member>(members))));
}
}
}
private void fireMembershipEvent(final MembershipEvent event) {
client.getClientExecutionService().executeInternal(new Runnable() {
public void run() {
for (MembershipListener listener : listeners.values()) {
if (event.getEventType() == MembershipEvent.MEMBER_ADDED) {
listener.memberAdded(event);
} else {
listener.memberRemoved(event);
}
}
}
});
}
private void fireMemberAttributeEvent(final MemberAttributeEvent event) {
client.getClientExecutionService().executeInternal(new Runnable() {
@Override
public void run() {
for (MembershipListener listener : listeners.values()) {
listener.memberAttributeChanged(event);
}
}
});
}
private void updateMembersRef() {
final Map<Address, MemberImpl> map = new LinkedHashMap<Address, MemberImpl>(members.size());
for (MemberImpl member : members) {
map.put(member.getAddress(), member);
}
membersRef.set(Collections.unmodifiableMap(map));
}
void shutdown() {
interrupt();
final ClientConnection c = conn;
if (c != null) {
c.close();
}
}
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_impl_ClientClusterServiceImpl.java
|
729 |
loadEntriesMajor(key, inclusive, new RangeResultListener<K, V>() {
@Override
public boolean addResult(Map.Entry<K, V> entry) {
result.add(entry.getValue());
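// returning false stops the range scan; maxValuesToFetch == -1 means no limit was requested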
if (maxValuesToFetch > -1 && result.size() >= maxValuesToFetch)
return false;
return true;
}
});
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_sbtree_local_OSBTree.java
|
73 |
@SuppressWarnings("serial")
static final class MapReduceKeysTask<K,V,U>
extends BulkTask<K,V,U> {
final Fun<? super K, ? extends U> transformer;
final BiFun<? super U, ? super U, ? extends U> reducer;
U result;
MapReduceKeysTask<K,V,U> rights, nextRight;
MapReduceKeysTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
MapReduceKeysTask<K,V,U> nextRight,
Fun<? super K, ? extends U> transformer,
BiFun<? super U, ? super U, ? extends U> reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.transformer = transformer;
this.reducer = reducer;
}
public final U getRawResult() { return result; }
public final void compute() {
final Fun<? super K, ? extends U> transformer;
final BiFun<? super U, ? super U, ? extends U> reducer;
if ((transformer = this.transformer) != null &&
(reducer = this.reducer) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new MapReduceKeysTask<K,V,U>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, transformer, reducer)).fork();
}
U r = null;
for (Node<K,V> p; (p = advance()) != null; ) {
U u;
if ((u = transformer.apply(p.key)) != null)
r = (r == null) ? u : reducer.apply(r, u);
}
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") MapReduceKeysTask<K,V,U>
t = (MapReduceKeysTask<K,V,U>)c,
s = t.rights;
while (s != null) {
U tr, sr;
if ((sr = s.result) != null)
t.result = (((tr = t.result) == null) ? sr :
reducer.apply(tr, sr));
s = t.rights = s.nextRight;
}
}
}
}
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
695 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name="BLC_PRODUCT_ATTRIBUTE")
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blStandardElements")
@AdminPresentationClass(friendlyName = "ProductAttributeImpl_baseProductAttribute")
public class ProductAttributeImpl implements ProductAttribute {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
/** The id. */
@Id
@GeneratedValue(generator= "ProductAttributeId")
@GenericGenerator(
name="ProductAttributeId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="ProductAttributeImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.core.catalog.domain.ProductAttributeImpl")
}
)
@Column(name = "PRODUCT_ATTRIBUTE_ID")
protected Long id;
/** The name. */
@Column(name = "NAME", nullable=false)
@Index(name="PRODUCTATTRIBUTE_NAME_INDEX", columnNames={"NAME"})
@AdminPresentation(visibility = VisibilityEnum.HIDDEN_ALL)
protected String name;
/** The value. */
@Column(name = "VALUE")
@AdminPresentation(friendlyName = "ProductAttributeImpl_Attribute_Value", order=2, group = "ProductAttributeImpl_Description", prominent=true)
protected String value;
/** The searchable. */
@Column(name = "SEARCHABLE")
@AdminPresentation(excluded = true)
protected Boolean searchable = false;
/** The product. */
@ManyToOne(targetEntity = ProductImpl.class, optional=false)
@JoinColumn(name = "PRODUCT_ID")
@Index(name="PRODUCTATTRIBUTE_INDEX", columnNames={"PRODUCT_ID"})
protected Product product;
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#getId()
*/
@Override
public Long getId() {
return id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#setId(java.lang.Long)
*/
@Override
public void setId(Long id) {
this.id = id;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#getValue()
*/
@Override
public String getValue() {
return value;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#setValue(java.lang.String)
*/
@Override
public void setValue(String value) {
this.value = value;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#getSearchable()
*/
@Override
public Boolean getSearchable() {
if (searchable == null) {
return Boolean.FALSE;
} else {
return searchable;
}
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#setSearchable(java.lang.Boolean)
*/
@Override
public void setSearchable(Boolean searchable) {
this.searchable = searchable;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#getName()
*/
@Override
public String getName() {
return name;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#setName(java.lang.String)
*/
@Override
public void setName(String name) {
this.name = name;
}
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return value;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#getProduct()
*/
@Override
public Product getProduct() {
return product;
}
/* (non-Javadoc)
* @see org.broadleafcommerce.core.catalog.domain.ProductAttribute#setProduct(org.broadleafcommerce.core.catalog.domain.Product)
*/
@Override
public void setProduct(Product product) {
this.product = product;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((name == null) ? 0 : name.hashCode());
result = prime * result + ((product == null) ? 0 : product.hashCode());
result = prime * result + ((value == null) ? 0 : value.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ProductAttributeImpl other = (ProductAttributeImpl) obj;
if (id != null && other.id != null) {
return id.equals(other.id);
}
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
if (product == null) {
if (other.product != null)
return false;
} else if (!product.equals(other.product))
return false;
if (value == null) {
if (other.value != null)
return false;
} else if (!value.equals(other.value))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_ProductAttributeImpl.java
|
1,482 |
public abstract class OSQLFunctionPathFinder<T extends Comparable<T>> extends OSQLFunctionMathAbstract {
protected OrientBaseGraph db;
protected Set<Vertex> settledNodes;
protected Set<Vertex> unSettledNodes;
protected Map<Vertex, Vertex> predecessors;
protected Map<Vertex, T> distance;
protected Vertex paramSourceVertex;
protected Vertex paramDestinationVertex;
protected Direction paramDirection = Direction.OUT;
public OSQLFunctionPathFinder(final String iName, final int iMinParams, final int iMaxParams) {
super(iName, iMinParams, iMaxParams);
}
protected abstract T getDistance(Vertex node, Vertex target);
protected abstract T getShortestDistance(Vertex destination);
protected abstract T getMinimumDistance();
protected abstract T sumDistances(T iDistance1, T iDistance2);
public Object execute(final Object[] iParameters, final OCommandContext iContext) {
settledNodes = new HashSet<Vertex>();
unSettledNodes = new HashSet<Vertex>();
distance = new HashMap<Vertex, T>();
predecessors = new HashMap<Vertex, Vertex>();
distance.put(paramSourceVertex, getMinimumDistance());
unSettledNodes.add(paramSourceVertex);
while (continueTraversing()) {
final Vertex node = getMinimum(unSettledNodes);
settledNodes.add(node);
unSettledNodes.remove(node);
findMinimalDistances(node);
}
return getPath();
}
/*
* Returns the path from the source to the selected target, or <code>null</code> if no path exists.
*/
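// Hypothetical example: if the cheapest route found was v1 -> v2 -> v3,
// predecessors maps v3 -> v2 and v2 -> v1, so walking back from the destination
// yields [v3, v2, v1], which Collections.reverse(path) turns into [v1, v2, v3].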
public LinkedList<Vertex> getPath() {
final LinkedList<Vertex> path = new LinkedList<Vertex>();
Vertex step = paramDestinationVertex;
// Check if a path exists
if (predecessors.get(step) == null)
return null;
path.add(step);
while (predecessors.get(step) != null) {
step = predecessors.get(step);
path.add(step);
}
// Put it into the correct order
Collections.reverse(path);
return path;
}
public boolean aggregateResults() {
return false;
}
@Override
public Object getResult() {
return getPath();
}
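// Relaxation step: for each unsettled neighbor, if reaching it through `node`
// is cheaper than the best distance recorded so far, store the new distance
// and predecessor and mark the neighbor as unsettled.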
protected void findMinimalDistances(final Vertex node) {
final List<Vertex> adjacentNodes = getNeighbors(node);
for (Vertex target : adjacentNodes) {
final T d = sumDistances(getShortestDistance(node), getDistance(node, target));
if (getShortestDistance(target).compareTo(d) > 0) {
distance.put(target, d);
predecessors.put(target, node);
unSettledNodes.add(target);
}
}
}
protected List<Vertex> getNeighbors(final Vertex node) {
final List<Vertex> neighbors = new ArrayList<Vertex>();
if (node != null) {
for (Vertex v : node.getVertices(paramDirection))
if (v != null && !isSettled(v))
neighbors.add(v);
}
return neighbors;
}
protected Vertex getMinimum(final Set<Vertex> vertexes) {
Vertex minimum = null;
for (Vertex vertex : vertexes) {
if (minimum == null || getShortestDistance(vertex).compareTo(getShortestDistance(minimum)) < 0)
minimum = vertex;
}
return minimum;
}
protected boolean isSettled(final Vertex vertex) {
// settledNodes stores Vertex instances, so membership must be tested with the
// vertex itself; testing vertex.getId() against a Set<Vertex> never matches.
return settledNodes.contains(vertex);
}
protected boolean continueTraversing() {
return unSettledNodes.size() > 0;
}
}
| 1no label
|
graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionPathFinder.java
|
376 |
public class PutRepositoryResponse extends AcknowledgedResponse {
PutRepositoryResponse() {
}
PutRepositoryResponse(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_put_PutRepositoryResponse.java
|
366 |
public class TransportDeleteRepositoryAction extends TransportMasterNodeOperationAction<DeleteRepositoryRequest, DeleteRepositoryResponse> {
private final RepositoriesService repositoriesService;
@Inject
public TransportDeleteRepositoryAction(Settings settings, TransportService transportService, ClusterService clusterService,
RepositoriesService repositoriesService, ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
this.repositoriesService = repositoriesService;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected String transportAction() {
return DeleteRepositoryAction.NAME;
}
@Override
protected DeleteRepositoryRequest newRequest() {
return new DeleteRepositoryRequest();
}
@Override
protected DeleteRepositoryResponse newResponse() {
return new DeleteRepositoryResponse();
}
@Override
protected ClusterBlockException checkBlock(DeleteRepositoryRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
}
@Override
protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, final ActionListener<DeleteRepositoryResponse> listener) throws ElasticsearchException {
repositoriesService.unregisterRepository(
new RepositoriesService.UnregisterRepositoryRequest("delete_repository [" + request.name() + "]", request.name())
.masterNodeTimeout(request.masterNodeTimeout()).ackTimeout(request.timeout()),
new ActionListener<RepositoriesService.UnregisterRepositoryResponse>() {
@Override
public void onResponse(RepositoriesService.UnregisterRepositoryResponse unregisterRepositoryResponse) {
listener.onResponse(new DeleteRepositoryResponse(unregisterRepositoryResponse.isAcknowledged()));
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_delete_TransportDeleteRepositoryAction.java
|
493 |
public class TransportClearIndicesCacheAction extends TransportBroadcastOperationAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, ShardClearIndicesCacheRequest, ShardClearIndicesCacheResponse> {
private final IndicesService indicesService;
private final IndicesTermsFilterCache termsFilterCache;
private final CacheRecycler cacheRecycler;
@Inject
public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService, IndicesTermsFilterCache termsFilterCache,
CacheRecycler cacheRecycler) {
super(settings, threadPool, clusterService, transportService);
this.indicesService = indicesService;
this.termsFilterCache = termsFilterCache;
this.cacheRecycler = cacheRecycler;
}
@Override
protected String executor() {
return ThreadPool.Names.MANAGEMENT;
}
@Override
protected String transportAction() {
return ClearIndicesCacheAction.NAME;
}
@Override
protected ClearIndicesCacheRequest newRequest() {
return new ClearIndicesCacheRequest();
}
@Override
protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
int successfulShards = 0;
int failedShards = 0;
List<ShardOperationFailedException> shardFailures = null;
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// simply ignore non active shards
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = newArrayList();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
successfulShards++;
}
}
return new ClearIndicesCacheResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
}
@Override
protected ShardClearIndicesCacheRequest newShardRequest() {
return new ShardClearIndicesCacheRequest();
}
@Override
protected ShardClearIndicesCacheRequest newShardRequest(ShardRouting shard, ClearIndicesCacheRequest request) {
return new ShardClearIndicesCacheRequest(shard.index(), shard.id(), request);
}
@Override
protected ShardClearIndicesCacheResponse newShardResponse() {
return new ShardClearIndicesCacheResponse();
}
@Override
protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) throws ElasticsearchException {
IndexService service = indicesService.indexService(request.index());
if (service != null) {
// we always clear the query cache
service.cache().queryParserCache().clear();
boolean clearedAtLeastOne = false;
if (request.filterCache()) {
clearedAtLeastOne = true;
service.cache().filter().clear("api");
termsFilterCache.clear("api");
}
if (request.filterKeys() != null && request.filterKeys().length > 0) {
clearedAtLeastOne = true;
service.cache().filter().clear("api", request.filterKeys());
termsFilterCache.clear("api", request.filterKeys());
}
if (request.fieldDataCache()) {
clearedAtLeastOne = true;
if (request.fields() == null || request.fields().length == 0) {
service.fieldData().clear();
} else {
for (String field : request.fields()) {
service.fieldData().clearField(field);
}
}
}
if (request.recycler()) {
logger.info("Clear CacheRecycler on index [{}]", service.index());
clearedAtLeastOne = true;
// cacheRecycler.clear();
}
if (request.idCache()) {
clearedAtLeastOne = true;
service.cache().idCache().clear();
}
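// Nothing was requested explicitly: either clear just the named fields'
// field-data entries, or fall back to clearing every cache for the index.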
if (!clearedAtLeastOne) {
if (request.fields() != null && request.fields().length > 0) {
// only clear caches relating to the specified fields
for (String field : request.fields()) {
service.fieldData().clearField(field);
}
} else {
service.cache().clear("api");
service.fieldData().clear();
termsFilterCache.clear("api");
}
}
}
return new ShardClearIndicesCacheResponse(request.index(), request.shardId());
}
/**
* The refresh request works against *all* shards.
*/
@Override
protected GroupShardsIterator shards(ClusterState clusterState, ClearIndicesCacheRequest request, String[] concreteIndices) {
return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, ClearIndicesCacheRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, ClearIndicesCacheRequest request, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_cache_clear_TransportClearIndicesCacheAction.java
|
216 |
public class HydrationScanner implements ClassVisitor, FieldVisitor, AnnotationVisitor {
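// Scans entity-class bytecode with ASM visitors: class-level visits pick up
// @Cache (for the cache region), field-level visits pick up @Id and @Hydrated,
// and for each annotated field a getter/setter pair is captured for later
// reflective invocation during cache hydration.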
private static final int CLASSSTAGE = 0;
private static final int FIELDSTAGE = 1;
@SuppressWarnings("unchecked")
public HydrationScanner(Class topEntityClass, Class entityClass) {
this.topEntityClass = topEntityClass;
this.entityClass = entityClass;
}
private String cacheRegion;
private Map<String, Method[]> idMutators = new HashMap<String, Method[]>();
private Map<String, HydrationItemDescriptor> cacheMutators = new HashMap<String, HydrationItemDescriptor>();
@SuppressWarnings("unchecked")
private final Class entityClass;
@SuppressWarnings("unchecked")
private final Class topEntityClass;
private int stage = CLASSSTAGE;
@SuppressWarnings("unchecked")
private Class clazz;
private String annotation;
private String fieldName;
@SuppressWarnings("unchecked")
private Class fieldClass;
public void init() {
try {
InputStream in = HydrationScanner.class.getClassLoader().getResourceAsStream(topEntityClass.getName().replace('.', '/') + ".class");
// close each class-file stream once it has been visited, so handles are not leaked
try {
new ClassReader(in).accept(this, ClassReader.SKIP_DEBUG);
} finally {
in.close();
}
in = HydrationScanner.class.getClassLoader().getResourceAsStream(entityClass.getName().replace('.', '/') + ".class");
try {
new ClassReader(in).accept(this, ClassReader.SKIP_DEBUG);
} finally {
in.close();
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public String getCacheRegion() {
return cacheRegion;
}
public Map<String, Method[]> getIdMutators() {
return idMutators;
}
public Map<String, HydrationItemDescriptor> getCacheMutators() {
return cacheMutators;
}
//Common
public AnnotationVisitor visitAnnotation(String arg0, boolean arg1) {
Type annotationType = Type.getType(arg0);
switch(stage) {
case CLASSSTAGE: {
if (annotationType.getClassName().equals(Cache.class.getName())){
annotation = Cache.class.getName();
}
break;
}
case FIELDSTAGE: {
if (annotationType.getClassName().equals(Id.class.getName())){
idMutators.put(fieldName, retrieveMutators());
}
if (annotationType.getClassName().equals(Hydrated.class.getName())){
annotation = Hydrated.class.getName();
}
break;
}
default : {
annotation = null;
fieldName = null;
break;
}
}
return this;
}
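// Accessor lookup order for a field "foo": getFoo(), then isFoo(), then a
// bare foo() method; the mutator must be setFoo(..) taking the field's type.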
private Method[] retrieveMutators() {
String mutatorName = fieldName.substring(0, 1).toUpperCase() + fieldName.substring(1);
Method getMethod = null;
try {
getMethod = clazz.getMethod("get"+mutatorName, new Class[]{});
} catch (Exception e) {
//do nothing
}
if (getMethod == null) {
try {
getMethod = clazz.getMethod("is"+mutatorName, new Class[]{});
} catch (Exception e) {
//do nothing
}
}
if (getMethod == null) {
try {
getMethod = clazz.getMethod(fieldName, new Class[]{});
} catch (Exception e) {
//do nothing
}
}
Method setMethod = null;
try {
setMethod = clazz.getMethod("set"+mutatorName, new Class[]{fieldClass});
} catch (Exception e) {
//do nothing
}
if (getMethod == null || setMethod == null) {
throw new RuntimeException("Unable to find a getter and setter method for the AdminPresentation field: " + fieldName + ". Make sure you have a getter method entitled: get" + mutatorName + "(), or is" + mutatorName + "(), or " + fieldName + "(). Make sure you have a setter method entitled: set" + mutatorName + "(..).");
}
return new Method[]{getMethod, setMethod};
}
//FieldVisitor
public void visitAttribute(Attribute arg0) {
//do nothing
}
public void visitEnd() {
//do nothing
}
//ClassVisitor
public void visit(int arg0, int arg1, String arg2, String arg3, String arg4, String[] arg5) {
try {
clazz = Class.forName(arg2.replace('/', '.'));
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
stage = CLASSSTAGE;
}
public FieldVisitor visitField(int arg0, String arg1, String arg2, String arg3, Object arg4) {
stage = FIELDSTAGE;
fieldName = arg1;
Type fieldType = Type.getType(arg2);
switch(fieldType.getSort()){
case Type.BOOLEAN:
fieldClass = boolean.class;
break;
case Type.BYTE:
fieldClass = byte.class;
break;
case Type.CHAR:
fieldClass = char.class;
break;
case Type.DOUBLE:
fieldClass = double.class;
break;
case Type.FLOAT:
fieldClass = float.class;
break;
case Type.INT:
fieldClass = int.class;
break;
case Type.LONG:
fieldClass = long.class;
break;
case Type.SHORT:
fieldClass = short.class;
break;
case Type.OBJECT:
try {
fieldClass = Class.forName(Type.getType(arg2).getClassName());
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
break;
}
return this;
}
public void visitInnerClass(String arg0, String arg1, String arg2, int arg3) {
//do nothing
}
public MethodVisitor visitMethod(int arg0, String arg1, String arg2, String arg3, String[] arg4) {
return new EmptyVisitor();
}
public void visitOuterClass(String arg0, String arg1, String arg2) {
//do nothing
}
public void visitSource(String arg0, String arg1) {
//do nothing
}
//AnnotationVisitor
public void visit(String arg0, Object arg1) {
if (Cache.class.getName().equals(annotation) && "region".equals(arg0)) {
cacheRegion = (String) arg1;
}
if (Hydrated.class.getName().equals(annotation) && "factoryMethod".equals(arg0)) {
HydrationItemDescriptor itemDescriptor = new HydrationItemDescriptor();
itemDescriptor.setFactoryMethod((String) arg1);
itemDescriptor.setMutators(retrieveMutators());
cacheMutators.put(fieldName, itemDescriptor);
}
}
public AnnotationVisitor visitAnnotation(String arg0, String arg1) {
return this;
}
public AnnotationVisitor visitArray(String arg0) {
return this;
}
public void visitEnum(String arg0, String arg1, String arg2) {
//do nothing
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_cache_engine_HydrationScanner.java
|
800 |
@Entity
@Table(name = "BLC_QUAL_CRIT_OFFER_XREF")
@Inheritance(strategy=InheritanceType.JOINED)
public class CriteriaOfferXref {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
/** The embedded composite key. */
@EmbeddedId
CriteriaOfferXrefPK criteriaOfferXrefPK = new CriteriaOfferXrefPK();
public CriteriaOfferXrefPK getCriteriaOfferXrefPK() {
return criteriaOfferXrefPK;
}
public void setCriteriaOfferXrefPK(final CriteriaOfferXrefPK criteriaOfferXrefPK) {
this.criteriaOfferXrefPK = criteriaOfferXrefPK;
}
public static class CriteriaOfferXrefPK implements Serializable {
/** The Constant serialVersionUID. */
private static final long serialVersionUID = 1L;
@ManyToOne(targetEntity = OfferImpl.class, optional=false)
@JoinColumn(name = "OFFER_ID")
protected Offer offer = new OfferImpl();
@ManyToOne(targetEntity = OfferItemCriteriaImpl.class, optional=false)
@JoinColumn(name = "OFFER_ITEM_CRITERIA_ID")
protected OfferItemCriteria offerCriteria = new OfferItemCriteriaImpl();
public Offer getOffer() {
return offer;
}
public void setOffer(Offer offer) {
this.offer = offer;
}
public OfferItemCriteria getOfferCriteria() {
return offerCriteria;
}
public void setOfferCriteria(OfferItemCriteria offerCriteria) {
this.offerCriteria = offerCriteria;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((offer == null) ? 0 : offer.hashCode());
result = prime * result + ((offerCriteria == null) ? 0 : offerCriteria.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
CriteriaOfferXrefPK other = (CriteriaOfferXrefPK) obj;
if (offer == null) {
if (other.offer != null)
return false;
} else if (!offer.equals(other.offer))
return false;
if (offerCriteria == null) {
if (other.offerCriteria != null)
return false;
} else if (!offerCriteria.equals(other.offerCriteria))
return false;
return true;
}
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_domain_CriteriaOfferXref.java
|
1,130 |
public abstract class OAbstractSQLMethod implements OSQLMethod {
private final String name;
private final int minparams;
private final int maxparams;
public OAbstractSQLMethod(String name) {
this(name, 0);
}
public OAbstractSQLMethod(String name, int nbparams) {
this(name, nbparams, nbparams);
}
public OAbstractSQLMethod(String name, int minparams, int maxparams) {
this.name = name;
this.minparams = minparams;
this.maxparams = maxparams;
}
@Override
public String getName() {
return name;
}
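// Builds a human-readable signature; e.g. a (hypothetical) method named
// "substring" with minparams=1 and maxparams=2 renders as
// "<field>.substring(param1[, param2])".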
@Override
public String getSyntax() {
final StringBuilder sb = new StringBuilder("<field>.");
sb.append(getName());
sb.append('(');
for (int i = 0; i < minparams; i++) {
if (i != 0) {
sb.append(", ");
}
sb.append("param");
sb.append(i + 1);
}
if (minparams != maxparams) {
sb.append('[');
for (int i = minparams; i < maxparams; i++) {
if (i != 0) {
sb.append(", ");
}
sb.append("param");
sb.append(i + 1);
}
sb.append(']');
}
sb.append(')');
return sb.toString();
}
@Override
public int getMinParams() {
return minparams;
}
@Override
public int getMaxParams() {
return maxparams;
}
protected Object getParameterValue(final OIdentifiable iRecord, final String iValue) {
if (iValue == null) {
return null;
}
if (iValue.charAt(0) == '\'' || iValue.charAt(0) == '"') {
// GET THE VALUE AS STRING
return iValue.substring(1, iValue.length() - 1);
}
// SEARCH FOR FIELD
return ((ODocument) iRecord.getRecord()).field(iValue);
}
@Override
public int compareTo(OSQLMethod o) {
return this.getName().compareTo(o.getName());
}
@Override
public String toString() {
return name;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OAbstractSQLMethod.java
|
375 |
public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder<PutRepositoryRequest, PutRepositoryResponse, PutRepositoryRequestBuilder> {
/**
* Constructs register repository request
*
* @param clusterAdminClient cluster admin client
*/
public PutRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) {
super((InternalClusterAdminClient) clusterAdminClient, new PutRepositoryRequest());
}
/**
* Constructs register repository request for the repository with a given name
*
* @param clusterAdminClient cluster admin client
* @param name repository name
*/
public PutRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) {
super((InternalClusterAdminClient) clusterAdminClient, new PutRepositoryRequest(name));
}
/**
* Sets the repository name
*
* @param name repository name
* @return this builder
*/
public PutRepositoryRequestBuilder setName(String name) {
request.name(name);
return this;
}
/**
* Sets the repository type
*
* @param type repository type
* @return this builder
*/
public PutRepositoryRequestBuilder setType(String type) {
request.type(type);
return this;
}
/**
* Sets the repository settings
*
* @param settings repository settings
* @return this builder
*/
public PutRepositoryRequestBuilder setSettings(Settings settings) {
request.settings(settings);
return this;
}
/**
* Sets the repository settings
*
* @param settings repository settings builder
* @return this builder
*/
public PutRepositoryRequestBuilder setSettings(Settings.Builder settings) {
request.settings(settings);
return this;
}
/**
* Sets the repository settings in JSON, YAML, or properties format
*
* @param source repository settings
* @return this builder
*/
public PutRepositoryRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets the repository settings
*
* @param source repository settings
* @return this builder
*/
public PutRepositoryRequestBuilder setSettings(Map<String, Object> source) {
request.settings(source);
return this;
}
@Override
protected void doExecute(ActionListener<PutRepositoryResponse> listener) {
((ClusterAdminClient) client).putRepository(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_put_PutRepositoryRequestBuilder.java
|
45 |
public class PackageCompletions {
static final class PackageDescriptorProposal extends CompletionProposal {
PackageDescriptorProposal(int offset, String prefix, String packageName) {
super(offset, prefix, PACKAGE,
"package " + packageName,
"package " + packageName + ";");
}
@Override
protected boolean qualifiedNameIsPath() {
return true;
}
}
static final class PackageProposal extends CompletionProposal {
private final boolean withBody;
private final int len;
private final Package p;
private final String completed;
private final CeylonParseController cpc;
PackageProposal(int offset, String prefix, boolean withBody,
int len, Package p, String completed,
CeylonParseController cpc) {
super(offset, prefix, PACKAGE, completed,
completed.substring(len));
this.withBody = withBody;
this.len = len;
this.p = p;
this.completed = completed;
this.cpc = cpc;
}
@Override
public Point getSelection(IDocument document) {
if (withBody) {
return new Point(offset+completed.length()-prefix.length()-len-5, 3);
}
else {
return new Point(offset+completed.length()-prefix.length()-len, 0);
}
}
@Override
public void apply(IDocument document) {
super.apply(document);
if (withBody &&
EditorsUI.getPreferenceStore()
.getBoolean(LINKED_MODE)) {
final LinkedModeModel linkedModeModel = new LinkedModeModel();
final Point selection = getSelection(document);
List<ICompletionProposal> proposals = new ArrayList<ICompletionProposal>();
for (final Declaration d: p.getMembers()) {
if (Util.isResolvable(d) && d.isShared() &&
!isOverloadedVersion(d)) {
proposals.add(new ICompletionProposal() {
@Override
public Point getSelection(IDocument document) {
return null;
}
@Override
public Image getImage() {
return getImageForDeclaration(d);
}
@Override
public String getDisplayString() {
return d.getName();
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public String getAdditionalProposalInfo() {
return null;
}
@Override
public void apply(IDocument document) {
try {
document.replace(selection.x, selection.y,
d.getName());
}
catch (BadLocationException e) {
e.printStackTrace();
}
linkedModeModel.exit(ILinkedModeListener.UPDATE_CARET);
}
});
}
}
if (!proposals.isEmpty()) {
ProposalPosition linkedPosition =
new ProposalPosition(document, selection.x, selection.y, 0,
proposals.toArray(NO_COMPLETIONS));
try {
LinkedMode.addLinkedPosition(linkedModeModel, linkedPosition);
LinkedMode.installLinkedMode((CeylonEditor) EditorUtil.getCurrentEditor(),
document, linkedModeModel, this, new LinkedMode.NullExitPolicy(),
-1, 0);
}
catch (BadLocationException ble) {
ble.printStackTrace();
}
}
}
}
@Override
public String getAdditionalProposalInfo() {
return getDocumentationFor(cpc, p);
}
@Override
protected boolean qualifiedNameIsPath() {
return true;
}
}
static void addPackageCompletions(CeylonParseController cpc,
int offset, String prefix, Tree.ImportPath path, Node node,
List<ICompletionProposal> result, boolean withBody) {
String fullPath = fullPath(offset, prefix, path);
addPackageCompletions(offset, prefix, node, result, fullPath.length(),
fullPath+prefix, cpc, withBody);
}
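// Proposes every package in the current module whose escaped name starts with
// the typed path plus prefix, skipping packages that are already imported
// unless the typed path is an exact match.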
private static void addPackageCompletions(final int offset, final String prefix,
Node node, List<ICompletionProposal> result, final int len, String pfp,
final CeylonParseController cpc, final boolean withBody) {
//TODO: someday it would be nice to propose from all packages
// and auto-add the module dependency!
/*TypeChecker tc = CeylonBuilder.getProjectTypeChecker(cpc.getProject().getRawProject());
if (tc!=null) {
for (Module m: tc.getContext().getModules().getListOfModules()) {*/
//Set<Package> packages = new HashSet<Package>();
Unit unit = node.getUnit();
if (unit!=null) { //a null unit can occur if we have not finished parsing the file
Module module = unit.getPackage().getModule();
for (final Package p: module.getAllPackages()) {
//if (!packages.contains(p)) {
//packages.add(p);
//if ( p.getModule().equals(module) || p.isShared() ) {
final String pkg = escapePackageName(p);
if (!pkg.isEmpty() && pkg.startsWith(pfp)) {
boolean already = false;
if (!pfp.equals(pkg)) {
//don't add already imported packages, unless
//it is an exact match to the typed path
for (ImportList il: node.getUnit().getImportLists()) {
if (il.getImportedScope()==p) {
already = true;
break;
}
}
}
if (!already) {
result.add(new PackageProposal(offset, prefix, withBody,
len, p, pkg + (withBody ? " { ... }" : ""), cpc));
}
}
//}
}
}
}
static void addPackageDescriptorCompletion(CeylonParseController cpc,
int offset, String prefix, List<ICompletionProposal> result) {
if (!"package".startsWith(prefix)) return;
IFile file = cpc.getProject().getFile(cpc.getPath());
String packageName = getPackageName(file);
if (packageName!=null) {
result.add(new PackageDescriptorProposal(offset, prefix, packageName));
}
}
static void addCurrentPackageNameCompletion(CeylonParseController cpc,
int offset, String prefix, List<ICompletionProposal> result) {
IFile file = cpc.getProject().getFile(cpc.getPath());
String moduleName = getPackageName(file);
if (moduleName!=null) {
result.add(new CompletionProposal(offset, prefix,
isModuleDescriptor(cpc) ? MODULE : PACKAGE,
moduleName, moduleName));
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_PackageCompletions.java
|
1,476 |
public class RoutingNodes implements Iterable<RoutingNode> {
private final MetaData metaData;
private final ClusterBlocks blocks;
private final RoutingTable routingTable;
private final Map<String, RoutingNode> nodesToShards = newHashMap();
private final UnassignedShards unassignedShards = new UnassignedShards();
private final List<MutableShardRouting> ignoredUnassignedShards = newArrayList();
private final Map<ShardId, List<MutableShardRouting>> assignedShards = newHashMap();
private int inactivePrimaryCount = 0;
private int inactiveShardCount = 0;
private int relocatingShards = 0;
private Set<ShardId> clearPostAllocationFlag;
private final Map<String, ObjectIntOpenHashMap<String>> nodesPerAttributeNames = new HashMap<String, ObjectIntOpenHashMap<String>>();
public RoutingNodes(ClusterState clusterState) {
this.metaData = clusterState.metaData();
this.blocks = clusterState.blocks();
this.routingTable = clusterState.routingTable();
Map<String, List<MutableShardRouting>> nodesToShards = newHashMap();
// fill in the nodeToShards with the "live" nodes
for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) {
nodesToShards.put(cursor.value.id(), new ArrayList<MutableShardRouting>());
}
// fill in the inverse of node -> shards allocated
// also fill replicaSet information
for (IndexRoutingTable indexRoutingTable : routingTable.indicesRouting().values()) {
for (IndexShardRoutingTable indexShard : indexRoutingTable) {
for (ShardRouting shard : indexShard) {
// to get all the shards belonging to an index, including the replicas,
// we define a replica set and keep track of it. A replica set is identified
// by the ShardId, as this is common for primary and replicas.
// A replica Set might have one (and not more) replicas with the state of RELOCATING.
if (shard.assignedToNode()) {
List<MutableShardRouting> entries = nodesToShards.get(shard.currentNodeId());
if (entries == null) {
entries = newArrayList();
nodesToShards.put(shard.currentNodeId(), entries);
}
MutableShardRouting sr = new MutableShardRouting(shard);
entries.add(sr);
assignedShardsAdd(sr);
if (shard.relocating()) {
entries = nodesToShards.get(shard.relocatingNodeId());
relocatingShards++;
if (entries == null) {
entries = newArrayList();
nodesToShards.put(shard.relocatingNodeId(), entries);
}
// add the counterpart shard with relocatingNodeId reflecting the source from which
// it's relocating from.
sr = new MutableShardRouting(shard.index(), shard.id(), shard.relocatingNodeId(),
shard.currentNodeId(), shard.primary(), ShardRoutingState.INITIALIZING, shard.version());
entries.add(sr);
assignedShardsAdd(sr);
} else if (!shard.active()) { // shards that are initializing without being relocated
if (shard.primary()) {
inactivePrimaryCount++;
}
inactiveShardCount++;
}
} else {
MutableShardRouting sr = new MutableShardRouting(shard);
assignedShardsAdd(sr);
unassignedShards.add(sr);
}
}
}
}
for (Map.Entry<String, List<MutableShardRouting>> entry : nodesToShards.entrySet()) {
String nodeId = entry.getKey();
this.nodesToShards.put(nodeId, new RoutingNode(nodeId, clusterState.nodes().get(nodeId), entry.getValue()));
}
}
@Override
public Iterator<RoutingNode> iterator() {
return Iterators.unmodifiableIterator(nodesToShards.values().iterator());
}
public RoutingTable routingTable() {
return routingTable;
}
public RoutingTable getRoutingTable() {
return routingTable();
}
public MetaData metaData() {
return this.metaData;
}
public MetaData getMetaData() {
return metaData();
}
public ClusterBlocks blocks() {
return this.blocks;
}
public ClusterBlocks getBlocks() {
return this.blocks;
}
public int requiredAverageNumberOfShardsPerNode() {
int totalNumberOfShards = 0;
// we need to recompute to take closed shards into account
for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
IndexMetaData indexMetaData = cursor.value;
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
totalNumberOfShards += indexMetaData.totalNumberOfShards();
}
}
return totalNumberOfShards / nodesToShards.size();
}
public boolean hasUnassigned() {
return !unassignedShards.isEmpty();
}
public List<MutableShardRouting> ignoredUnassigned() {
return this.ignoredUnassignedShards;
}
public UnassignedShards unassigned() {
return this.unassignedShards;
}
public RoutingNodesIterator nodes() {
return new RoutingNodesIterator(nodesToShards.values().iterator());
}
/**
* Clears the post allocation flag for the provided shard id. NOTE: this should be used cautiously,
* since it can lead to data loss if the primary shard is not allocated, as it will allocate
* the primary shard on a node and *not* expect it to have an existing valid index there.
*/
public void addClearPostAllocationFlag(ShardId shardId) {
if (clearPostAllocationFlag == null) {
clearPostAllocationFlag = Sets.newHashSet();
}
clearPostAllocationFlag.add(shardId);
}
public Iterable<ShardId> getShardsToClearPostAllocationFlag() {
if (clearPostAllocationFlag == null) {
return ImmutableSet.of();
}
return clearPostAllocationFlag;
}
public RoutingNode node(String nodeId) {
return nodesToShards.get(nodeId);
}
public ObjectIntOpenHashMap<String> nodesPerAttributesCounts(String attributeName) {
ObjectIntOpenHashMap<String> nodesPerAttributesCounts = nodesPerAttributeNames.get(attributeName);
if (nodesPerAttributesCounts != null) {
return nodesPerAttributesCounts;
}
nodesPerAttributesCounts = new ObjectIntOpenHashMap<String>();
for (RoutingNode routingNode : this) {
String attrValue = routingNode.node().attributes().get(attributeName);
nodesPerAttributesCounts.addTo(attrValue, 1);
}
nodesPerAttributeNames.put(attributeName, nodesPerAttributesCounts);
return nodesPerAttributesCounts;
}
public boolean hasUnassignedPrimaries() {
return unassignedShards.numPrimaries() > 0;
}
public boolean hasUnassignedShards() {
return !unassignedShards.isEmpty();
}
public boolean hasInactivePrimaries() {
return inactivePrimaryCount > 0;
}
public boolean hasInactiveShards() {
return inactiveShardCount > 0;
}
public int getRelocatingShardCount() {
return relocatingShards;
}
/**
* Returns the active primary shard for the given ShardRouting or <code>null</code> if
* no primary is found or the primary is not active.
*/
public MutableShardRouting activePrimary(ShardRouting shard) {
assert !shard.primary();
for (MutableShardRouting shardRouting : assignedShards(shard.shardId())) {
if (shardRouting.primary() && shardRouting.active()) {
return shardRouting;
}
}
return null;
}
/**
* Returns one active replica shard for the given ShardRouting shard ID or <code>null</code> if
* no active replica is found.
*/
public MutableShardRouting activeReplica(ShardRouting shard) {
for (MutableShardRouting shardRouting : assignedShards(shard.shardId())) {
if (!shardRouting.primary() && shardRouting.active()) {
return shardRouting;
}
}
return null;
}
/**
* Returns all shards that are not in the state UNASSIGNED with the same shard
* ID as the given shard.
*/
public Iterable<MutableShardRouting> assignedShards(ShardRouting shard) {
return assignedShards(shard.shardId());
}
/**
* Returns <code>true</code> iff all replicas are active for the given shard routing. Otherwise <code>false</code>
*/
public boolean allReplicasActive(ShardRouting shardRouting) {
final List<MutableShardRouting> shards = assignedShards(shardRouting.shardId());
if (shards.isEmpty() || shards.size() < this.routingTable.index(shardRouting.index()).shard(shardRouting.id()).size()) {
return false; // if the list is empty nothing is active; if it holds fewer shards than the total, at least one is unassigned
}
for (MutableShardRouting shard : shards) {
if (!shard.active()) {
return false;
}
}
return true;
}
public List<MutableShardRouting> shards(Predicate<MutableShardRouting> predicate) {
List<MutableShardRouting> shards = newArrayList();
for (RoutingNode routingNode : this) {
for (MutableShardRouting shardRouting : routingNode) {
if (predicate.apply(shardRouting)) {
shards.add(shardRouting);
}
}
}
return shards;
}
public List<MutableShardRouting> shardsWithState(ShardRoutingState... state) {
// TODO these are used on tests only - move into utils class
List<MutableShardRouting> shards = newArrayList();
for (RoutingNode routingNode : this) {
shards.addAll(routingNode.shardsWithState(state));
}
return shards;
}
public List<MutableShardRouting> shardsWithState(String index, ShardRoutingState... state) {
// TODO these are used on tests only - move into utils class
List<MutableShardRouting> shards = newArrayList();
for (RoutingNode routingNode : this) {
shards.addAll(routingNode.shardsWithState(index, state));
}
return shards;
}
public String prettyPrint() {
StringBuilder sb = new StringBuilder("routing_nodes:\n");
for (RoutingNode routingNode : this) {
sb.append(routingNode.prettyPrint());
}
sb.append("---- unassigned\n");
for (MutableShardRouting shardEntry : unassignedShards) {
sb.append("--------").append(shardEntry.shortSummary()).append('\n');
}
return sb.toString();
}
/**
* Assign a shard to a node. This will increment the inactiveShardCount counter
* and the inactivePrimaryCount counter if the shard is the primary.
* In case the shard is already assigned and started, it will be marked as
* relocating, which is accounted for, too, so the number of concurrent relocations
* can be retrieved easily.
* This method can be called several times for the same shard, only the first time
* will change the state.
*
* INITIALIZING => INITIALIZING
* UNASSIGNED => INITIALIZING
* STARTED => RELOCATING
* RELOCATING => RELOCATING
*
* @param shard the shard to be assigned
* @param nodeId the nodeId this shard should initialize on or relocate from
*/
public void assign(MutableShardRouting shard, String nodeId) {
// state will not change if the shard is already initializing.
ShardRoutingState oldState = shard.state();
shard.assignToNode(nodeId);
node(nodeId).add(shard);
if (oldState == ShardRoutingState.UNASSIGNED) {
inactiveShardCount++;
if (shard.primary()) {
inactivePrimaryCount++;
}
}
if (shard.state() == ShardRoutingState.RELOCATING) {
relocatingShards++;
}
assignedShardsAdd(shard);
}
/**
* Relocate a shard to another node.
*/
public void relocate(MutableShardRouting shard, String nodeId) {
relocatingShards++;
shard.relocate(nodeId);
}
/**
* Mark a shard as started and adjusts internal statistics.
*/
public void started(MutableShardRouting shard) {
if (!shard.active() && shard.relocatingNodeId() == null) {
inactiveShardCount--;
if (shard.primary()) {
inactivePrimaryCount--;
}
} else if (shard.relocating()) {
relocatingShards--;
}
assert !shard.started();
shard.moveToStarted();
}
/**
* Cancels the relocation of a shard; the given shard must be relocating.
*/
public void cancelRelocation(MutableShardRouting shard) {
relocatingShards--;
shard.cancelRelocation();
}
/**
* Swaps the status of the given shards, making replicas primary and vice versa.
*
* @param shards the shards to have their primary status swapped.
*/
public void swapPrimaryFlag(MutableShardRouting... shards) {
for (MutableShardRouting shard : shards) {
if (shard.primary()) {
shard.moveFromPrimary();
if (shard.unassigned()) {
unassignedShards.primaries--;
}
} else {
shard.moveToPrimary();
if (shard.unassigned()) {
unassignedShards.primaries++;
}
}
}
}
private static final List<MutableShardRouting> EMPTY = Collections.emptyList();
private List<MutableShardRouting> assignedShards(ShardId shardId) {
final List<MutableShardRouting> replicaSet = assignedShards.get(shardId);
return replicaSet == null ? EMPTY : Collections.unmodifiableList(replicaSet);
}
/**
* Removes the given shard from the RoutingNodes' internal statistics and cancels
* the relocation if the shard is relocating.
* @param shard the shard to remove
*/
private void remove(MutableShardRouting shard) {
if (!shard.active() && shard.relocatingNodeId() == null) {
inactiveShardCount--;
assert inactiveShardCount >= 0;
if (shard.primary()) {
inactivePrimaryCount--;
}
} else if (shard.relocating()) {
cancelRelocation(shard);
}
assignedShardsRemove(shard);
}
private void assignedShardsAdd(MutableShardRouting shard) {
if (shard.unassigned()) {
// unassigned shards are not tracked in the assignedShards map
return;
}
List<MutableShardRouting> shards = assignedShards.get(shard.shardId());
if (shards == null) {
shards = Lists.newArrayList();
assignedShards.put(shard.shardId(), shards);
}
assert assertInstanceNotInList(shard, shards);
shards.add(shard);
}
private boolean assertInstanceNotInList(MutableShardRouting shard, List<MutableShardRouting> shards) {
for (MutableShardRouting s : shards) {
assert s != shard;
}
return true;
}
private void assignedShardsRemove(MutableShardRouting shard) {
final List<MutableShardRouting> replicaSet = assignedShards.get(shard.shardId());
if (replicaSet != null) {
final Iterator<MutableShardRouting> iterator = replicaSet.iterator();
while(iterator.hasNext()) {
// yes we check identity here
if (shard == iterator.next()) {
iterator.remove();
return;
}
}
assert false : "Illegal state";
}
}
public boolean isKnown(DiscoveryNode node) {
return nodesToShards.containsKey(node.getId());
}
public void addNode(DiscoveryNode node) {
RoutingNode routingNode = new RoutingNode(node.id(), node);
nodesToShards.put(routingNode.nodeId(), routingNode);
}
public RoutingNodeIterator routingNodeIter(String nodeId) {
final RoutingNode routingNode = nodesToShards.get(nodeId);
if (routingNode == null) {
return null;
}
assert assertShardStats(this);
return new RoutingNodeIterator(routingNode);
}
public RoutingNode[] toArray() {
return nodesToShards.values().toArray(new RoutingNode[nodesToShards.size()]);
}
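// UnassignedShards implements a lightweight snapshot "transaction":
// transactionBegin() copies the list, and transactionEnd() swaps the copy back
// in, using transactionId to assert no concurrent mutation happened in between.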
public final static class UnassignedShards implements Iterable<MutableShardRouting> {
private final List<MutableShardRouting> unassigned;
private int primaries = 0;
private long transactionId = 0;
private final UnassignedShards source;
private final long sourceTransactionId;
public UnassignedShards(UnassignedShards other) {
source = other;
sourceTransactionId = other.transactionId;
unassigned = new ArrayList<MutableShardRouting>(other.unassigned);
primaries = other.primaries;
}
public UnassignedShards() {
unassigned = new ArrayList<MutableShardRouting>();
source = null;
sourceTransactionId = -1;
}
public void add(MutableShardRouting mutableShardRouting) {
if(mutableShardRouting.primary()) {
primaries++;
}
unassigned.add(mutableShardRouting);
transactionId++;
}
public void addAll(Collection<MutableShardRouting> mutableShardRoutings) {
for (MutableShardRouting r : mutableShardRoutings) {
add(r);
}
}
public int size() {
return unassigned.size();
}
public int numPrimaries() {
return primaries;
}
@Override
public Iterator<MutableShardRouting> iterator() {
final Iterator<MutableShardRouting> iterator = unassigned.iterator();
return new Iterator<MutableShardRouting>() {
private MutableShardRouting current;
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public MutableShardRouting next() {
return current = iterator.next();
}
@Override
public void remove() {
iterator.remove();
if (current.primary()) {
primaries--;
}
transactionId++;
}
};
}
public boolean isEmpty() {
return unassigned.isEmpty();
}
public void shuffle() {
Collections.shuffle(unassigned);
}
public void clear() {
transactionId++;
unassigned.clear();
primaries = 0;
}
public void transactionEnd(UnassignedShards shards) {
assert shards.source == this && shards.sourceTransactionId == transactionId :
"Expected ID: " + shards.sourceTransactionId + " actual: " + transactionId + " Expected Source: " + shards.source + " actual: " + this;
transactionId++;
this.unassigned.clear();
this.unassigned.addAll(shards.unassigned);
this.primaries = shards.primaries;
}
public UnassignedShards transactionBegin() {
return new UnassignedShards(this);
}
public MutableShardRouting[] drain() {
MutableShardRouting[] mutableShardRoutings = unassigned.toArray(new MutableShardRouting[unassigned.size()]);
unassigned.clear();
primaries = 0;
transactionId++;
return mutableShardRoutings;
}
}
/**
* Calculates RoutingNodes statistics by iterating over all {@link MutableShardRouting}s
* in the cluster to ensure the book-keeping is correct.
* For performance reasons, this should only be called from asserts
*
* @return this method always returns <code>true</code> or throws an assertion error. If assertions are not enabled,
* this method does nothing.
*/
public static boolean assertShardStats(RoutingNodes routingNodes) {
boolean run = false;
assert (run = true); // only run if assertions are enabled!
if (!run) {
return true;
}
int unassignedPrimaryCount = 0;
int inactivePrimaryCount = 0;
int inactiveShardCount = 0;
int relocating = 0;
final Set<ShardId> seenShards = newHashSet();
Map<String, Integer> indicesAndShards = new HashMap<String, Integer>();
for (RoutingNode node : routingNodes) {
for (MutableShardRouting shard : node) {
if (!shard.active() && shard.relocatingNodeId() == null) {
if (!shard.relocating()) {
inactiveShardCount++;
if (shard.primary()) {
inactivePrimaryCount++;
}
}
}
if (shard.relocating()) {
relocating++;
}
seenShards.add(shard.shardId());
Integer i = indicesAndShards.get(shard.index());
if (i == null) {
i = shard.id();
}
indicesAndShards.put(shard.index(), Math.max(i, shard.id()));
}
}
// Assert that the assignedShards bookkeeping matches the shard routings actually present on the nodes.
Set<Map.Entry<String, Integer>> entries = indicesAndShards.entrySet();
final List<MutableShardRouting> shards = newArrayList();
for (Map.Entry<String, Integer> e : entries) {
String index = e.getKey();
for (int i = 0; i < e.getValue(); i++) {
for (RoutingNode routingNode : routingNodes) {
for (MutableShardRouting shardRouting : routingNode) {
if (shardRouting.index().equals(index) && shardRouting.id() == i) {
shards.add(shardRouting);
}
}
}
List<MutableShardRouting> mutableShardRoutings = routingNodes.assignedShards(new ShardId(index, i));
assert mutableShardRoutings.size() == shards.size();
for (MutableShardRouting r : mutableShardRoutings) {
assert shards.contains(r);
shards.remove(r);
}
assert shards.isEmpty();
}
}
for (MutableShardRouting shard : routingNodes.unassigned()) {
if (shard.primary()) {
unassignedPrimaryCount++;
}
seenShards.add(shard.shardId());
}
assert unassignedPrimaryCount == routingNodes.unassignedShards.numPrimaries() :
"Unassigned primaries is [" + unassignedPrimaryCount + "] but RoutingNodes returned unassigned primaries [" + routingNodes.unassigned().numPrimaries() + "]";
assert inactivePrimaryCount == routingNodes.inactivePrimaryCount :
"Inactive Primary count [" + inactivePrimaryCount + "] but RoutingNodes returned inactive primaries [" + routingNodes.inactivePrimaryCount + "]";
assert inactiveShardCount == routingNodes.inactiveShardCount :
"Inactive Shard count [" + inactiveShardCount + "] but RoutingNodes returned inactive shards [" + routingNodes.inactiveShardCount + "]";
assert routingNodes.getRelocatingShardCount() == relocating : "Relocating shards mismatch [" + routingNodes.getRelocatingShardCount() + "] but expected [" + relocating + "]";
return true;
}
public class RoutingNodesIterator implements Iterator<RoutingNode>, Iterable<MutableShardRouting> {
private RoutingNode current;
private final Iterator<RoutingNode> delegate;
public RoutingNodesIterator(Iterator<RoutingNode> iterator) {
delegate = iterator;
}
@Override
public boolean hasNext() {
return delegate.hasNext();
}
@Override
public RoutingNode next() {
return current = delegate.next();
}
public RoutingNodeIterator nodeShards() {
return new RoutingNodeIterator(current);
}
@Override
public void remove() {
delegate.remove();
}
@Override
public Iterator<MutableShardRouting> iterator() {
return nodeShards();
}
}
public final class RoutingNodeIterator implements Iterator<MutableShardRouting>, Iterable<MutableShardRouting> {
private final RoutingNode iterable;
private MutableShardRouting shard;
private final Iterator<MutableShardRouting> delegate;
public RoutingNodeIterator(RoutingNode iterable) {
this.delegate = iterable.mutableIterator();
this.iterable = iterable;
}
@Override
public boolean hasNext() {
return delegate.hasNext();
}
@Override
public MutableShardRouting next() {
return shard = delegate.next();
}
public void remove() {
delegate.remove();
RoutingNodes.this.remove(shard);
}
@Override
public Iterator<MutableShardRouting> iterator() {
return iterable.iterator();
}
public void moveToUnassigned() {
iterator().remove();
unassigned().add(new MutableShardRouting(shard.index(), shard.id(),
null, shard.primary(), ShardRoutingState.UNASSIGNED, shard.version() + 1));
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_routing_RoutingNodes.java
|
494 |
public class CloseIndexAction extends IndicesAction<CloseIndexRequest, CloseIndexResponse, CloseIndexRequestBuilder> {
public static final CloseIndexAction INSTANCE = new CloseIndexAction();
public static final String NAME = "indices/close";
private CloseIndexAction() {
super(NAME);
}
@Override
public CloseIndexResponse newResponse() {
return new CloseIndexResponse();
}
@Override
public CloseIndexRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new CloseIndexRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_close_CloseIndexAction.java
|
435 |
static final class Fields {
static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString PRIMARIES = new XContentBuilderString("primaries");
static final XContentBuilderString REPLICATION = new XContentBuilderString("replication");
static final XContentBuilderString MIN = new XContentBuilderString("min");
static final XContentBuilderString MAX = new XContentBuilderString("max");
static final XContentBuilderString AVG = new XContentBuilderString("avg");
static final XContentBuilderString INDEX = new XContentBuilderString("index");
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsIndices.java
|
2,618 |
public final class UnsafeHelper {
public static final Unsafe UNSAFE;
public static final boolean UNSAFE_AVAILABLE;
public static final long BYTE_ARRAY_BASE_OFFSET;
public static final long SHORT_ARRAY_BASE_OFFSET;
public static final long CHAR_ARRAY_BASE_OFFSET;
public static final long INT_ARRAY_BASE_OFFSET;
public static final long FLOAT_ARRAY_BASE_OFFSET;
public static final long LONG_ARRAY_BASE_OFFSET;
public static final long DOUBLE_ARRAY_BASE_OFFSET;
public static final int BYTE_ARRAY_INDEX_SCALE;
public static final int SHORT_ARRAY_INDEX_SCALE;
public static final int CHAR_ARRAY_INDEX_SCALE;
public static final int INT_ARRAY_INDEX_SCALE;
public static final int FLOAT_ARRAY_INDEX_SCALE;
public static final int LONG_ARRAY_INDEX_SCALE;
public static final int DOUBLE_ARRAY_INDEX_SCALE;
static {
try {
Unsafe unsafe = findUnsafe();
BYTE_ARRAY_BASE_OFFSET = unsafe.arrayBaseOffset(byte[].class);
SHORT_ARRAY_BASE_OFFSET = unsafe.arrayBaseOffset(short[].class);
CHAR_ARRAY_BASE_OFFSET = unsafe.arrayBaseOffset(char[].class);
INT_ARRAY_BASE_OFFSET = unsafe.arrayBaseOffset(int[].class);
FLOAT_ARRAY_BASE_OFFSET = unsafe.arrayBaseOffset(float[].class);
LONG_ARRAY_BASE_OFFSET = unsafe.arrayBaseOffset(long[].class);
DOUBLE_ARRAY_BASE_OFFSET = unsafe.arrayBaseOffset(double[].class);
BYTE_ARRAY_INDEX_SCALE = unsafe.arrayIndexScale(byte[].class);
SHORT_ARRAY_INDEX_SCALE = unsafe.arrayIndexScale(short[].class);
CHAR_ARRAY_INDEX_SCALE = unsafe.arrayIndexScale(char[].class);
INT_ARRAY_INDEX_SCALE = unsafe.arrayIndexScale(int[].class);
FLOAT_ARRAY_INDEX_SCALE = unsafe.arrayIndexScale(float[].class);
LONG_ARRAY_INDEX_SCALE = unsafe.arrayIndexScale(long[].class);
DOUBLE_ARRAY_INDEX_SCALE = unsafe.arrayIndexScale(double[].class);
// test if unsafe has required methods...
byte[] buffer = new byte[8];
unsafe.putChar(buffer, BYTE_ARRAY_BASE_OFFSET, '0');
unsafe.putShort(buffer, BYTE_ARRAY_BASE_OFFSET, (short) 1);
unsafe.putInt(buffer, BYTE_ARRAY_BASE_OFFSET, 2);
unsafe.putFloat(buffer, BYTE_ARRAY_BASE_OFFSET, 3f);
unsafe.putLong(buffer, BYTE_ARRAY_BASE_OFFSET, 4L);
unsafe.putDouble(buffer, BYTE_ARRAY_BASE_OFFSET, 5d);
unsafe.copyMemory(new byte[8], BYTE_ARRAY_BASE_OFFSET, buffer, BYTE_ARRAY_BASE_OFFSET, buffer.length);
UNSAFE = unsafe;
UNSAFE_AVAILABLE = UNSAFE != null;
} catch (Throwable e) {
throw new HazelcastException(e);
}
}
private UnsafeHelper() {
}
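// Tries the public Unsafe.getUnsafe() first; when a SecurityException is
// thrown, falls back (inside a privileged action) to reflectively reading the
// "theUnsafe" field, or any static field whose type is Unsafe.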
private static Unsafe findUnsafe() {
try {
return Unsafe.getUnsafe();
} catch (SecurityException se) {
return AccessController.doPrivileged(new PrivilegedAction<Unsafe>() {
@Override
public Unsafe run() {
try {
Class<Unsafe> type = Unsafe.class;
try {
Field field = type.getDeclaredField("theUnsafe");
field.setAccessible(true);
return type.cast(field.get(type));
} catch (Exception e) {
for (Field field : type.getDeclaredFields()) {
if (type.isAssignableFrom(field.getType())) {
field.setAccessible(true);
return type.cast(field.get(type));
}
}
}
} catch (Exception e) {
throw new RuntimeException("Unsafe unavailable", e);
}
throw new RuntimeException("Unsafe unavailable");
}
});
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_UnsafeHelper.java
|
2,733 |
public final class SerializationServiceImpl implements SerializationService {
private static final int CONSTANT_SERIALIZERS_SIZE = SerializationConstants.CONSTANT_SERIALIZERS_LENGTH;
private static final PartitioningStrategy EMPTY_PARTITIONING_STRATEGY = new PartitioningStrategy() {
public Object getPartitionKey(Object key) {
return null;
}
};
private final IdentityHashMap<Class, SerializerAdapter> constantTypesMap
= new IdentityHashMap<Class, SerializerAdapter>(CONSTANT_SERIALIZERS_SIZE);
private final SerializerAdapter[] constantTypeIds = new SerializerAdapter[CONSTANT_SERIALIZERS_SIZE];
private final ConcurrentMap<Class, SerializerAdapter> typeMap = new ConcurrentHashMap<Class, SerializerAdapter>();
private final ConcurrentMap<Integer, SerializerAdapter> idMap = new ConcurrentHashMap<Integer, SerializerAdapter>();
private final AtomicReference<SerializerAdapter> global = new AtomicReference<SerializerAdapter>();
private final InputOutputFactory inputOutputFactory;
private final Queue<BufferObjectDataOutput> outputPool = new ConcurrentLinkedQueue<BufferObjectDataOutput>();
private final PortableSerializer portableSerializer;
private final SerializerAdapter dataSerializerAdapter;
private final SerializerAdapter portableSerializerAdapter;
private final ClassLoader classLoader;
private final ManagedContext managedContext;
private final SerializationContextImpl serializationContext;
private final PartitioningStrategy globalPartitioningStrategy;
private final int outputBufferSize;
private volatile boolean active = true;
SerializationServiceImpl(InputOutputFactory inputOutputFactory, int version, ClassLoader classLoader,
Map<Integer, ? extends DataSerializableFactory> dataSerializableFactories,
Map<Integer, ? extends PortableFactory> portableFactories,
Collection<ClassDefinition> classDefinitions, boolean checkClassDefErrors,
ManagedContext managedContext, PartitioningStrategy partitionStrategy,
int initialOutputBufferSize,
boolean enableCompression, boolean enableSharedObject) {
this.inputOutputFactory = inputOutputFactory;
this.classLoader = classLoader;
this.managedContext = managedContext;
this.globalPartitioningStrategy = partitionStrategy;
this.outputBufferSize = initialOutputBufferSize;
PortableHookLoader loader = new PortableHookLoader(portableFactories, classLoader);
serializationContext = new SerializationContextImpl(this, loader.getFactories().keySet(), version);
for (ClassDefinition cd : loader.getDefinitions()) {
serializationContext.registerClassDefinition(cd);
}
dataSerializerAdapter = new StreamSerializerAdapter(this, new DataSerializer(dataSerializableFactories, classLoader));
portableSerializer = new PortableSerializer(serializationContext, loader.getFactories());
portableSerializerAdapter = new StreamSerializerAdapter(this, portableSerializer);
registerConstant(DataSerializable.class, dataSerializerAdapter);
registerConstant(Portable.class, portableSerializerAdapter);
registerConstant(Byte.class, new ByteSerializer());
registerConstant(Boolean.class, new BooleanSerializer());
registerConstant(Character.class, new CharSerializer());
registerConstant(Short.class, new ShortSerializer());
registerConstant(Integer.class, new IntegerSerializer());
registerConstant(Long.class, new LongSerializer());
registerConstant(Float.class, new FloatSerializer());
registerConstant(Double.class, new DoubleSerializer());
registerConstant(byte[].class, new TheByteArraySerializer());
registerConstant(char[].class, new CharArraySerializer());
registerConstant(short[].class, new ShortArraySerializer());
registerConstant(int[].class, new IntegerArraySerializer());
registerConstant(long[].class, new LongArraySerializer());
registerConstant(float[].class, new FloatArraySerializer());
registerConstant(double[].class, new DoubleArraySerializer());
registerConstant(String.class, new StringSerializer());
safeRegister(Date.class, new DateSerializer());
safeRegister(BigInteger.class, new BigIntegerSerializer());
safeRegister(BigDecimal.class, new BigDecimalSerializer());
safeRegister(Externalizable.class, new Externalizer(enableCompression));
safeRegister(Serializable.class, new ObjectSerializer(enableSharedObject, enableCompression));
safeRegister(Class.class, new ClassSerializer());
safeRegister(Enum.class, new EnumSerializer());
registerClassDefinitions(classDefinitions, checkClassDefErrors);
}
private void registerClassDefinitions(final Collection<ClassDefinition> classDefinitions, boolean checkClassDefErrors) {
final Map<Integer, ClassDefinition> classDefMap = new HashMap<Integer, ClassDefinition>(classDefinitions.size());
for (ClassDefinition cd : classDefinitions) {
if (classDefMap.containsKey(cd.getClassId())) {
throw new HazelcastSerializationException("Duplicate registration found for class-id[" + cd.getClassId() + "]!");
}
classDefMap.put(cd.getClassId(), cd);
}
for (ClassDefinition classDefinition : classDefinitions) {
registerClassDefinition(classDefinition, classDefMap, checkClassDefErrors);
}
}
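// Nested Portable definitions are wired recursively: every PORTABLE or
// PORTABLE_ARRAY field must resolve to a registered ClassDefinition, and with
// checkClassDefErrors enabled a missing definition fails registration fast.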
private void registerClassDefinition(ClassDefinition cd, Map<Integer, ClassDefinition> classDefMap, boolean checkClassDefErrors) {
for (int i = 0; i < cd.getFieldCount(); i++) {
FieldDefinition fd = cd.get(i);
if (fd.getType() == FieldType.PORTABLE || fd.getType() == FieldType.PORTABLE_ARRAY) {
int classId = fd.getClassId();
ClassDefinition nestedCd = classDefMap.get(classId);
if (nestedCd != null) {
((ClassDefinitionImpl) cd).addClassDef(nestedCd);
registerClassDefinition(nestedCd, classDefMap, checkClassDefErrors);
serializationContext.registerClassDefinition(nestedCd);
} else if (checkClassDefErrors) {
throw new HazelcastSerializationException("Could not find registered ClassDefinition for class-id: "
+ classId);
}
}
}
serializationContext.registerClassDefinition(cd);
}
public Data toData(final Object obj) {
return toData(obj, globalPartitioningStrategy);
}
@SuppressWarnings("unchecked")
public Data toData(Object obj, PartitioningStrategy strategy) {
if (obj == null) {
return null;
}
if (obj instanceof Data) {
return (Data) obj;
}
try {
final SerializerAdapter serializer = serializerFor(obj.getClass());
if (serializer == null) {
if (active) {
throw new HazelcastSerializationException("There is no suitable serializer for " + obj.getClass());
}
throw new HazelcastInstanceNotActiveException();
}
final byte[] bytes = serializer.write(obj);
final Data data = new Data(serializer.getTypeId(), bytes);
if (obj instanceof Portable) {
final Portable portable = (Portable) obj;
data.classDefinition = serializationContext.lookup(portable.getFactoryId(), portable.getClassId());
}
if (strategy == null) {
strategy = globalPartitioningStrategy;
}
if (strategy != null) {
Object pk = strategy.getPartitionKey(obj);
if (pk != null && pk != obj) {
final Data partitionKey = toData(pk, EMPTY_PARTITIONING_STRATEGY);
data.partitionHash = (partitionKey == null) ? -1 : partitionKey.getPartitionHash();
}
}
return data;
} catch (Throwable e) {
handleException(e);
}
return null;
}
public <T> T toObject(final Object object) {
if (!(object instanceof Data)) {
return (T) object;
}
Data data = (Data) object;
if (data.bufferSize() == 0 && data.isDataSerializable()) {
return null;
}
try {
final int typeId = data.type;
final SerializerAdapter serializer = serializerFor(typeId);
if (serializer == null) {
if (active) {
throw new HazelcastSerializationException("There is no suitable de-serializer for type " + typeId);
}
throw new HazelcastInstanceNotActiveException();
}
if (typeId == SerializationConstants.CONSTANT_TYPE_PORTABLE) {
serializationContext.registerClassDefinition(data.classDefinition);
}
Object obj = serializer.read(data);
if (managedContext != null) {
obj = managedContext.initialize(obj);
}
return (T) obj;
} catch (Throwable e) {
handleException(e);
}
return null;
}
public void writeObject(final ObjectDataOutput out, final Object obj) {
final boolean isNull = obj == null;
try {
out.writeBoolean(isNull);
if (isNull) {
return;
}
final SerializerAdapter serializer = serializerFor(obj.getClass());
if (serializer == null) {
if (active) {
throw new HazelcastSerializationException("There is no suitable serializer for " + obj.getClass());
}
throw new HazelcastInstanceNotActiveException();
}
out.writeInt(serializer.getTypeId());
if (obj instanceof Portable) {
final Portable portable = (Portable) obj;
ClassDefinition classDefinition = serializationContext.lookupOrRegisterClassDefinition(portable);
classDefinition.writeData(out);
}
serializer.write(out, obj);
} catch (Throwable e) {
handleException(e);
}
}
public Object readObject(final ObjectDataInput in) {
try {
final boolean isNull = in.readBoolean();
if (isNull) {
return null;
}
final int typeId = in.readInt();
final SerializerAdapter serializer = serializerFor(typeId);
if (serializer == null) {
if (active) {
throw new HazelcastSerializationException("There is no suitable de-serializer for type " + typeId);
}
throw new HazelcastInstanceNotActiveException();
}
if (typeId == SerializationConstants.CONSTANT_TYPE_PORTABLE && in instanceof PortableContextAwareInputStream) {
ClassDefinition classDefinition = new ClassDefinitionImpl();
classDefinition.readData(in);
classDefinition = serializationContext.registerClassDefinition(classDefinition);
PortableContextAwareInputStream ctxIn = (PortableContextAwareInputStream) in;
ctxIn.setClassDefinition(classDefinition);
}
Object obj = serializer.read(in);
if (managedContext != null) {
obj = managedContext.initialize(obj);
}
return obj;
} catch (Throwable e) {
handleException(e);
}
return null;
}
private void handleException(Throwable e) {
if (e instanceof OutOfMemoryError) {
OutOfMemoryErrorDispatcher.onOutOfMemory((OutOfMemoryError) e);
return;
}
if (e instanceof HazelcastSerializationException) {
throw (HazelcastSerializationException) e;
}
throw new HazelcastSerializationException(e);
}
BufferObjectDataOutput pop() {
BufferObjectDataOutput out = outputPool.poll();
if (out == null) {
out = inputOutputFactory.createOutput(outputBufferSize, this);
}
return out;
}
void push(BufferObjectDataOutput out) {
if (out != null) {
out.clear();
outputPool.offer(out);
}
}
public BufferObjectDataInput createObjectDataInput(byte[] data) {
return inputOutputFactory.createInput(data, this);
}
public BufferObjectDataInput createObjectDataInput(Data data) {
return inputOutputFactory.createInput(data, this);
}
public BufferObjectDataOutput createObjectDataOutput(int size) {
return inputOutputFactory.createOutput(size, this);
}
public ObjectDataOutputStream createObjectDataOutputStream(OutputStream out) {
return new ObjectDataOutputStream(out, this);
}
public ObjectDataInputStream createObjectDataInputStream(InputStream in) {
return new ObjectDataInputStream(in, this);
}
public ObjectDataOutputStream createObjectDataOutputStream(OutputStream out, ByteOrder order) {
return new ObjectDataOutputStream(out, this, order);
}
public ObjectDataInputStream createObjectDataInputStream(InputStream in, ByteOrder order) {
return new ObjectDataInputStream(in, this, order);
}
public void register(Class type, Serializer serializer) {
if (type == null) {
throw new IllegalArgumentException("Class type information is required!");
}
if (serializer.getTypeId() <= 0) {
throw new IllegalArgumentException("Type id must be positive! Current: "
+ serializer.getTypeId() + ", Serializer: " + serializer);
}
safeRegister(type, createSerializerAdapter(serializer));
}
public void registerGlobal(final Serializer serializer) {
SerializerAdapter adapter = createSerializerAdapter(serializer);
if (!global.compareAndSet(null, adapter)) {
throw new IllegalStateException("Global serializer is already registered!");
}
SerializerAdapter current = idMap.putIfAbsent(serializer.getTypeId(), adapter);
if (current != null && current.getImpl().getClass() != adapter.getImpl().getClass()) {
global.compareAndSet(adapter, null);
throw new IllegalStateException("Serializer [" + current.getImpl() + "] has been already registered for type-id: "
+ serializer.getTypeId());
}
}
private SerializerAdapter createSerializerAdapter(Serializer serializer) {
final SerializerAdapter s;
if (serializer instanceof StreamSerializer) {
s = new StreamSerializerAdapter(this, (StreamSerializer) serializer);
} else if (serializer instanceof ByteArraySerializer) {
s = new ByteArraySerializerAdapter((ByteArraySerializer) serializer);
} else {
throw new IllegalArgumentException("Serializer must be instance of either StreamSerializer or ByteArraySerializer!");
}
return s;
}
public SerializerAdapter serializerFor(final Class type) {
if (DataSerializable.class.isAssignableFrom(type)) {
return dataSerializerAdapter;
} else if (Portable.class.isAssignableFrom(type)) {
return portableSerializerAdapter;
} else {
final SerializerAdapter serializer;
if ((serializer = constantTypesMap.get(type)) != null) {
return serializer;
}
}
SerializerAdapter serializer = typeMap.get(type);
if (serializer == null) {
// look for super classes
Class typeSuperclass = type.getSuperclass();
final Set<Class> interfaces = new LinkedHashSet<Class>(5);
getInterfaces(type, interfaces);
while (typeSuperclass != null) {
if ((serializer = registerFromSuperType(type, typeSuperclass)) != null) {
break;
}
getInterfaces(typeSuperclass, interfaces);
typeSuperclass = typeSuperclass.getSuperclass();
}
if (serializer == null) {
// look for interfaces
for (Class typeInterface : interfaces) {
if ((serializer = registerFromSuperType(type, typeInterface)) != null) {
break;
}
}
}
if (serializer == null && (serializer = global.get()) != null) {
safeRegister(type, serializer);
}
}
return serializer;
}
private static void getInterfaces(Class clazz, Set<Class> interfaces) {
final Class[] classes = clazz.getInterfaces();
if (classes.length > 0) {
Collections.addAll(interfaces, classes);
for (Class cl : classes) {
getInterfaces(cl, interfaces);
}
}
}
private SerializerAdapter registerFromSuperType(final Class type, final Class superType) {
final SerializerAdapter serializer = typeMap.get(superType);
if (serializer != null) {
safeRegister(type, serializer);
}
return serializer;
}
private void registerConstant(Class type, Serializer serializer) {
registerConstant(type, createSerializerAdapter(serializer));
}
private void registerConstant(Class type, SerializerAdapter serializer) {
constantTypesMap.put(type, serializer);
constantTypeIds[indexForDefaultType(serializer.getTypeId())] = serializer;
}
void safeRegister(final Class type, final Serializer serializer) {
safeRegister(type, createSerializerAdapter(serializer));
}
private void safeRegister(final Class type, final SerializerAdapter serializer) {
if (constantTypesMap.containsKey(type)) {
throw new IllegalArgumentException("[" + type + "] serializer cannot be overridden!");
}
SerializerAdapter current = typeMap.putIfAbsent(type, serializer);
if (current != null && current.getImpl().getClass() != serializer.getImpl().getClass()) {
throw new IllegalStateException("Serializer[" + current.getImpl()
+ "] has been already registered for type: " + type);
}
current = idMap.putIfAbsent(serializer.getTypeId(), serializer);
if (current != null && current.getImpl().getClass() != serializer.getImpl().getClass()) {
throw new IllegalStateException("Serializer [" + current.getImpl() + "] has been already registered for type-id: "
+ serializer.getTypeId());
}
}
public SerializerAdapter serializerFor(final int typeId) {
if (typeId < 0) {
final int index = indexForDefaultType(typeId);
if (index < CONSTANT_SERIALIZERS_SIZE) {
return constantTypeIds[index];
}
}
return idMap.get(typeId);
}
private int indexForDefaultType(final int typeId) {
return -typeId - 1;
}
public SerializationContext getSerializationContext() {
return serializationContext;
}
public PortableReader createPortableReader(Data data) {
return new DefaultPortableReader(portableSerializer, createObjectDataInput(data), data.getClassDefinition());
}
public void destroy() {
active = false;
for (SerializerAdapter serializer : typeMap.values()) {
serializer.destroy();
}
typeMap.clear();
idMap.clear();
global.set(null);
constantTypesMap.clear();
for (BufferObjectDataOutput output : outputPool) {
IOUtil.closeResource(output);
}
outputPool.clear();
}
public ClassLoader getClassLoader() {
return classLoader;
}
public ManagedContext getManagedContext() {
return managedContext;
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_nio_serialization_SerializationServiceImpl.java
|
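A minimal round-trip sketch for the serialization service above. The service instance is supplied by the caller, since the constructor wiring (factories, version, buffer sizes) is not part of this snippet; only toData/toObject are used, both defined above, and the package declaration is inferred from the file path.
package com.hazelcast.nio.serialization; // same package as the class above, per the file path
public class SerializationRoundTripSketch {
    // Serializes a String and reads it back; expected to return true.
    static boolean roundTrips(SerializationServiceImpl service) {
        Data data = service.toData("hello");   // serialized by the registered StringSerializer
        String back = service.toObject(data);  // the type id stored in Data selects the deserializer
        return "hello".equals(back);
    }
}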
39 |
{
@Override
public HighAvailabilityMemberState getHighAvailabilityMemberState()
{
return memberStateMachine.getCurrentState();
}
} );
| 1no label
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
|
1,621 |
public class OTxTask extends OAbstractReplicatedTask {
private static final long serialVersionUID = 1L;
private List<OAbstractRecordReplicatedTask> tasks = new ArrayList<OAbstractRecordReplicatedTask>();
public OTxTask() {
}
public void add(final OAbstractRecordReplicatedTask iTask) {
tasks.add(iTask);
}
@Override
public Object execute(final OServer iServer, ODistributedServerManager iManager, final ODatabaseDocumentTx database)
throws Exception {
ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.IN,
"committing transaction against db=%s...", database.getName());
ODatabaseRecordThreadLocal.INSTANCE.set(database);
try {
database.begin();
for (OAbstractRecordReplicatedTask task : tasks) {
task.execute(iServer, iManager, database);
}
database.commit();
} catch (ONeedRetryException e) {
return Boolean.FALSE;
} catch (OTransactionException e) {
return Boolean.FALSE;
} catch (Exception e) {
OLogManager.instance().error(this, "Error on distributed transaction commit", e);
return Boolean.FALSE;
}
return Boolean.TRUE;
}
@Override
public QUORUM_TYPE getQuorumType() {
return QUORUM_TYPE.WRITE;
}
@Override
public OFixTxTask getFixTask(final ODistributedRequest iRequest, final ODistributedResponse iBadResponse,
final ODistributedResponse iGoodResponse) {
final OFixTxTask fixTask = new OFixTxTask();
for (OAbstractRecordReplicatedTask t : tasks) {
final ORecordId rid = t.getRid();
final ORecordInternal<?> rec = rid.getRecord();
if (rec == null)
fixTask.add(new ODeleteRecordTask(rid, null));
else {
final ORecordVersion v = rec.getRecordVersion();
v.setRollbackMode();
fixTask.add(new OUpdateRecordTask(rid, rec.toStream(), v, rec.getRecordType()));
}
}
return fixTask;
}
@Override
public void writeExternal(final ObjectOutput out) throws IOException {
out.writeInt(tasks.size());
for (OAbstractRecordReplicatedTask task : tasks)
out.writeObject(task);
}
@Override
public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
final int size = in.readInt();
for (int i = 0; i < size; ++i)
tasks.add((OAbstractRecordReplicatedTask) in.readObject());
}
@Override
public String getName() {
return "tx";
}
@Override
public String getPayload() {
return null;
}
}
| 1no label
|
server_src_main_java_com_orientechnologies_orient_server_distributed_task_OTxTask.java
|
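A self-contained sketch of the Externalizable contract implemented above. An empty task is used so no record tasks need to be constructed; the only assumption is that OTxTask is on the classpath, and the package declaration is inferred from the file path.
package com.orientechnologies.orient.server.distributed.task; // per the file path
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
public class OTxTaskWireFormatSketch {
    public static void main(String[] args) throws Exception {
        OTxTask task = new OTxTask();                    // no-arg constructor shown above
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(buffer)) {
            task.writeExternal(out);                     // writes the task count (0 here), then each task
        }
        OTxTask copy = new OTxTask();
        try (ObjectInputStream in = new ObjectInputStream(
                new ByteArrayInputStream(buffer.toByteArray()))) {
            copy.readExternal(in);                       // reads the count back and re-adds each task
        }
        System.out.println(copy.getName());              // prints "tx"
    }
}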
380 |
public class AnnotationIterator implements Iterator<Annotation> {
private Iterator<Annotation> iterator;
private Annotation nextAnnotation;
private boolean includeRefinementAnnotations;
/**
* Returns a new AnnotationIterator.
* @param parent the parent iterator to iterate over annotations
* @param includeRefinementAnnotations whether to include refinement annotations in addition to problem annotations
*/
public AnnotationIterator(Iterator<Annotation> parent,
boolean includeRefinementAnnotations) {
this.iterator = parent;
this.includeRefinementAnnotations = includeRefinementAnnotations;
skip();
}
private void skip() {
while (iterator.hasNext()) {
Annotation next = iterator.next();
if (!next.isMarkedDeleted()) {
//TODO: rethink this condition!
if (next instanceof CeylonAnnotation ||
includeRefinementAnnotations &&
next instanceof RefinementAnnotation ||
isProblemMarkerAnnotation(next)) {
nextAnnotation = next;
return;
}
}
}
nextAnnotation = null;
}
private static boolean isProblemMarkerAnnotation(Annotation annotation) {
if (!(annotation instanceof MarkerAnnotation))
return false;
try {
MarkerAnnotation ma = (MarkerAnnotation) annotation;
return ma.getMarker().isSubtypeOf(IMarker.PROBLEM) &&
!ma.getMarker().getType().equals(CeylonBuilder.PROBLEM_MARKER_ID);
}
catch (CoreException e) {
return false;
}
}
public boolean hasNext() {
return nextAnnotation != null;
}
public Annotation next() {
try {
return nextAnnotation;
}
finally {
skip();
}
}
public void remove() {
throw new UnsupportedOperationException();
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_hover_AnnotationIterator.java
|
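A usage sketch, assuming a JFace IAnnotationModel is at hand; getAnnotationIterator() is the standard accessor on that interface, and everything else uses only the constructor and methods defined above. The package declaration is inferred from the file path.
package com.redhat.ceylon.eclipse.code.hover; // per the file path
import java.util.Iterator;
import org.eclipse.jface.text.source.Annotation;
import org.eclipse.jface.text.source.IAnnotationModel;
public class AnnotationIteratorSketch {
    // Prints the text of every annotation the iterator above lets through,
    // including refinement annotations.
    static void printVisibleAnnotations(IAnnotationModel model) {
        @SuppressWarnings("unchecked")
        Iterator<Annotation> all = model.getAnnotationIterator();
        AnnotationIterator filtered = new AnnotationIterator(all, true);
        while (filtered.hasNext()) {
            System.out.println(filtered.next().getText());
        }
    }
}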
1,376 |
final Runnable callback = new Runnable() {
@Override
public void run() {
final ODocument indexEntries = getIndexChanges();
if (indexEntries != null) {
final Map<String, OIndexInternal<?>> indexesToCommit = new HashMap<String, OIndexInternal<?>>();
for (Entry<String, Object> indexEntry : indexEntries) {
final OIndexInternal<?> index = indexes.get(indexEntry.getKey()).getInternal();
indexesToCommit.put(index.getName(), index.getInternal());
}
for (OIndexInternal<?> indexInternal : indexesToCommit.values())
indexInternal.preCommit();
for (Entry<String, Object> indexEntry : indexEntries) {
final OIndexInternal<?> index = indexesToCommit.get(indexEntry.getKey()); // avoid NPE before the null check below
if (index == null) {
OLogManager.instance().error(this, "Index with name " + indexEntry.getKey() + " was not found.");
throw new OIndexException("Index with name " + indexEntry.getKey() + " was not found.");
} else
index.addTxOperation((ODocument) indexEntry.getValue());
}
try {
for (OIndexInternal<?> indexInternal : indexesToCommit.values())
indexInternal.commit();
} finally {
for (OIndexInternal<?> indexInternal : indexesToCommit.values())
indexInternal.postCommit();
}
}
}
};
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_tx_OTransactionOptimistic.java
|
1,146 |
public class OSQLMethodIndexOf extends OAbstractSQLMethod {
public static final String NAME = "indexof";
public OSQLMethodIndexOf() {
super(NAME, 1, 2);
}
@Override
public Object execute(OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) {
final String param0 = iMethodParams[0].toString();
if (param0.length() > 2) {
String toFind = param0.substring(1, param0.length() - 1);
int startIndex = iMethodParams.length > 1 ? Integer.parseInt(iMethodParams[1].toString()) : 0;
ioResult = ioResult != null ? ioResult.toString().indexOf(toFind, startIndex) : null;
}
return ioResult;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodIndexOf.java
|
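A direct-call sketch of execute(). Note from the substring(1, length - 1) above that the first parameter is expected to arrive still quoted, so the quotes are part of the argument here; the null record and context are safe because execute() never touches them. The package declaration is inferred from the file path.
package com.orientechnologies.orient.core.sql.method.misc; // per the file path
public class IndexOfMethodSketch {
    public static void main(String[] args) {
        OSQLMethodIndexOf method = new OSQLMethodIndexOf();
        // "hello".indexOf("ell", 0) == 1; the surrounding quotes are stripped inside execute()
        Object result = method.execute(null, null, "hello", new Object[] { "'ell'" });
        System.out.println(result); // 1
    }
}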
730 |
ItemListener listener = new ItemListener() {
@Override
public void itemAdded(ItemEvent item) {
send(item);
}
@Override
public void itemRemoved(ItemEvent item) {
send(item);
}
private void send(ItemEvent event) {
if (endpoint.live()) {
Data item = clientEngine.toData(event.getItem());
final ItemEventType eventType = event.getEventType();
final String uuid = event.getMember().getUuid();
PortableItemEvent portableItemEvent = new PortableItemEvent(item, eventType, uuid);
endpoint.sendEvent(portableItemEvent, getCallId());
}
}
};
| 1no label
|
hazelcast_src_main_java_com_hazelcast_collection_client_CollectionAddListenerRequest.java
|
88 |
public class Decimal extends AbstractDecimal {
public static final int DECIMALS = 3;
public static final Decimal MIN_VALUE = new Decimal(minDoubleValue(DECIMALS));
public static final Decimal MAX_VALUE = new Decimal(maxDoubleValue(DECIMALS));
private Decimal() {}
public Decimal(double value) {
super(value, DECIMALS);
}
private Decimal(long format) {
super(format, DECIMALS);
}
public static class DecimalSerializer extends AbstractDecimalSerializer<Decimal> {
public DecimalSerializer() {
super(DECIMALS, Decimal.class);
}
@Override
protected Decimal construct(long format, int decimals) {
assert decimals==DECIMALS;
return new Decimal(format);
}
}
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Decimal.java
|
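A small sketch using only the members visible above (constructor, DECIMALS, MIN_VALUE, MAX_VALUE). Arithmetic and toString behavior live in AbstractDecimal, which is not shown here, so the printed formatting is an assumption.
import com.thinkaurelius.titan.core.attribute.Decimal;
public class DecimalSketch {
    public static void main(String[] args) {
        Decimal price = new Decimal(19.999);   // stored as a fixed-point value with DECIMALS = 3 digits
        System.out.println(Decimal.DECIMALS);  // 3
        System.out.println(Decimal.MIN_VALUE); // formatting inherited from AbstractDecimal (not shown)
        System.out.println(Decimal.MAX_VALUE);
    }
}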
622 |
public class ShardStats extends BroadcastShardOperationResponse implements ToXContent {
private ShardRouting shardRouting;
CommonStats stats;
ShardStats() {
}
public ShardStats(IndexShard indexShard, CommonStatsFlags flags) {
super(indexShard.routingEntry().index(), indexShard.routingEntry().id());
this.shardRouting = indexShard.routingEntry();
this.stats = new CommonStats(indexShard, flags);
}
/**
* The shard routing information (cluster wide shard state).
*/
public ShardRouting getShardRouting() {
return this.shardRouting;
}
public CommonStats getStats() {
return this.stats;
}
public static ShardStats readShardStats(StreamInput in) throws IOException {
ShardStats stats = new ShardStats();
stats.readFrom(in);
return stats;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardRouting = readShardRoutingEntry(in);
stats = CommonStats.readCommonStats(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardRouting.writeTo(out);
stats.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.ROUTING)
.field(Fields.STATE, shardRouting.state())
.field(Fields.PRIMARY, shardRouting.primary())
.field(Fields.NODE, shardRouting.currentNodeId())
.field(Fields.RELOCATING_NODE, shardRouting.relocatingNodeId())
.endObject();
stats.toXContent(builder, params);
return builder;
}
static final class Fields {
static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
static final XContentBuilderString STATE = new XContentBuilderString("state");
static final XContentBuilderString PRIMARY = new XContentBuilderString("primary");
static final XContentBuilderString NODE = new XContentBuilderString("node");
static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node");
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_stats_ShardStats.java
|
254 |
public class StoreRateLimiting {
public static interface Provider {
StoreRateLimiting rateLimiting();
}
public interface Listener {
void onPause(long nanos);
}
public static enum Type {
NONE,
MERGE,
ALL;
public static Type fromString(String type) throws ElasticsearchIllegalArgumentException {
if ("none".equalsIgnoreCase(type)) {
return NONE;
} else if ("merge".equalsIgnoreCase(type)) {
return MERGE;
} else if ("all".equalsIgnoreCase(type)) {
return ALL;
}
throw new ElasticsearchIllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]");
}
}
private final SimpleRateLimiter rateLimiter = new SimpleRateLimiter(0);
private volatile SimpleRateLimiter actualRateLimiter;
private volatile Type type;
public StoreRateLimiting() {
}
@Nullable
public RateLimiter getRateLimiter() {
return actualRateLimiter;
}
public void setMaxRate(ByteSizeValue rate) {
if (rate.bytes() <= 0) {
actualRateLimiter = null;
} else if (actualRateLimiter == null) {
actualRateLimiter = rateLimiter;
actualRateLimiter.setMbPerSec(rate.mbFrac());
} else {
assert rateLimiter == actualRateLimiter;
rateLimiter.setMbPerSec(rate.mbFrac());
}
}
public Type getType() {
return type;
}
public void setType(Type type) {
this.type = type;
}
public void setType(String type) throws ElasticsearchIllegalArgumentException {
this.type = Type.fromString(type);
}
}
| 0true
|
src_main_java_org_apache_lucene_store_StoreRateLimiting.java
|
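A configuration sketch mirroring the setters above. ByteSizeValue/ByteSizeUnit are the standard Elasticsearch size types; their use here is the only assumption beyond the class itself, and the package declaration matches the file path.
package org.apache.lucene.store; // per the file path; RateLimiter lives in the same package
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
public class StoreRateLimitingSketch {
    public static void main(String[] args) {
        StoreRateLimiting limiting = new StoreRateLimiting();
        limiting.setType("merge");                                 // parsed case-insensitively by Type.fromString
        limiting.setMaxRate(new ByteSizeValue(20, ByteSizeUnit.MB));
        RateLimiter limiter = limiting.getRateLimiter();           // non-null once the rate is positive
        System.out.println(limiting.getType() + " @ " + limiter);  // MERGE @ ...
    }
}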
512 |
public class TransportDeleteIndexAction extends TransportMasterNodeOperationAction<DeleteIndexRequest, DeleteIndexResponse> {
private final MetaDataDeleteIndexService deleteIndexService;
private final DestructiveOperations destructiveOperations;
@Inject
public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService,
NodeSettingsService nodeSettingsService) {
super(settings, transportService, clusterService, threadPool);
this.deleteIndexService = deleteIndexService;
this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService);
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected String transportAction() {
return DeleteIndexAction.NAME;
}
@Override
protected DeleteIndexRequest newRequest() {
return new DeleteIndexRequest();
}
@Override
protected DeleteIndexResponse newResponse() {
return new DeleteIndexResponse();
}
@Override
protected void doExecute(DeleteIndexRequest request, ActionListener<DeleteIndexResponse> listener) {
destructiveOperations.failDestructive(request.indices());
super.doExecute(request, listener);
}
@Override
protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, request.indices());
}
@Override
protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) throws ElasticsearchException {
request.indices(state.metaData().concreteIndices(request.indices(), request.indicesOptions()));
if (request.indices().length == 0) {
listener.onResponse(new DeleteIndexResponse(true));
return;
}
// TODO: this API should be improved, currently, if one delete index failed, we send a failure, we should send a response array that includes all the indices that were deleted
final CountDown count = new CountDown(request.indices().length);
for (final String index : request.indices()) {
deleteIndexService.deleteIndex(new MetaDataDeleteIndexService.Request(index).timeout(request.timeout()).masterTimeout(request.masterNodeTimeout()), new MetaDataDeleteIndexService.Listener() {
private volatile Throwable lastFailure;
private volatile boolean ack = true;
@Override
public void onResponse(MetaDataDeleteIndexService.Response response) {
if (!response.acknowledged()) {
ack = false;
}
if (count.countDown()) {
if (lastFailure != null) {
listener.onFailure(lastFailure);
} else {
listener.onResponse(new DeleteIndexResponse(ack));
}
}
}
@Override
public void onFailure(Throwable t) {
logger.debug("[{}] failed to delete index", t, index);
lastFailure = t;
if (count.countDown()) {
listener.onFailure(t);
}
}
});
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_delete_TransportDeleteIndexAction.java
|
602 |
public class OIndexFullText extends OIndexMultiValues {
private static final String CONFIG_STOP_WORDS = "stopWords";
private static final String CONFIG_SEPARATOR_CHARS = "separatorChars";
private static final String CONFIG_IGNORE_CHARS = "ignoreChars";
private static String DEF_SEPARATOR_CHARS = " \r\n\t:;,.|+*/\\=!?[]()";
private static String DEF_IGNORE_CHARS = "'\"";
private static String DEF_STOP_WORDS = "the in a at as and or for his her " + "him this that what which while "
+ "up with be was is";
private final String separatorChars = DEF_SEPARATOR_CHARS;
private final String ignoreChars = DEF_IGNORE_CHARS;
private final Set<String> stopWords;
public OIndexFullText(String typeId, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine,
String valueContainerAlgorithm) {
super(typeId, algorithm, indexEngine, valueContainerAlgorithm);
stopWords = new HashSet<String>(OStringSerializerHelper.split(DEF_STOP_WORDS, ' '));
}
/**
* Indexes a value. Splits the value into single words and indexes each one. Saving the index is the
* responsibility of the caller.
*/
@Override
public OIndexFullText put(Object key, final OIdentifiable iSingleValue) {
checkForRebuild();
if (key == null)
return this;
key = getCollatingValue(key);
modificationLock.requestModificationLock();
try {
final List<String> words = splitIntoWords(key.toString());
// FOREACH WORD CREATE THE LINK TO THE CURRENT DOCUMENT
for (final String word : words) {
acquireExclusiveLock();
try {
Set<OIdentifiable> refs;
// SEARCH FOR THE WORD
refs = indexEngine.get(word);
if (refs == null) {
// WORD NOT EXISTS: CREATE THE KEYWORD CONTAINER THE FIRST TIME THE WORD IS FOUND
if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm)) {
refs = new OIndexRIDContainer(getName());
} else {
refs = new OMVRBTreeRIDSet();
((OMVRBTreeRIDSet) refs).setAutoConvertToRecord(false);
}
}
// ADD THE CURRENT DOCUMENT AS REF FOR THAT WORD
refs.add(iSingleValue);
// SAVE THE INDEX ENTRY
indexEngine.put(word, refs);
} finally {
releaseExclusiveLock();
}
}
return this;
} finally {
modificationLock.releaseModificationLock();
}
}
@Override
protected void putInSnapshot(Object key, OIdentifiable value, Map<Object, Object> snapshot) {
if (key == null)
return;
key = getCollatingValue(key);
final List<String> words = splitIntoWords(key.toString());
// FOREACH WORD CREATE THE LINK TO THE CURRENT DOCUMENT
for (final String word : words) {
Set<OIdentifiable> refs;
final Object snapshotValue = snapshot.get(word);
if (snapshotValue == null)
refs = indexEngine.get(word);
else if (snapshotValue.equals(RemovedValue.INSTANCE))
refs = null;
else
refs = (Set<OIdentifiable>) snapshotValue;
if (refs == null) {
// WORD NOT EXISTS: CREATE THE KEYWORD CONTAINER THE FIRST TIME THE WORD IS FOUND
if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm)) {
refs = new OIndexRIDContainer(getName());
} else {
refs = new OMVRBTreeRIDSet();
((OMVRBTreeRIDSet) refs).setAutoConvertToRecord(false);
}
snapshot.put(word, refs);
}
// ADD THE CURRENT DOCUMENT AS REF FOR THAT WORD
refs.add(value.getIdentity());
}
}
/**
* Splits the passed-in key into several words, then removes records whose keys equal any item of the split result and whose
* values equal the passed-in value.
*
* @param key
* Key to remove.
* @param value
* Value to remove.
* @return <code>true</code> if at least one record is removed.
*/
@Override
public boolean remove(Object key, final OIdentifiable value) {
checkForRebuild();
key = getCollatingValue(key);
modificationLock.requestModificationLock();
try {
final List<String> words = splitIntoWords(key.toString());
boolean removed = false;
for (final String word : words) {
acquireExclusiveLock();
try {
final Set<OIdentifiable> recs = indexEngine.get(word);
if (recs != null && !recs.isEmpty()) {
if (recs.remove(value)) {
if (recs.isEmpty())
indexEngine.remove(word);
else
indexEngine.put(word, recs);
removed = true;
}
}
} finally {
releaseExclusiveLock();
}
}
return removed;
} finally {
modificationLock.releaseModificationLock();
}
}
@Override
protected void removeFromSnapshot(Object key, OIdentifiable value, Map<Object, Object> snapshot) {
key = getCollatingValue(key);
final List<String> words = splitIntoWords(key.toString());
for (final String word : words) {
final Set<OIdentifiable> recs;
final Object snapshotValue = snapshot.get(word);
if (snapshotValue == null)
recs = indexEngine.get(word);
else if (snapshotValue.equals(RemovedValue.INSTANCE))
recs = null;
else
recs = (Set<OIdentifiable>) snapshotValue;
if (recs != null && !recs.isEmpty()) {
if (recs.remove(value)) {
if (recs.isEmpty())
snapshot.put(word, RemovedValue.INSTANCE);
else
snapshot.put(word, recs);
}
}
}
}
@Override
public OIndexInternal<?> create(String name, OIndexDefinition indexDefinition, String clusterIndexName,
Set<String> clustersToIndex, boolean rebuild, OProgressListener progressListener, OStreamSerializer valueSerializer) {
if (indexDefinition.getFields().size() > 1) {
throw new OIndexException(type + " indexes cannot be used as composite ones.");
}
return super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener, valueSerializer);
}
@Override
public OIndexMultiValues create(String name, OIndexDefinition indexDefinition, String clusterIndexName,
Set<String> clustersToIndex, boolean rebuild, OProgressListener progressListener) {
if (indexDefinition.getFields().size() > 1) {
throw new OIndexException(type + " indexes cannot be used as composite ones.");
}
return super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener);
}
@Override
public ODocument updateConfiguration() {
super.updateConfiguration();
configuration.setInternalStatus(ORecordElement.STATUS.UNMARSHALLING);
try {
configuration.field(CONFIG_SEPARATOR_CHARS, separatorChars);
configuration.field(CONFIG_IGNORE_CHARS, ignoreChars);
configuration.field(CONFIG_STOP_WORDS, stopWords);
} finally {
configuration.setInternalStatus(ORecordElement.STATUS.LOADED);
}
return configuration;
}
private List<String> splitIntoWords(final String iKey) {
final List<String> result = new ArrayList<String>();
final List<String> words = (List<String>) OStringSerializerHelper.split(new ArrayList<String>(), iKey, 0, -1, separatorChars);
final StringBuilder buffer = new StringBuilder();
// FOR EACH WORD, STRIP THE IGNORED CHARS AND SKIP STOP WORDS
char c;
boolean ignore;
for (String word : words) {
buffer.setLength(0);
for (int i = 0; i < word.length(); ++i) {
c = word.charAt(i);
ignore = false;
for (int k = 0; k < ignoreChars.length(); ++k)
if (c == ignoreChars.charAt(k)) {
ignore = true;
break;
}
if (!ignore)
buffer.append(c);
}
word = buffer.toString();
// CHECK IF IT'S A STOP WORD
if (stopWords.contains(word))
continue;
result.add(word);
}
return result;
}
public boolean canBeUsedInEqualityOperators() {
return false;
}
public boolean supportsOrderedIterations() {
return false;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexFullText.java
|
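A standalone sketch of the tokenization rules implemented in splitIntoWords() above: split on the separator characters, strip the ignore characters, drop stop words. The constants are copied from the class; it uses java.util.StringTokenizer rather than OStringSerializerHelper.split, so edge-case behavior may differ slightly.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
public class FullTextTokenizerSketch {
    static final String SEPARATORS = " \r\n\t:;,.|+*/\\=!?[]()";
    static final String IGNORE = "'\"";
    static final Set<String> STOP_WORDS = new HashSet<String>(Arrays.asList(
        ("the in a at as and or for his her him this that what which while "
            + "up with be was is").split(" ")));
    static List<String> splitIntoWords(String key) {
        List<String> result = new ArrayList<String>();
        StringTokenizer tokenizer = new StringTokenizer(key, SEPARATORS);
        while (tokenizer.hasMoreTokens()) {
            StringBuilder buffer = new StringBuilder();
            for (char c : tokenizer.nextToken().toCharArray()) {
                if (IGNORE.indexOf(c) < 0) {
                    buffer.append(c);             // keep everything except the ignore chars
                }
            }
            String word = buffer.toString();
            if (!STOP_WORDS.contains(word)) {
                result.add(word);                 // stop words never reach the index
            }
        }
        return result;
    }
    public static void main(String[] args) {
        System.out.println(splitIntoWords("the quick 'brown' fox")); // [quick, brown, fox]
    }
}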
390 |
new Thread(){
public void run() {
mm.forceUnlock(key);
forceUnlock.countDown();
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_multimap_ClientMultiMapLockTest.java
|
52 |
public class ClusterManager
extends LifecycleAdapter
{
public static class Builder
{
private final File root;
private final Provider provider = clusterOfSize( 3 );
private final Map<String, String> commonConfig = emptyMap();
private final Map<Integer, Map<String,String>> instanceConfig = new HashMap<>();
private HighlyAvailableGraphDatabaseFactory factory = new HighlyAvailableGraphDatabaseFactory();
private StoreDirInitializer initializer;
public Builder( File root )
{
this.root = root;
}
public Builder withSeedDir( final File seedDir )
{
return withStoreDirInitializer( new StoreDirInitializer()
{
@Override
public void initializeStoreDir( int serverId, File storeDir ) throws IOException
{
copyRecursively( seedDir, storeDir );
}
} );
}
public Builder withStoreDirInitializer( StoreDirInitializer initializer )
{
this.initializer = initializer;
return this;
}
public Builder withDbFactory( HighlyAvailableGraphDatabaseFactory dbFactory )
{
this.factory = dbFactory;
return this;
}
public ClusterManager build()
{
return new ClusterManager( this );
}
}
public interface StoreDirInitializer
{
void initializeStoreDir( int serverId, File storeDir ) throws IOException;
}
/**
* Provides a specification of which clusters to start in {@link ClusterManager#start()}.
*/
public interface Provider
{
Clusters clusters() throws Throwable;
}
/**
* Provider pointing out an XML file to read.
*
* @param clustersXml the XML file containing the cluster specifications.
*/
public static Provider fromXml( final URI clustersXml )
{
return new Provider()
{
@Override
public Clusters clusters() throws Exception
{
DocumentBuilder documentBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
Document clustersXmlDoc = documentBuilder.parse( clustersXml.toURL().openStream() );
return new ClustersXMLSerializer( documentBuilder ).read( clustersXmlDoc );
}
};
}
/**
* Provides a cluster specification with default values
*
* @param memberCount the total number of members in the cluster to start.
*/
public static Provider clusterOfSize( int memberCount )
{
Clusters.Cluster cluster = new Clusters.Cluster( "neo4j.ha" );
for ( int i = 0; i < memberCount; i++ )
{
cluster.getMembers().add( new Clusters.Member( 5001 + i, true ) );
}
final Clusters clusters = new Clusters();
clusters.getClusters().add( cluster );
return provided( clusters );
}
/**
* Provides a cluster specification with default values
* @param haMemberCount the total number of members in the cluster to start.
*/
public static Provider clusterWithAdditionalClients( int haMemberCount, int additionalClientCount )
{
Clusters.Cluster cluster = new Clusters.Cluster( "neo4j.ha" );
int counter = 0;
for ( int i = 0; i < haMemberCount; i++, counter++ )
{
cluster.getMembers().add( new Clusters.Member( 5001 + counter, true ) );
}
for ( int i = 0; i < additionalClientCount; i++, counter++ )
{
cluster.getMembers().add( new Clusters.Member( 5001 + counter, false ) );
}
final Clusters clusters = new Clusters();
clusters.getClusters().add( cluster );
return provided( clusters );
}
/**
* Provides a cluster specification with default values
* @param haMemberCount the total number of members in the cluster to start.
*/
public static Provider clusterWithAdditionalArbiters( int haMemberCount, int arbiterCount)
{
Clusters.Cluster cluster = new Clusters.Cluster( "neo4j.ha" );
int counter = 0;
for ( int i = 0; i < arbiterCount; i++, counter++ )
{
cluster.getMembers().add( new Clusters.Member( 5001 + counter, false ) );
}
for ( int i = 0; i < haMemberCount; i++, counter++ )
{
cluster.getMembers().add( new Clusters.Member( 5001 + counter, true ) );
}
final Clusters clusters = new Clusters();
clusters.getClusters().add( cluster );
return provided( clusters );
}
public static Provider provided( final Clusters clusters )
{
return new Provider()
{
@Override
public Clusters clusters() throws Throwable
{
return clusters;
}
};
}
LifeSupport life;
private final File root;
private final Map<String, String> commonConfig;
private final Map<Integer, Map<String, String>> instanceConfig;
private final Map<String, ManagedCluster> clusterMap = new HashMap<>();
private final Provider clustersProvider;
private final HighlyAvailableGraphDatabaseFactory dbFactory;
private final StoreDirInitializer storeDirInitializer;
public ClusterManager( Provider clustersProvider, File root, Map<String, String> commonConfig,
Map<Integer, Map<String, String>> instanceConfig,
HighlyAvailableGraphDatabaseFactory dbFactory )
{
this.clustersProvider = clustersProvider;
this.root = root;
this.commonConfig = commonConfig;
this.instanceConfig = instanceConfig;
this.dbFactory = dbFactory;
this.storeDirInitializer = null;
}
private ClusterManager( Builder builder )
{
this.clustersProvider = builder.provider;
this.root = builder.root;
this.commonConfig = builder.commonConfig;
this.instanceConfig = builder.instanceConfig;
this.dbFactory = builder.factory;
this.storeDirInitializer = builder.initializer;
}
public ClusterManager( Provider clustersProvider, File root, Map<String, String> commonConfig,
Map<Integer, Map<String, String>> instanceConfig )
{
this( clustersProvider, root, commonConfig, instanceConfig, new HighlyAvailableGraphDatabaseFactory() );
}
public ClusterManager( Provider clustersProvider, File root, Map<String, String> commonConfig )
{
this( clustersProvider, root, commonConfig, Collections.<Integer, Map<String, String>>emptyMap(),
new HighlyAvailableGraphDatabaseFactory() );
}
@Override
public void start() throws Throwable
{
Clusters clusters = clustersProvider.clusters();
life = new LifeSupport();
// Started so instances added here will be started immediately, and in case of exceptions they can be
// shutdown() or stop()ped properly
life.start();
for ( int i = 0; i < clusters.getClusters().size(); i++ )
{
Clusters.Cluster cluster = clusters.getClusters().get( i );
ManagedCluster managedCluster = new ManagedCluster( cluster );
clusterMap.put( cluster.getName(), managedCluster );
life.add( managedCluster );
}
}
@Override
public void stop() throws Throwable
{
life.stop();
}
@Override
public void shutdown() throws Throwable
{
life.shutdown();
}
/**
* Represent one cluster. It can retrieve the current master, random slave
* or all members. It can also temporarily fail an instance or shut it down.
*/
public class ManagedCluster extends LifecycleAdapter
{
private final Clusters.Cluster spec;
private final String name;
private final Map<Integer, HighlyAvailableGraphDatabaseProxy> members = new ConcurrentHashMap<>();
private final List<ClusterMembers> arbiters = new ArrayList<>( );
ManagedCluster( Clusters.Cluster spec ) throws URISyntaxException, IOException
{
this.spec = spec;
this.name = spec.getName();
for ( int i = 0; i < spec.getMembers().size(); i++ )
{
startMember( i + 1 );
}
for ( HighlyAvailableGraphDatabaseProxy member : members.values() )
{
insertInitialData( member.get(), name, member.get().getConfig().get( ClusterSettings.server_id ) );
}
}
public String getInitialHostsConfigString()
{
StringBuilder result = new StringBuilder();
for ( HighlyAvailableGraphDatabase member : getAllMembers() )
{
result.append( result.length() > 0 ? "," : "" ).append( ":" )
.append( member.getDependencyResolver().resolveDependency(
ClusterClient.class ).getClusterServer().getPort() );
}
return result.toString();
}
@Override
public void stop() throws Throwable
{
for ( HighlyAvailableGraphDatabaseProxy member : members.values() )
{
member.get().shutdown();
}
}
/**
* @return all started members in this cluster.
*/
public Iterable<HighlyAvailableGraphDatabase> getAllMembers()
{
return Iterables.map( new Function<HighlyAvailableGraphDatabaseProxy, HighlyAvailableGraphDatabase>()
{
@Override
public HighlyAvailableGraphDatabase apply( HighlyAvailableGraphDatabaseProxy from )
{
return from.get();
}
}, members.values() );
}
public Iterable<ClusterMembers> getArbiters()
{
return arbiters;
}
/**
* @return the current master in the cluster.
* @throws IllegalStateException if there's no current master.
*/
public HighlyAvailableGraphDatabase getMaster()
{
for ( HighlyAvailableGraphDatabase graphDatabaseService : getAllMembers() )
{
if ( graphDatabaseService.isMaster() )
{
return graphDatabaseService;
}
}
throw new IllegalStateException( "No master found in cluster " + name );
}
/**
* @param except do not return any of the dbs found in this array
* @return a slave in this cluster.
* @throws IllegalStateException if no slave was found in this cluster.
*/
public HighlyAvailableGraphDatabase getAnySlave( HighlyAvailableGraphDatabase... except )
{
Set<HighlyAvailableGraphDatabase> exceptSet = new HashSet<>( asList( except ) );
for ( HighlyAvailableGraphDatabase graphDatabaseService : getAllMembers() )
{
if ( graphDatabaseService.getInstanceState().equals( "SLAVE" ) && !exceptSet.contains(
graphDatabaseService ) )
{
return graphDatabaseService;
}
}
throw new IllegalStateException( "No slave found in cluster " + name );
}
/**
* @param serverId the server id to return the db for.
* @return the {@link HighlyAvailableGraphDatabase} with the given server id.
* @throws IllegalStateException if that db isn't started or no such
* db exists in the cluster.
*/
public HighlyAvailableGraphDatabase getMemberByServerId( int serverId )
{
HighlyAvailableGraphDatabase db = members.get( serverId ).get();
if ( db == null )
{
throw new IllegalStateException( "Db " + serverId + " not found at the moment in " + name );
}
return db;
}
/**
* Shuts down a member of this cluster. A {@link RepairKit} is returned
* which is able to restore the instance (i.e. start it again).
*
* @param db the {@link HighlyAvailableGraphDatabase} to shut down.
* @return a {@link RepairKit} which can start it again.
* @throws IllegalArgumentException if the given db isn't a member of this cluster.
*/
public RepairKit shutdown( HighlyAvailableGraphDatabase db )
{
assertMember( db );
int serverId = db.getDependencyResolver().resolveDependency( Config.class ).get( ClusterSettings.server_id );
members.remove( serverId );
life.remove( db );
db.shutdown();
return new StartDatabaseAgainKit( this, serverId );
}
private void assertMember( HighlyAvailableGraphDatabase db )
{
for ( HighlyAvailableGraphDatabaseProxy highlyAvailableGraphDatabaseProxy : members.values() )
{
if ( highlyAvailableGraphDatabaseProxy.get().equals( db ) )
{
return;
}
}
throw new IllegalArgumentException( "Db " + db + " not a member of this cluster " + name );
}
/**
* WARNING: beware of hacks.
* <p/>
* Fails a member of this cluster by making it not respond to heart beats.
* A {@link RepairKit} is returned which is able to repair the instance
* (i.e start the network) again.
*
* @param db the {@link HighlyAvailableGraphDatabase} to fail.
* @return a {@link RepairKit} which can repair the failure.
* @throws IllegalArgumentException if the given db isn't a member of this cluster.
*/
public RepairKit fail( HighlyAvailableGraphDatabase db ) throws Throwable
{
assertMember( db );
ClusterClient clusterClient = db.getDependencyResolver().resolveDependency( ClusterClient.class );
LifeSupport clusterClientLife = (LifeSupport) accessible( clusterClient.getClass().getDeclaredField(
"life" ) ).get( clusterClient );
NetworkReceiver receiver = instance( NetworkReceiver.class, clusterClientLife.getLifecycleInstances() );
receiver.stop();
ExecutorLifecycleAdapter statemachineExecutor = instance(ExecutorLifecycleAdapter.class, clusterClientLife.getLifecycleInstances());
statemachineExecutor.stop();
NetworkSender sender = instance( NetworkSender.class, clusterClientLife.getLifecycleInstances() );
sender.stop();
List<Lifecycle> stoppedServices = new ArrayList<>();
stoppedServices.add( sender );
stoppedServices.add(statemachineExecutor);
stoppedServices.add( receiver );
return new StartNetworkAgainKit( db, stoppedServices );
}
private void startMember( int serverId ) throws URISyntaxException, IOException
{
Clusters.Member member = spec.getMembers().get( serverId-1 );
StringBuilder initialHosts = new StringBuilder( spec.getMembers().get( 0 ).getHost() );
for (int i = 1; i < spec.getMembers().size(); i++)
{
initialHosts.append( "," ).append( spec.getMembers().get( i ).getHost() );
}
File parent = new File( root, name );
URI clusterUri = new URI( "cluster://" + member.getHost() );
if ( member.isFullHaMember() )
{
int clusterPort = clusterUri.getPort();
int haPort = clusterUri.getPort() + 3000;
File storeDir = new File( parent, "server" + serverId );
if ( storeDirInitializer != null)
{
storeDirInitializer.initializeStoreDir( serverId, storeDir );
}
GraphDatabaseBuilder graphDatabaseBuilder = dbFactory.newHighlyAvailableDatabaseBuilder(
storeDir.getAbsolutePath() ).
setConfig( ClusterSettings.cluster_name, name ).
setConfig( ClusterSettings.initial_hosts, initialHosts.toString() ).
setConfig( ClusterSettings.server_id, serverId + "" ).
setConfig( ClusterSettings.cluster_server, "0.0.0.0:"+clusterPort).
setConfig( HaSettings.ha_server, ":" + haPort ).
setConfig( OnlineBackupSettings.online_backup_enabled, Settings.FALSE ).
setConfig( commonConfig );
if ( instanceConfig.containsKey( serverId ) )
{
graphDatabaseBuilder.setConfig( instanceConfig.get( serverId ) );
}
config( graphDatabaseBuilder, name, serverId );
final HighlyAvailableGraphDatabaseProxy graphDatabase = new HighlyAvailableGraphDatabaseProxy(
graphDatabaseBuilder );
members.put( serverId, graphDatabase );
life.add( new LifecycleAdapter()
{
@Override
public void stop() throws Throwable
{
graphDatabase.get().shutdown();
}
} );
}
else
{
Map<String, String> config = MapUtil.stringMap(
ClusterSettings.cluster_name.name(), name,
ClusterSettings.initial_hosts.name(), initialHosts.toString(),
ClusterSettings.server_id.name(), serverId + "",
ClusterSettings.cluster_server.name(), "0.0.0.0:"+clusterUri.getPort(),
GraphDatabaseSettings.store_dir.name(), new File( parent, "arbiter" + serverId ).getAbsolutePath() );
Config config1 = new Config( config );
Logging clientLogging = life.add( new LogbackService( config1, new LoggerContext() ) );
ObjectStreamFactory objectStreamFactory = new ObjectStreamFactory();
ClusterClient clusterClient = new ClusterClient( ClusterClient.adapt( config1 ),
clientLogging, new NotElectableElectionCredentialsProvider(), objectStreamFactory,
objectStreamFactory );
arbiters.add(new ClusterMembers(clusterClient, clusterClient, new ClusterMemberEvents()
{
@Override
public void addClusterMemberListener( ClusterMemberListener listener )
{
// noop
}
@Override
public void removeClusterMemberListener( ClusterMemberListener listener )
{
// noop
}
}, clusterClient.getServerId() ));
life.add( new FutureLifecycleAdapter<>( clusterClient ) );
}
}
/**
* Will await a condition for the default max time.
*
* @param predicate {@link Predicate} that should return true
* signalling that the condition has been met.
* @throws IllegalStateException if the condition wasn't met
* within the max time.
*/
public void await( Predicate<ManagedCluster> predicate )
{
await( predicate, 60 );
}
/**
* Will await a condition for the given max time.
*
* @param predicate {@link Predicate} that should return true
* signalling that the condition has been met.
* @throws IllegalStateException if the condition wasn't met
* within the max time.
*/
public void await( Predicate<ManagedCluster> predicate, int maxSeconds )
{
long end = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis( maxSeconds );
while ( System.currentTimeMillis() < end )
{
if ( predicate.accept( this ) )
{
return;
}
try
{
Thread.sleep( 100 );
}
catch ( InterruptedException e )
{
// Ignore
}
}
String state = printState( this );
throw new IllegalStateException( format(
"Awaited condition never met, waited %s for %s:%n%s", maxSeconds, predicate, state ) );
}
/**
* The total number of members of the cluster.
*/
public int size()
{
return spec.getMembers().size();
}
public int getServerId( HighlyAvailableGraphDatabase member )
{
assertMember( member );
return member.getConfig().get( ClusterSettings.server_id );
}
public File getStoreDir( HighlyAvailableGraphDatabase member )
{
assertMember( member );
return member.getConfig().get( GraphDatabaseSettings.store_dir );
}
public void sync( HighlyAvailableGraphDatabase... except )
{
Set<HighlyAvailableGraphDatabase> exceptSet = new HashSet<>( asList( except ) );
for ( HighlyAvailableGraphDatabase db : getAllMembers() )
{
if ( !exceptSet.contains( db ) )
{
db.getDependencyResolver().resolveDependency( UpdatePuller.class ).pullUpdates();
}
}
}
}
private static final class HighlyAvailableGraphDatabaseProxy
{
private GraphDatabaseService result;
private Future<GraphDatabaseService> untilThen;
private final ExecutorService executor;
public HighlyAvailableGraphDatabaseProxy( final GraphDatabaseBuilder graphDatabaseBuilder )
{
Callable<GraphDatabaseService> starter = new Callable<GraphDatabaseService>()
{
@Override
public GraphDatabaseService call() throws Exception
{
return graphDatabaseBuilder.newGraphDatabase();
}
};
executor = Executors.newFixedThreadPool( 1 );
untilThen = executor.submit( starter );
}
public HighlyAvailableGraphDatabase get()
{
if ( result == null )
{
try
{
result = untilThen.get();
}
catch ( InterruptedException | ExecutionException e )
{
throw new RuntimeException( e );
}
finally
{
executor.shutdownNow();
}
}
return (HighlyAvailableGraphDatabase) result;
}
}
private static final class FutureLifecycleAdapter<T extends Lifecycle> extends LifecycleAdapter
{
private final T wrapped;
private Future<Void> currentFuture;
private final ExecutorService starter;
public FutureLifecycleAdapter( T toWrap)
{
wrapped = toWrap;
starter = Executors.newFixedThreadPool( 1 );
}
@Override
public void init() throws Throwable
{
currentFuture = starter.submit( new Callable<Void>()
{
@Override
public Void call() throws Exception
{
try
{
wrapped.init();
}
catch ( Throwable throwable )
{
throw new RuntimeException( throwable );
}
return null;
}
} );
}
@Override
public void start() throws Throwable
{
currentFuture.get();
currentFuture = starter.submit( new Callable<Void>()
{
@Override
public Void call() throws Exception
{
try
{
wrapped.start();
}
catch ( Throwable throwable )
{
throw new RuntimeException( throwable );
}
return null;
}
} );
}
@Override
public void stop() throws Throwable
{
currentFuture.get();
currentFuture = starter.submit( new Callable<Void>()
{
@Override
public Void call() throws Exception
{
try
{
wrapped.stop();
}
catch ( Throwable throwable )
{
throw new RuntimeException( throwable );
}
return null;
}
} );
}
@Override
public void shutdown() throws Throwable
{
currentFuture = starter.submit( new Callable<Void>()
{
@Override
public Void call() throws Exception
{
try
{
wrapped.shutdown();
}
catch ( Throwable throwable )
{
throw new RuntimeException( throwable );
}
return null;
}
} );
currentFuture.get();
starter.shutdownNow();
}
}
/**
* The current master sees this many slaves as available.
*
* @param count number of slaves to see as available.
*/
public static Predicate<ManagedCluster> masterSeesSlavesAsAvailable( final int count )
{
return new Predicate<ClusterManager.ManagedCluster>()
{
@Override
public boolean accept( ManagedCluster cluster )
{
return count( cluster.getMaster().getDependencyResolver().resolveDependency( Slaves.class ).getSlaves
() ) >= count;
}
@Override
public String toString()
{
return "Master should see " + count + " slaves as available";
}
};
}
/**
* The current master sees all slaves in the cluster as available.
* Based on the total number of members in the cluster.
*/
public static Predicate<ManagedCluster> masterSeesAllSlavesAsAvailable()
{
return new Predicate<ClusterManager.ManagedCluster>()
{
@Override
public boolean accept( ManagedCluster cluster )
{
return count( cluster.getMaster().getDependencyResolver().resolveDependency( Slaves.class ).getSlaves
() ) >= cluster.size() - 1;
}
@Override
public String toString()
{
return "Master should see all slaves as available";
}
};
}
/**
* There must be a master available. Instances may optionally be excepted, which is useful
* when awaiting the re-election of a different master.
*/
public static Predicate<ManagedCluster> masterAvailable( HighlyAvailableGraphDatabase... except )
{
final Set<HighlyAvailableGraphDatabase> exceptSet = new HashSet<>( asList( except ) );
return new Predicate<ClusterManager.ManagedCluster>()
{
@Override
public boolean accept( ManagedCluster cluster )
{
for ( HighlyAvailableGraphDatabase graphDatabaseService : cluster.getAllMembers() )
{
if ( !exceptSet.contains( graphDatabaseService ))
{
if ( graphDatabaseService.isMaster() )
{
return true;
}
}
}
return false;
}
@Override
public String toString()
{
return "There's an available master";
}
};
}
/**
* The current master sees this many cluster members.
* @param count number of members the master should see.
*/
public static Predicate<ManagedCluster> masterSeesMembers( final int count )
{
return new Predicate<ClusterManager.ManagedCluster>()
{
@Override
public boolean accept( ManagedCluster cluster )
{
ClusterMembers members = cluster.getMaster().getDependencyResolver().resolveDependency( ClusterMembers.class );
return Iterables.count(members.getMembers()) == count;
}
@Override
public String toString()
{
return "Master should see " + count + " members";
}
};
}
public static Predicate<ManagedCluster> allSeesAllAsAvailable()
{
return new Predicate<ManagedCluster>()
{
@Override
public boolean accept( ManagedCluster cluster )
{
if ( !allSeesAllAsJoined().accept( cluster ) )
{
    return false;
}
for ( HighlyAvailableGraphDatabase database : cluster.getAllMembers() )
{
ClusterMembers members = database.getDependencyResolver().resolveDependency( ClusterMembers.class );
for ( ClusterMember clusterMember : members.getMembers() )
{
if ( clusterMember.getHARole().equals( "UNKNOWN" ) )
{
return false;
}
}
}
// Everyone sees everyone else as available!
return true;
}
@Override
public String toString()
{
return "All instances should see all others as available";
}
};
}
public static Predicate<ManagedCluster> allSeesAllAsJoined( )
{
return new Predicate<ManagedCluster>()
{
@Override
public boolean accept( ManagedCluster cluster )
{
int nrOfMembers = cluster.size();
for ( HighlyAvailableGraphDatabase database : cluster.getAllMembers() )
{
ClusterMembers members = database.getDependencyResolver().resolveDependency( ClusterMembers.class );
if ( Iterables.count( members.getMembers() ) < nrOfMembers )
{
    return false;
}
}
for ( ClusterMembers clusterMembers : cluster.getArbiters() )
{
if (Iterables.count(clusterMembers.getMembers()) < nrOfMembers)
{
return false;
}
}
// Everyone sees everyone else as joined!
return true;
}
@Override
public String toString()
{
return "All instances should see all others as joined";
}
};
}
public static Predicate<ManagedCluster> allAvailabilityGuardsReleased()
{
return new Predicate<ManagedCluster>()
{
@Override
public boolean accept( ManagedCluster item )
{
for ( HighlyAvailableGraphDatabaseProxy member : item.members.values() )
{
try
{
member.get().beginTx().close();
}
catch ( TransactionFailureException e )
{
return false;
}
}
return true;
}
};
}
private static String printState(ManagedCluster cluster)
{
StringBuilder buf = new StringBuilder();
for ( HighlyAvailableGraphDatabase database : cluster.getAllMembers() )
{
ClusterMembers members = database.getDependencyResolver().resolveDependency( ClusterMembers.class );
for ( ClusterMember clusterMember : members.getMembers() )
{
buf.append( clusterMember.getInstanceId() ).append( ":" ).append( clusterMember.getHARole() )
.append( "\n" );
}
buf.append( "\n" );
}
return buf.toString();
}
@SuppressWarnings("unchecked")
private <T> T instance( Class<T> classToFind, Iterable<?> from )
{
for ( Object item : from )
{
if ( classToFind.isAssignableFrom( item.getClass() ) )
{
return (T) item;
}
}
fail( "Couldn't find the network instance to fail. Internal field, so fragile sensitive to changes though" );
return null; // it will never get here.
}
private Field accessible( Field field )
{
field.setAccessible( true );
return field;
}
public ManagedCluster getCluster( String name )
{
if ( !clusterMap.containsKey( name ) )
{
throw new IllegalArgumentException( name );
}
return clusterMap.get( name );
}
public ManagedCluster getDefaultCluster()
{
return getCluster( "neo4j.ha" );
}
protected void config( GraphDatabaseBuilder builder, String clusterName, int serverId )
{
}
protected void insertInitialData( GraphDatabaseService db, String name, int serverId )
{
}
public interface RepairKit
{
HighlyAvailableGraphDatabase repair() throws Throwable;
}
private class StartNetworkAgainKit implements RepairKit
{
private final HighlyAvailableGraphDatabase db;
private final Iterable<Lifecycle> stoppedServices;
StartNetworkAgainKit( HighlyAvailableGraphDatabase db, Iterable<Lifecycle> stoppedServices )
{
this.db = db;
this.stoppedServices = stoppedServices;
}
@Override
public HighlyAvailableGraphDatabase repair() throws Throwable
{
for ( Lifecycle stoppedService : stoppedServices )
{
stoppedService.start();
}
return db;
}
}
private class StartDatabaseAgainKit implements RepairKit
{
private final int serverId;
private final ManagedCluster cluster;
public StartDatabaseAgainKit( ManagedCluster cluster, int serverId )
{
this.cluster = cluster;
this.serverId = serverId;
}
@Override
public HighlyAvailableGraphDatabase repair() throws Throwable
{
cluster.startMember( serverId );
return cluster.getMemberByServerId( serverId );
}
}
}
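A hedged usage sketch for the predicates above: they are meant to be polled against a ManagedCluster until they hold. The await helper assumed here is not shown in this snippet, so treat it as an illustration only.
ManagedCluster cluster = clusterManager.getDefaultCluster();  // 'clusterManager' is a placeholder instance
cluster.await( allSeesAllAsJoined() );            // assumed polling helper, name not confirmed above
cluster.await( allSeesAllAsAvailable() );
cluster.await( allAvailabilityGuardsReleased() );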
| 1no label
|
enterprise_ha_src_test_java_org_neo4j_test_ha_ClusterManager.java
|
2,234 |
class CustomBoostFactorWeight extends Weight {
final Weight subQueryWeight;
public CustomBoostFactorWeight(Weight subQueryWeight) throws IOException {
this.subQueryWeight = subQueryWeight;
}
public Query getQuery() {
return FunctionScoreQuery.this;
}
@Override
public float getValueForNormalization() throws IOException {
float sum = subQueryWeight.getValueForNormalization();
sum *= getBoost() * getBoost();
return sum;
}
@Override
public void normalize(float norm, float topLevelBoost) {
subQueryWeight.normalize(norm, topLevelBoost * getBoost());
}
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
// We ignore the scoreDocsInOrder parameter because we need to score in
// order when documents are scored with a script; the ShardLookup depends
// on in-order scoring.
Scorer subQueryScorer = subQueryWeight.scorer(context, true, false, acceptDocs);
if (subQueryScorer == null) {
return null;
}
function.setNextReader(context);
return new CustomBoostFactorScorer(this, subQueryScorer, function, maxBoost, combineFunction);
}
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
Explanation subQueryExpl = subQueryWeight.explain(context, doc);
if (!subQueryExpl.isMatch()) {
return subQueryExpl;
}
function.setNextReader(context);
Explanation functionExplanation = function.explainScore(doc, subQueryExpl);
return combineFunction.explain(getBoost(), subQueryExpl, functionExplanation, maxBoost);
}
}
| 1no label
|
src_main_java_org_elasticsearch_common_lucene_search_function_FunctionScoreQuery.java
|
324 |
final EntryListener listener = new EntryAdapter() {
public void entryAdded(EntryEvent event) {
latch.countDown();
}
public void entryEvicted(EntryEvent event) {
final Object value = event.getValue();
final Object oldValue = event.getOldValue();
if (value != null) {
nullLatch.countDown();
}
if (oldValue != null) {
nullLatch.countDown();
}
latch.countDown();
}
};
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
2,139 |
public class ObjectRecordFactory implements RecordFactory<Object> {
private final SerializationService serializationService;
private final boolean statisticsEnabled;
public ObjectRecordFactory(MapConfig config, SerializationService serializationService) {
this.serializationService = serializationService;
this.statisticsEnabled = config.isStatisticsEnabled();
}
@Override
public InMemoryFormat getStorageFormat() {
return InMemoryFormat.OBJECT;
}
@Override
public Record<Object> newRecord(Data key, Object value) {
Object v = value;
if (value instanceof Data) {
v = serializationService.toObject(value);
}
// use the converted value in both branches so a Data payload is never stored in OBJECT format
return statisticsEnabled ? new ObjectRecordWithStats(key, v) : new ObjectRecord(key, v);
}
@Override
public void setValue(Record<Object> record, Object value) {
Object v = value;
if (value instanceof Data) {
v = serializationService.toObject(value);
}
record.setValue(v);
}
@Override
public boolean isEquals(Object value1, Object value2) {
Object v1 = value1 instanceof Data ? serializationService.toObject(value1) : value1;
Object v2 = value2 instanceof Data ? serializationService.toObject(value2) : value2;
if (v1 == null && v2 == null) {
return true;
}
if (v1 == null) {
return false;
}
if (v2 == null) {
return false;
}
return v1.equals(v2);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_record_ObjectRecordFactory.java
|
22 |
public class LogPruneStrategies
{
public static final LogPruneStrategy NO_PRUNING = new LogPruneStrategy()
{
@Override
public void prune( LogLoader source )
{ // Don't prune logs at all.
}
@Override
public String toString()
{
return "NO_PRUNING";
}
};
private interface Threshold
{
boolean reached( File file, long version, LogLoader source );
}
private abstract static class AbstractPruneStrategy implements LogPruneStrategy
{
protected final FileSystemAbstraction fileSystem;
AbstractPruneStrategy( FileSystemAbstraction fileSystem )
{
this.fileSystem = fileSystem;
}
@Override
public void prune( LogLoader source )
{
if ( source.getHighestLogVersion() == 0 )
return;
long upper = source.getHighestLogVersion()-1;
Threshold threshold = newThreshold();
boolean exceeded = false;
while ( upper >= 0 )
{
File file = source.getFileName( upper );
if ( !fileSystem.fileExists( file ) )
// There are no more logs to prune. Just return
return;
if ( fileSystem.getFileSize( file ) > LogIoUtils.LOG_HEADER_SIZE &&
threshold.reached( file, upper, source ) )
{
exceeded = true;
break;
}
upper--;
}
if ( !exceeded )
return;
// Find out which log is the earliest existing (lower bound to prune)
long lower = upper;
while ( fileSystem.fileExists( source.getFileName( lower-1 ) ) )
lower--;
// The reason we delete from lower to upper is that if it crashes in the middle
// we can be sure that no holes are created
for ( long version = lower; version < upper; version++ )
fileSystem.deleteFile( source.getFileName( version ) );
}
/**
 * @return a {@link Threshold} that returns {@code false} while a log file is still within
 * the threshold and should not be pruned. The first time it returns {@code true} the
 * threshold has been reached: the log file it just returned {@code true} for is kept,
 * but all earlier logs are pruned.
 */
protected abstract Threshold newThreshold();
}
public static LogPruneStrategy nonEmptyFileCount( FileSystemAbstraction fileSystem, int maxLogCountToKeep )
{
return new FileCountPruneStrategy( fileSystem, maxLogCountToKeep );
}
private static class FileCountPruneStrategy extends AbstractPruneStrategy
{
private final int maxNonEmptyLogCount;
public FileCountPruneStrategy( FileSystemAbstraction fileSystem, int maxNonEmptyLogCount )
{
super( fileSystem );
this.maxNonEmptyLogCount = maxNonEmptyLogCount;
}
@Override
protected Threshold newThreshold()
{
return new Threshold()
{
int nonEmptyLogCount = 0;
@Override
public boolean reached( File file, long version, LogLoader source )
{
return ++nonEmptyLogCount >= maxNonEmptyLogCount;
}
};
}
@Override
public String toString()
{
return getClass().getSimpleName() + "[max:" + maxNonEmptyLogCount + "]";
}
}
public static LogPruneStrategy totalFileSize( FileSystemAbstraction fileSystem, int numberOfBytes )
{
return new FileSizePruneStrategy( fileSystem, numberOfBytes );
}
public static class FileSizePruneStrategy extends AbstractPruneStrategy
{
private final int maxSize;
public FileSizePruneStrategy( FileSystemAbstraction fileSystem, int maxSizeBytes )
{
    super( fileSystem );
this.maxSize = maxSizeBytes;
}
@Override
protected Threshold newThreshold()
{
return new Threshold()
{
private int size;
@Override
public boolean reached( File file, long version, LogLoader source )
{
size += fileSystem.getFileSize( file );
return size >= maxSize;
}
};
}
}
public static LogPruneStrategy transactionCount( FileSystemAbstraction fileSystem, int maxCount )
{
return new TransactionCountPruneStrategy( fileSystem, maxCount );
}
public static class TransactionCountPruneStrategy extends AbstractPruneStrategy
{
private final int maxTransactionCount;
public TransactionCountPruneStrategy( FileSystemAbstraction fileSystem, int maxTransactionCount )
{
super( fileSystem );
this.maxTransactionCount = maxTransactionCount;
}
@Override
protected Threshold newThreshold()
{
return new Threshold()
{
private Long highest;
@Override
public boolean reached( File file, long version, LogLoader source )
{
// Here we know that the log version exists (checked in AbstractPruneStrategy#prune)
long tx = source.getFirstCommittedTxId( version );
if ( highest == null )
{
highest = source.getLastCommittedTxId();
return false;
}
return highest-tx >= maxTransactionCount;
}
};
}
}
public static LogPruneStrategy transactionTimeSpan( FileSystemAbstraction fileSystem, int timeToKeep, TimeUnit timeUnit )
{
return new TransactionTimeSpanPruneStrategy( fileSystem, timeToKeep, timeUnit );
}
public static class TransactionTimeSpanPruneStrategy extends AbstractPruneStrategy
{
private final int timeToKeep;
private final TimeUnit unit;
public TransactionTimeSpanPruneStrategy( FileSystemAbstraction fileSystem, int timeToKeep, TimeUnit unit )
{
super( fileSystem );
this.timeToKeep = timeToKeep;
this.unit = unit;
}
@Override
protected Threshold newThreshold()
{
return new Threshold()
{
private long lowerLimit = System.currentTimeMillis() - unit.toMillis( timeToKeep );
@Override
public boolean reached( File file, long version, LogLoader source )
{
try
{
return source.getFirstStartRecordTimestamp( version ) < lowerLimit;
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
};
}
}
/**
 * Parses a configuration value specifying log pruning. It has one of these forms:
 * <ul>
 * <li>true/false - 'true' keeps all logs, 'false' prunes down to the last transaction</li>
 * <li>[number][unit] [type]</li>
 * </ul>
 * For example:
 * <ul>
 * <li>100M size - For keeping the last 100 megabytes of log data</li>
 * <li>20 files - For keeping the last 20 non-empty log files</li>
 * <li>7 days - For keeping the last 7 days worth of log data</li>
 * <li>1k hours - For keeping the last 1000 hours worth of log data</li>
 * </ul>
 */
public static LogPruneStrategy fromConfigValue( FileSystemAbstraction fileSystem, String configValue )
{
String[] tokens = configValue.split( " " );
if ( tokens.length == 0 )
throw new IllegalArgumentException( "Invalid log pruning configuration value '" + configValue + "'" );
String numberWithUnit = tokens[0];
if ( tokens.length == 1 )
{
if ( numberWithUnit.equals( "true" ) )
return NO_PRUNING;
else if ( numberWithUnit.equals( "false" ) )
return transactionCount( fileSystem, 1 );
else
throw new IllegalArgumentException( "Invalid log pruning configuration value '" + configValue +
"'. The form is 'all' or '<number><unit> <type>' for example '100k txs' " +
"for the latest 100 000 transactions" );
}
String[] types = new String[] { "files", "size", "txs", "hours", "days" };
String type = tokens[1];
int number = (int) parseLongWithUnit( numberWithUnit );
int typeIndex = 0;
if ( type.equals( types[typeIndex++] ) )
return nonEmptyFileCount( fileSystem, number );
else if ( type.equals( types[typeIndex++] ) )
return totalFileSize( fileSystem, number );
else if ( type.equals( types[typeIndex++] ) )
return transactionCount( fileSystem, number );
else if ( type.equals( types[typeIndex++] ) )
return transactionTimeSpan( fileSystem, number, TimeUnit.HOURS );
else if ( type.equals( types[typeIndex++] ) )
return transactionTimeSpan( fileSystem, number, TimeUnit.DAYS );
else
throw new IllegalArgumentException( "Invalid log pruning configuration value '" + configValue +
"'. Invalid type '" + type + "', valid are " + Arrays.asList( types ) );
}
}
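A minimal sketch of fromConfigValue in use, following the '<number><unit> <type>' forms documented above; DefaultFileSystemAbstraction is assumed to be an available FileSystemAbstraction implementation.
FileSystemAbstraction fs = new DefaultFileSystemAbstraction();                      // assumed implementation
LogPruneStrategy keepAll = LogPruneStrategies.fromConfigValue( fs, "true" );        // NO_PRUNING
LogPruneStrategy bySize = LogPruneStrategies.fromConfigValue( fs, "100M size" );    // keep the last ~100 megabytes
LogPruneStrategy byCount = LogPruneStrategies.fromConfigValue( fs, "20 files" );    // keep the last 20 non-empty files
LogPruneStrategy byTime = LogPruneStrategies.fromConfigValue( fs, "7 days" );       // keep the last 7 days of log data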
| 1no label
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogPruneStrategies.java
|
123 |
@Service("blPageDefaultRuleProcessor")
public class PageDefaultRuleProcessor extends AbstractPageRuleProcessor {
private static final Log LOG = LogFactory.getLog(PageDefaultRuleProcessor.class);
/**
 * Returns true if all of the rules associated with the passed in <code>Page</code>
 * item match based on the passed in vars.
 *
 * Also returns true if no rules are present for the passed in item.
 *
 * @param page - the page to test
 * @param vars - a map of objects used by the rule MVEL expressions
 * @return the result of the rule checks
 */
public boolean checkForMatch(PageDTO page, Map<String, Object> vars) {
String ruleExpression = page.getRuleExpression();
if (ruleExpression != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Processing content rule for page with id " + page.getId() +". Value = " + ruleExpression);
}
boolean result = executeExpression(ruleExpression, vars);
if (!result) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Page with id " + page.getId() + " failed to pass its rule and will not be included. Value = " + ruleExpression);
}
}
return result;
} else {
// If no rule found, then consider this a match.
return true;
}
}
}
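A hedged sketch of checkForMatch in use with an MVEL rule; the PageDTO setter and the 'customer' variable are illustrative assumptions, not confirmed by this snippet.
PageDefaultRuleProcessor processor = new PageDefaultRuleProcessor();
PageDTO page = new PageDTO();
page.setRuleExpression("customer.registered == true");   // hypothetical rule; setter assumed on PageDTO
Map<String, Object> vars = new HashMap<String, Object>();
vars.put("customer", currentCustomer);                   // 'currentCustomer' is a placeholder object
boolean matches = processor.checkForMatch(page, vars);   // true when the rule passes, or when no rule is set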
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_service_PageDefaultRuleProcessor.java
|
417 |
public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAction<GetSnapshotsRequest, GetSnapshotsResponse> {
private final SnapshotsService snapshotsService;
@Inject
public TransportGetSnapshotsAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, SnapshotsService snapshotsService) {
super(settings, transportService, clusterService, threadPool);
this.snapshotsService = snapshotsService;
}
@Override
protected String executor() {
return ThreadPool.Names.SNAPSHOT;
}
@Override
protected String transportAction() {
return GetSnapshotsAction.NAME;
}
@Override
protected GetSnapshotsRequest newRequest() {
return new GetSnapshotsRequest();
}
@Override
protected GetSnapshotsResponse newResponse() {
return new GetSnapshotsResponse();
}
@Override
protected ClusterBlockException checkBlock(GetSnapshotsRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
}
@Override
protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener<GetSnapshotsResponse> listener) throws ElasticsearchException {
SnapshotId[] snapshotIds = new SnapshotId[request.snapshots().length];
for (int i = 0; i < snapshotIds.length; i++) {
snapshotIds[i] = new SnapshotId(request.repository(), request.snapshots()[i]);
}
try {
ImmutableList.Builder<SnapshotInfo> snapshotInfoBuilder = ImmutableList.builder();
if (snapshotIds.length > 0) {
for (SnapshotId snapshotId : snapshotIds) {
snapshotInfoBuilder.add(new SnapshotInfo(snapshotsService.snapshot(snapshotId)));
}
} else {
ImmutableList<Snapshot> snapshots = snapshotsService.snapshots(request.repository());
for (Snapshot snapshot : snapshots) {
snapshotInfoBuilder.add(new SnapshotInfo(snapshot));
}
}
listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder.build()));
} catch (Throwable t) {
listener.onFailure(t);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_get_TransportGetSnapshotsAction.java
|
80 |
public class ClientDisconnectionOperation extends AbstractOperation implements UrgentSystemOperation {
private String clientUuid;
public ClientDisconnectionOperation() {
}
public ClientDisconnectionOperation(String clientUuid) {
this.clientUuid = clientUuid;
}
@Override
public void run() throws Exception {
ClientEngineImpl engine = getService();
Set<ClientEndpoint> endpoints = engine.getEndpoints(clientUuid);
for (ClientEndpoint endpoint : endpoints) {
Connection connection = endpoint.getConnection();
engine.removeEndpoint(connection, true);
}
NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
nodeEngine.onClientDisconnected(clientUuid);
Collection<ClientAwareService> services = nodeEngine.getServices(ClientAwareService.class);
for (ClientAwareService service : services) {
service.clientDisconnected(clientUuid);
}
}
@Override
public boolean returnsResponse() {
return false;
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeUTF(clientUuid);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
clientUuid = in.readUTF();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_client_ClientDisconnectionOperation.java
|
129 |
public class LongAdderTable<K> implements Serializable {
/** Relies on default serialization */
private static final long serialVersionUID = 7249369246863182397L;
/** The underlying map */
private final ConcurrentHashMapV8<K, LongAdder> map;
static final class CreateAdder
implements ConcurrentHashMapV8.Fun<Object, LongAdder> {
public LongAdder apply(Object unused) { return new LongAdder(); }
}
private static final CreateAdder createAdder = new CreateAdder();
/**
* Creates a new empty table.
*/
public LongAdderTable() {
map = new ConcurrentHashMapV8<K, LongAdder>();
}
/**
* If the given key does not already exist in the table, inserts
* the key with initial sum of zero; in either case returning the
* adder associated with this key.
*
* @param key the key
* @return the adder associated with the key
*/
public LongAdder install(K key) {
return map.computeIfAbsent(key, createAdder);
}
/**
* Adds the given value to the sum associated with the given
* key. If the key does not already exist in the table, it is
* inserted.
*
* @param key the key
* @param x the value to add
*/
public void add(K key, long x) {
map.computeIfAbsent(key, createAdder).add(x);
}
/**
* Increments the sum associated with the given key. If the key
* does not already exist in the table, it is inserted.
*
* @param key the key
*/
public void increment(K key) { add(key, 1L); }
/**
* Decrements the sum associated with the given key. If the key
* does not already exist in the table, it is inserted.
*
* @param key the key
*/
public void decrement(K key) { add(key, -1L); }
/**
* Returns the sum associated with the given key, or zero if the
* key does not currently exist in the table.
*
* @param key the key
* @return the sum associated with the key, or zero if the key is
* not in the table
*/
public long sum(K key) {
LongAdder a = map.get(key);
return a == null ? 0L : a.sum();
}
/**
* Resets the sum associated with the given key to zero if the key
* exists in the table. This method does <em>NOT</em> add or
* remove the key from the table (see {@link #remove}).
*
* @param key the key
*/
public void reset(K key) {
LongAdder a = map.get(key);
if (a != null)
a.reset();
}
/**
 * Returns the sum associated with the given key, then resets that sum
 * to zero if the key exists in the table. This method does <em>NOT</em> add or
 * remove the key from the table (see {@link #remove}).
 *
 * @param key the key
 * @return the previous sum, or zero if the key is not
 * in the table
 */
public long sumThenReset(K key) {
LongAdder a = map.get(key);
return a == null ? 0L : a.sumThenReset();
}
/**
* Returns the sum totalled across all keys.
*
* @return the sum totalled across all keys
*/
public long sumAll() {
long sum = 0L;
for (LongAdder a : map.values())
sum += a.sum();
return sum;
}
/**
* Resets the sum associated with each key to zero.
*/
public void resetAll() {
for (LongAdder a : map.values())
a.reset();
}
/**
* Totals, then resets, the sums associated with all keys.
*
* @return the sum totalled across all keys
*/
public long sumThenResetAll() {
long sum = 0L;
for (LongAdder a : map.values())
sum += a.sumThenReset();
return sum;
}
/**
* Removes the given key from the table.
*
* @param key the key
*/
public void remove(K key) { map.remove(key); }
/**
* Removes all keys from the table.
*/
public void removeAll() { map.clear(); }
/**
* Returns the current set of keys.
*
* @return the current set of keys
*/
public Set<K> keySet() {
return map.keySet();
}
/**
* Returns the current set of key-value mappings.
*
* @return the current set of key-value mappings
*/
public Set<Map.Entry<K,LongAdder>> entrySet() {
return map.entrySet();
}
}
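A short usage sketch built only from the methods defined above.
LongAdderTable<String> hits = new LongAdderTable<String>();
hits.increment("home");                 // inserts "home" with sum 0, then adds 1
hits.add("home", 41L);
hits.increment("about");
long home = hits.sum("home");           // 42
long total = hits.sumAll();             // 43
long drained = hits.sumThenResetAll();  // 43; every per-key sum is now 0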
| 0true
|
src_main_java_jsr166e_LongAdderTable.java
|
934 |
public class OfferDiscountType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, OfferDiscountType> TYPES = new LinkedHashMap<String, OfferDiscountType>();
public static final OfferDiscountType PERCENT_OFF = new OfferDiscountType("PERCENT_OFF", "Percent Off");
public static final OfferDiscountType AMOUNT_OFF = new OfferDiscountType("AMOUNT_OFF", "Amount Off");
public static final OfferDiscountType FIX_PRICE = new OfferDiscountType("FIX_PRICE", "Fixed Price");
public static OfferDiscountType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public OfferDiscountType() {
//do nothing
}
public OfferDiscountType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
public void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
public String getType() {
return type;
}
public String getFriendlyType() {
return friendlyType;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
OfferDiscountType other = (OfferDiscountType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
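A short sketch of the extensible-enum behaviour above: the static instances self-register in TYPES via setType, and instances created at runtime register the same way. The custom type is hypothetical.
OfferDiscountType percent = OfferDiscountType.getInstance("PERCENT_OFF");   // same object as PERCENT_OFF
OfferDiscountType bogo = new OfferDiscountType("BOGO", "Buy One Get One");  // hypothetical custom type
OfferDiscountType lookedUp = OfferDiscountType.getInstance("BOGO");         // now returns the instance above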
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_type_OfferDiscountType.java
|
2,830 |
private class SyncReplicaVersionTask implements Runnable {
@Override
public void run() {
if (node.isActive() && migrationActive.get()) {
final Address thisAddress = node.getThisAddress();
for (final InternalPartitionImpl partition : partitions) {
if (thisAddress.equals(partition.getOwnerOrNull())) {
for (int index = 1; index < InternalPartition.MAX_REPLICA_COUNT; index++) {
if (partition.getReplicaAddress(index) != null) {
SyncReplicaVersion op = new SyncReplicaVersion(index, null);
op.setService(InternalPartitionServiceImpl.this);
op.setNodeEngine(nodeEngine);
op.setResponseHandler(ResponseHandlerFactory
.createErrorLoggingResponseHandler(node.getLogger(SyncReplicaVersion.class)));
op.setPartitionId(partition.getPartitionId());
nodeEngine.getOperationService().executeOperation(op);
}
}
}
}
}
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_partition_impl_InternalPartitionServiceImpl.java
|
337 |
new Thread() {
public void run() {
try {
if (tempMap.tryLock("key1", 20, TimeUnit.SECONDS)) {
latch2.countDown();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
4,201 |
public class RateLimitingInputStream extends InputStream {
private final InputStream delegate;
private final RateLimiter rateLimiter;
private final Listener listener;
public interface Listener {
void onPause(long nanos);
}
public RateLimitingInputStream(InputStream delegate, RateLimiter rateLimiter, Listener listener) {
this.delegate = delegate;
this.rateLimiter = rateLimiter;
this.listener = listener;
}
@Override
public int read() throws IOException {
int b = delegate.read();
long pause = rateLimiter.pause(1);
if (pause > 0) {
listener.onPause(pause);
}
return b;
}
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
int n = delegate.read(b, off, len);
if (n > 0) {
listener.onPause(rateLimiter.pause(n));
}
return n;
}
@Override
public long skip(long n) throws IOException {
return delegate.skip(n);
}
@Override
public int available() throws IOException {
return delegate.available();
}
@Override
public void close() throws IOException {
delegate.close();
}
@Override
public void mark(int readlimit) {
delegate.mark(readlimit);
}
@Override
public void reset() throws IOException {
delegate.reset();
}
@Override
public boolean markSupported() {
return delegate.markSupported();
}
}
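A hedged sketch of wrapping a stream; RateLimiter.SimpleRateLimiter is assumed to be Lucene's implementation (megabytes-per-second constructor), and the file name is illustrative.
RateLimiter limiter = new RateLimiter.SimpleRateLimiter(5.0);   // ~5 MB/sec; constructor assumed from Lucene
InputStream throttled = new RateLimitingInputStream(
        new FileInputStream("snapshot.dat"),                    // any delegate stream
        limiter,
        new RateLimitingInputStream.Listener() {
            @Override
            public void onPause(long nanos) {
                // e.g. accumulate total throttle time for stats reporting
            }
        });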
| 1no label
|
src_main_java_org_elasticsearch_index_snapshots_blobstore_RateLimitingInputStream.java
|
60 |
final Iterator<String> keysToMangle = Iterators.filter(configuration.getKeys(), new Predicate<String>() {
@Override
public boolean apply(String key) {
if (null == key)
return false;
return p.matcher(key).matches();
}
});
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_TitanFactory.java
|
1,591 |
public abstract class FieldMetadata implements Serializable {
private static final long serialVersionUID = 1L;
private String inheritedFromType;
private String[] availableToTypes;
private Boolean excluded;
private String friendlyName;
private String securityLevel;
private Integer order;
private String owningClassFriendlyName;
private String tab;
private Integer tabOrder;
//temporary fields
private Boolean childrenExcluded;
private String targetClass;
private String owningClass;
private String prefix;
private String fieldName;
private String showIfProperty;
private String currencyCodeField;
//Additional metadata not supported as first class
private Map<String, Object> additionalMetadata = new HashMap<String, Object>();
public String[] getAvailableToTypes() {
return availableToTypes;
}
public void setAvailableToTypes(String[] availableToTypes) {
Arrays.sort(availableToTypes);
this.availableToTypes = availableToTypes;
}
public String getInheritedFromType() {
return inheritedFromType;
}
public void setInheritedFromType(String inheritedFromType) {
this.inheritedFromType = inheritedFromType;
}
public Boolean getExcluded() {
return excluded;
}
public void setExcluded(Boolean excluded) {
this.excluded = excluded;
}
public Map<String, Object> getAdditionalMetadata() {
return additionalMetadata;
}
public void setAdditionalMetadata(Map<String, Object> additionalMetadata) {
this.additionalMetadata = additionalMetadata;
}
protected FieldMetadata populate(FieldMetadata metadata) {
metadata.inheritedFromType = inheritedFromType;
if (availableToTypes != null) {
metadata.availableToTypes = new String[availableToTypes.length];
System.arraycopy(availableToTypes, 0, metadata.availableToTypes, 0, availableToTypes.length);
}
metadata.excluded = excluded;
metadata.friendlyName = friendlyName;
metadata.owningClassFriendlyName = owningClassFriendlyName;
metadata.securityLevel = securityLevel;
metadata.order = order;
metadata.targetClass = targetClass;
metadata.owningClass = owningClass;
metadata.prefix = prefix;
metadata.childrenExcluded = childrenExcluded;
metadata.fieldName = fieldName;
metadata.showIfProperty = showIfProperty;
metadata.currencyCodeField = currencyCodeField;
for (Map.Entry<String, Object> entry : additionalMetadata.entrySet()) {
metadata.additionalMetadata.put(entry.getKey(), entry.getValue());
}
return metadata;
}
public String getShowIfProperty() {
return showIfProperty;
}
public void setShowIfProperty(String showIfProperty) {
this.showIfProperty = showIfProperty;
}
public String getCurrencyCodeField() {
return currencyCodeField;
}
public void setCurrencyCodeField(String currencyCodeField) {
this.currencyCodeField = currencyCodeField;
}
public String getFriendlyName() {
return friendlyName;
}
public void setFriendlyName(String friendlyName) {
this.friendlyName = friendlyName;
}
public String getSecurityLevel() {
return securityLevel;
}
public void setSecurityLevel(String securityLevel) {
this.securityLevel = securityLevel;
}
public Integer getOrder() {
return order;
}
public void setOrder(Integer order) {
this.order = order;
}
public String getTargetClass() {
return targetClass;
}
public void setTargetClass(String targetClass) {
this.targetClass = targetClass;
}
public String getFieldName() {
return fieldName;
}
public void setFieldName(String fieldName) {
this.fieldName = fieldName;
}
public String getOwningClassFriendlyName() {
return owningClassFriendlyName;
}
public void setOwningClassFriendlyName(String owningClassFriendlyName) {
this.owningClassFriendlyName = owningClassFriendlyName;
}
public String getOwningClass() {
return owningClass;
}
public void setOwningClass(String owningClass) {
this.owningClass = owningClass;
}
public String getPrefix() {
return prefix;
}
public void setPrefix(String prefix) {
this.prefix = prefix;
}
public Boolean getChildrenExcluded() {
return childrenExcluded;
}
public void setChildrenExcluded(Boolean childrenExcluded) {
this.childrenExcluded = childrenExcluded;
}
public String getTab() {
return tab;
}
public void setTab(String tab) {
this.tab = tab;
}
public Integer getTabOrder() {
return tabOrder;
}
public void setTabOrder(Integer tabOrder) {
this.tabOrder = tabOrder;
}
public abstract FieldMetadata cloneFieldMetadata();
public abstract void accept(MetadataVisitor visitor);
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof FieldMetadata)) return false;
FieldMetadata that = (FieldMetadata) o;
if (additionalMetadata != null ? !additionalMetadata.equals(that.additionalMetadata) : that
.additionalMetadata != null)
return false;
if (!Arrays.equals(availableToTypes, that.availableToTypes)) return false;
if (childrenExcluded != null ? !childrenExcluded.equals(that.childrenExcluded) : that.childrenExcluded != null)
return false;
if (currencyCodeField != null ? !currencyCodeField.equals(that.currencyCodeField) : that.currencyCodeField !=
null)
return false;
if (excluded != null ? !excluded.equals(that.excluded) : that.excluded != null) return false;
if (fieldName != null ? !fieldName.equals(that.fieldName) : that.fieldName != null) return false;
if (friendlyName != null ? !friendlyName.equals(that.friendlyName) : that.friendlyName != null) return false;
if (inheritedFromType != null ? !inheritedFromType.equals(that.inheritedFromType) : that.inheritedFromType !=
null)
return false;
if (order != null ? !order.equals(that.order) : that.order != null) return false;
if (owningClass != null ? !owningClass.equals(that.owningClass) : that.owningClass != null) return false;
if (owningClassFriendlyName != null ? !owningClassFriendlyName.equals(that.owningClassFriendlyName) : that
.owningClassFriendlyName != null)
return false;
if (prefix != null ? !prefix.equals(that.prefix) : that.prefix != null) return false;
if (securityLevel != null ? !securityLevel.equals(that.securityLevel) : that.securityLevel != null)
return false;
if (showIfProperty != null ? !showIfProperty.equals(that.showIfProperty) : that.showIfProperty != null)
return false;
if (tab != null ? !tab.equals(that.tab) : that.tab != null) return false;
if (tabOrder != null ? !tabOrder.equals(that.tabOrder) : that.tabOrder != null) return false;
if (targetClass != null ? !targetClass.equals(that.targetClass) : that.targetClass != null) return false;
return true;
}
@Override
public int hashCode() {
int result = inheritedFromType != null ? inheritedFromType.hashCode() : 0;
result = 31 * result + (availableToTypes != null ? Arrays.hashCode(availableToTypes) : 0);
result = 31 * result + (excluded != null ? excluded.hashCode() : 0);
result = 31 * result + (friendlyName != null ? friendlyName.hashCode() : 0);
result = 31 * result + (securityLevel != null ? securityLevel.hashCode() : 0);
result = 31 * result + (order != null ? order.hashCode() : 0);
result = 31 * result + (owningClassFriendlyName != null ? owningClassFriendlyName.hashCode() : 0);
result = 31 * result + (tab != null ? tab.hashCode() : 0);
result = 31 * result + (tabOrder != null ? tabOrder.hashCode() : 0);
result = 31 * result + (childrenExcluded != null ? childrenExcluded.hashCode() : 0);
result = 31 * result + (targetClass != null ? targetClass.hashCode() : 0);
result = 31 * result + (owningClass != null ? owningClass.hashCode() : 0);
result = 31 * result + (prefix != null ? prefix.hashCode() : 0);
result = 31 * result + (fieldName != null ? fieldName.hashCode() : 0);
result = 31 * result + (showIfProperty != null ? showIfProperty.hashCode() : 0);
result = 31 * result + (currencyCodeField != null ? currencyCodeField.hashCode() : 0);
result = 31 * result + (additionalMetadata != null ? additionalMetadata.hashCode() : 0);
return result;
}
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_FieldMetadata.java
|
751 |
public class MultiGetAction extends Action<MultiGetRequest, MultiGetResponse, MultiGetRequestBuilder> {
public static final MultiGetAction INSTANCE = new MultiGetAction();
public static final String NAME = "mget";
private MultiGetAction() {
super(NAME);
}
@Override
public MultiGetResponse newResponse() {
return new MultiGetResponse();
}
@Override
public MultiGetRequestBuilder newRequestBuilder(Client client) {
return new MultiGetRequestBuilder(client);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_get_MultiGetAction.java
|
671 |
public class DeleteWarmerResponse extends AcknowledgedResponse {
DeleteWarmerResponse() {
super();
}
DeleteWarmerResponse(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_warmer_delete_DeleteWarmerResponse.java
|
6,303 |
public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper {
private final ESLogger logger;
private final boolean failOnClose;
public ElasticsearchMockDirectoryWrapper(Random random, Directory delegate, ESLogger logger, boolean failOnClose) {
super(random, delegate);
this.logger = logger;
this.failOnClose = failOnClose;
}
@Override
public void close() throws IOException {
try {
super.close();
} catch (RuntimeException ex) {
if (failOnClose) {
throw ex;
}
// we catch the exception on close to properly close shards even if there are open files
// the test framework will call closeWithRuntimeException after the test exits to fail
// on unclosed files.
logger.debug("MockDirectoryWrapper#close() threw exception", ex);
}
}
public void closeWithRuntimeException() throws IOException {
super.close(); // force fail if open files etc. called in tear down of ElasticsearchIntegrationTest
}
}
| 1no label
|
src_test_java_org_elasticsearch_test_store_MockDirectoryHelper.java
|
3,208 |
constructors[VECTOR] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new VectorClock();
}
};
| 1no label
|
hazelcast_src_main_java_com_hazelcast_replicatedmap_operation_ReplicatedMapDataSerializerHook.java
|
48 |
static final class CounterHashCode {
int code;
}
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
202 |
public class ExtendedCommonTermsQuery extends CommonTermsQuery {
public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency, boolean disableCoord) {
super(highFreqOccur, lowFreqOccur, maxTermFrequency, disableCoord);
}
public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency) {
super(highFreqOccur, lowFreqOccur, maxTermFrequency);
}
private String lowFreqMinNumShouldMatchSpec;
private String highFreqMinNumShouldMatchSpec;
@Override
protected int calcLowFreqMinimumNumberShouldMatch(int numOptional) {
return calcMinimumNumberShouldMatch(lowFreqMinNumShouldMatchSpec, numOptional);
}
protected int calcMinimumNumberShouldMatch(String spec, int numOptional) {
if (spec == null) {
return 0;
}
return Queries.calculateMinShouldMatch(numOptional, spec);
}
@Override
protected int calcHighFreqMinimumNumberShouldMatch(int numOptional) {
return calcMinimumNumberShouldMatch(highFreqMinNumShouldMatchSpec, numOptional);
}
public void setHighFreqMinimumNumberShouldMatch(String spec) {
this.highFreqMinNumShouldMatchSpec = spec;
}
public String getHighFreqMinimumNumberShouldMatchSpec() {
return highFreqMinNumShouldMatchSpec;
}
public void setLowFreqMinimumNumberShouldMatch(String spec) {
this.lowFreqMinNumShouldMatchSpec = spec;
}
public String getLowFreqMinimumNumberShouldMatchSpec() {
return lowFreqMinNumShouldMatchSpec;
}
}
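A brief sketch of the minimum-should-match specs accepted above; per Queries.calculateMinShouldMatch both absolute counts and percentages are valid, and the Occur values and frequency cutoff are illustrative assumptions.
ExtendedCommonTermsQuery query = new ExtendedCommonTermsQuery(Occur.SHOULD, Occur.SHOULD, 0.01f);
query.setLowFreqMinimumNumberShouldMatch("2");     // at least two low-frequency terms must match
query.setHighFreqMinimumNumberShouldMatch("75%");  // three quarters of the high-frequency terms must match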
| 1no label
|
src_main_java_org_apache_lucene_queries_ExtendedCommonTermsQuery.java
|
43 |
public class MultiPaxosServerFactory
implements ProtocolServerFactory
{
private final ClusterConfiguration initialConfig;
private final Logging logging;
public MultiPaxosServerFactory( ClusterConfiguration initialConfig, Logging logging )
{
this.initialConfig = initialConfig;
this.logging = logging;
}
@Override
public ProtocolServer newProtocolServer( InstanceId me, TimeoutStrategy timeoutStrategy, MessageSource input,
MessageSender output, AcceptorInstanceStore acceptorInstanceStore,
ElectionCredentialsProvider electionCredentialsProvider,
Executor stateMachineExecutor,
ObjectInputStreamFactory objectInputStreamFactory,
ObjectOutputStreamFactory objectOutputStreamFactory )
{
DelayedDirectExecutor executor = new DelayedDirectExecutor();
// Create state machines
Timeouts timeouts = new Timeouts( timeoutStrategy );
final MultiPaxosContext context = new MultiPaxosContext( me,
Iterables.<ElectionRole,ElectionRole>iterable( new ElectionRole(ClusterConfiguration.COORDINATOR )),
new ClusterConfiguration( initialConfig.getName(), logging.getMessagesLog( ClusterConfiguration.class ),
initialConfig.getMemberURIs() ),
executor, logging, objectInputStreamFactory, objectOutputStreamFactory, acceptorInstanceStore, timeouts,
electionCredentialsProvider);
SnapshotContext snapshotContext = new SnapshotContext( context.getClusterContext(), context.getLearnerContext() );
return newProtocolServer( me, input, output, stateMachineExecutor, executor, timeouts,
context, snapshotContext );
}
public ProtocolServer newProtocolServer( InstanceId me, MessageSource input, MessageSender output,
Executor stateMachineExecutor, DelayedDirectExecutor executor, Timeouts timeouts,
MultiPaxosContext context, SnapshotContext snapshotContext )
{
return constructSupportingInfrastructureFor( me, input, output, executor, timeouts, stateMachineExecutor, context, new StateMachine[]
{
new StateMachine( context.getAtomicBroadcastContext(), AtomicBroadcastMessage.class,
AtomicBroadcastState.start, logging ),
new StateMachine( context.getAcceptorContext(), AcceptorMessage.class, AcceptorState.start, logging ),
new StateMachine( context.getProposerContext(), ProposerMessage.class, ProposerState.start, logging ),
new StateMachine( context.getLearnerContext(), LearnerMessage.class, LearnerState.start, logging ),
new StateMachine( context.getHeartbeatContext(), HeartbeatMessage.class, HeartbeatState.start,
logging ),
new StateMachine( context.getElectionContext(), ElectionMessage.class, ElectionState.start, logging ),
new StateMachine( snapshotContext, SnapshotMessage.class, SnapshotState.start, logging ),
new StateMachine( context.getClusterContext(), ClusterMessage.class, ClusterState.start, logging )
});
}
/**
* Sets up the supporting infrastructure and communication hooks for our state machines. This is here to support
* an external requirement for assembling protocol servers given an existing set of state machines (used to prove
* correctness).
* */
public ProtocolServer constructSupportingInfrastructureFor( InstanceId me, MessageSource input,
MessageSender output, DelayedDirectExecutor executor, Timeouts timeouts,
Executor stateMachineExecutor, final MultiPaxosContext context,
StateMachine[] machines )
{
StateMachines stateMachines = new StateMachines( input, output, timeouts, executor, stateMachineExecutor, me );
for ( StateMachine machine : machines )
{
stateMachines.addStateMachine( machine );
}
final ProtocolServer server = new ProtocolServer( me, stateMachines, logging );
server.addBindingListener( new BindingListener()
{
@Override
public void listeningAt( URI me )
{
context.getClusterContext().setBoundAt( me );
}
} );
stateMachines.addMessageProcessor( new HeartbeatRefreshProcessor( stateMachines.getOutgoing(),
        context.getClusterContext() ) );
input.addMessageProcessor( new HeartbeatIAmAliveProcessor( stateMachines.getOutgoing(),
context.getClusterContext() ) );
server.newClient( Cluster.class ).addClusterListener( new HeartbeatJoinListener( stateMachines
.getOutgoing() ) );
context.getHeartbeatContext().addHeartbeatListener( new HeartbeatReelectionListener(
server.newClient( Election.class ), logging.getMessagesLog( ClusterLeaveReelectionListener.class ) ) );
context.getClusterContext().addClusterListener( new ClusterLeaveReelectionListener( server.newClient(
Election.class ),
logging.getMessagesLog( ClusterLeaveReelectionListener.class ) ) );
StateMachineRules rules = new StateMachineRules( stateMachines.getOutgoing() )
.rule( ClusterState.start, ClusterMessage.create, ClusterState.entered,
internal( AtomicBroadcastMessage.entered ),
internal( ProposerMessage.join ),
internal( AcceptorMessage.join ),
internal( LearnerMessage.join ),
internal( HeartbeatMessage.join ),
internal( ElectionMessage.created ),
internal( SnapshotMessage.join ) )
.rule( ClusterState.discovery, ClusterMessage.configurationResponse, ClusterState.joining,
internal( AcceptorMessage.join ),
internal( LearnerMessage.join ),
internal( AtomicBroadcastMessage.join ) )
.rule( ClusterState.discovery, ClusterMessage.configurationResponse, ClusterState.entered,
internal( AtomicBroadcastMessage.entered ),
internal( ProposerMessage.join ),
internal( AcceptorMessage.join ),
internal( LearnerMessage.join ),
internal( HeartbeatMessage.join ),
internal( ElectionMessage.join ),
internal( SnapshotMessage.join ) )
.rule( ClusterState.joining, ClusterMessage.configurationChanged, ClusterState.entered,
internal( AtomicBroadcastMessage.entered ),
internal( ProposerMessage.join ),
internal( AcceptorMessage.join ),
internal( LearnerMessage.join ),
internal( HeartbeatMessage.join ),
internal( ElectionMessage.join ),
internal( SnapshotMessage.join ) )
.rule( ClusterState.joining, ClusterMessage.joinFailure, ClusterState.start,
internal( AtomicBroadcastMessage.leave ),
internal( AcceptorMessage.leave ),
internal( LearnerMessage.leave ),
internal( ProposerMessage.leave ) )
.rule( ClusterState.entered, ClusterMessage.leave, ClusterState.start,
        internal( AtomicBroadcastMessage.leave ),
        internal( AcceptorMessage.leave ),
        internal( LearnerMessage.leave ),
        internal( HeartbeatMessage.leave ),
        internal( SnapshotMessage.leave ),
        internal( ElectionMessage.leave ),
        internal( ProposerMessage.leave ) )
.rule( ClusterState.leaving, ClusterMessage.configurationChanged, ClusterState.start,
internal( AtomicBroadcastMessage.leave ),
internal( AcceptorMessage.leave ),
internal( LearnerMessage.leave ),
internal( HeartbeatMessage.leave ),
internal( ElectionMessage.leave ),
internal( SnapshotMessage.leave ),
internal( ProposerMessage.leave ) )
.rule( ClusterState.leaving, ClusterMessage.leaveTimedout, ClusterState.start,
internal( AtomicBroadcastMessage.leave ),
internal( AcceptorMessage.leave ),
internal( LearnerMessage.leave ),
internal( HeartbeatMessage.leave ),
internal( ElectionMessage.leave ),
internal( SnapshotMessage.leave ),
internal( ProposerMessage.leave ) );
stateMachines.addStateTransitionListener( rules );
return server;
}
}
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_MultiPaxosServerFactory.java
|
232 |
assertTrueEventually(new AssertTask() {
public void run() throws Exception {
assertTrue(map.containsKey(member1.getUuid()));
assertTrue(map.containsKey(member3.getUuid()));
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceExecuteTest.java
|
1,313 |
public class OLocalPaginatedStorage extends OStorageLocalAbstract {
private static final int ONE_KB = 1024;
private final int DELETE_MAX_RETRIES;
private final int DELETE_WAIT_TIME;
private final Map<String, OCluster> clusterMap = new LinkedHashMap<String, OCluster>();
private OCluster[] clusters = new OCluster[0];
private String storagePath;
private final OStorageVariableParser variableParser;
private int defaultClusterId = -1;
private static String[] ALL_FILE_EXTENSIONS = { ".ocf", ".pls", ".pcl", ".oda", ".odh", ".otx",
".ocs", ".oef", ".oem", ".oet", OWriteAheadLog.WAL_SEGMENT_EXTENSION, OWriteAheadLog.MASTER_RECORD_EXTENSION,
OLocalHashTableIndexEngine.BUCKET_FILE_EXTENSION, OLocalHashTableIndexEngine.METADATA_FILE_EXTENSION,
OLocalHashTableIndexEngine.TREE_FILE_EXTENSION, OClusterPositionMap.DEF_EXTENSION, OSBTreeIndexEngine.DATA_FILE_EXTENSION,
OWOWCache.NAME_ID_MAP_EXTENSION, OIndexRIDContainer.INDEX_FILE_EXTENSION };
private OModificationLock modificationLock = new OModificationLock();
private ScheduledExecutorService fuzzyCheckpointExecutor;
private ExecutorService checkpointExecutor;
private volatile boolean wereDataRestoredAfterOpen = false;
private boolean makeFullCheckPointAfterClusterCreate = OGlobalConfiguration.STORAGE_MAKE_FULL_CHECKPOINT_AFTER_CLUSTER_CREATE
.getValueAsBoolean();
public OLocalPaginatedStorage(final String name, final String filePath, final String mode) throws IOException {
super(name, filePath, mode);
File f = new File(url);
if (f.exists() || !exists(f.getParent())) {
// ALREADY EXISTS OR NOT LEGACY
storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getPath()));
} else {
// LEGACY DB
storagePath = OSystemVariableResolver.resolveSystemVariables(OFileUtils.getPath(new File(url).getParent()));
}
storagePath = OIOUtils.getPathFromDatabaseName(storagePath);
variableParser = new OStorageVariableParser(storagePath);
configuration = new OStorageConfigurationSegment(this);
DELETE_MAX_RETRIES = OGlobalConfiguration.FILE_MMAP_FORCE_RETRY.getValueAsInteger();
DELETE_WAIT_TIME = OGlobalConfiguration.FILE_MMAP_FORCE_DELAY.getValueAsInteger();
}
private void initWal() throws IOException {
if (OGlobalConfiguration.USE_WAL.getValueAsBoolean()) {
fuzzyCheckpointExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r);
thread.setDaemon(true);
return thread;
}
});
checkpointExecutor = Executors.newSingleThreadExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r);
thread.setDaemon(true);
return thread;
}
});
writeAheadLog = new OWriteAheadLog(this);
final int fuzzyCheckpointDelay = OGlobalConfiguration.WAL_FUZZY_CHECKPOINT_INTERVAL.getValueAsInteger();
fuzzyCheckpointExecutor.scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
try {
makeFuzzyCheckpoint();
} catch (Throwable e) {
OLogManager.instance().error(this, "Error during background fuzzy checkpoint creation for storage " + name, e);
}
}
}, fuzzyCheckpointDelay, fuzzyCheckpointDelay, TimeUnit.SECONDS);
} else
writeAheadLog = null;
long diskCacheSize = OGlobalConfiguration.DISK_CACHE_SIZE.getValueAsLong() * 1024 * 1024;
long writeCacheSize = (long) Math.floor((((double) OGlobalConfiguration.DISK_WRITE_CACHE_PART.getValueAsInteger()) / 100.0)
* diskCacheSize);
long readCacheSize = diskCacheSize - writeCacheSize;
diskCache = new OReadWriteDiskCache(name, readCacheSize, writeCacheSize,
OGlobalConfiguration.DISK_CACHE_PAGE_SIZE.getValueAsInteger() * ONE_KB,
OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_TTL.getValueAsLong() * 1000,
OGlobalConfiguration.DISK_WRITE_CACHE_PAGE_FLUSH_INTERVAL.getValueAsInteger(), this, writeAheadLog, false, true);
}
public void open(final String iUserName, final String iUserPassword, final Map<String, Object> iProperties) {
lock.acquireExclusiveLock();
try {
addUser();
if (status != STATUS.CLOSED)
// ALREADY OPENED: THIS IS THE CASE WHEN A STORAGE INSTANCE IS
// REUSED
return;
if (!exists())
throw new OStorageException("Cannot open the storage '" + name + "' because it does not exist in path: " + url);
initWal();
status = STATUS.OPEN;
// OPEN BASIC SEGMENTS
int pos;
addDefaultClusters();
// REGISTER CLUSTER
for (int i = 0; i < configuration.clusters.size(); ++i) {
final OStorageClusterConfiguration clusterConfig = configuration.clusters.get(i);
if (clusterConfig != null) {
pos = createClusterFromConfig(clusterConfig);
try {
if (pos == -1) {
clusters[i].open();
} else {
if (clusterConfig.getName().equals(CLUSTER_DEFAULT_NAME))
defaultClusterId = pos;
clusters[pos].open();
}
} catch (FileNotFoundException e) {
OLogManager.instance().warn(
this,
"Error on loading cluster '" + clusters[i].getName() + "' (" + i
+ "): file not found. It will be excluded from current database '" + getName() + "'.");
clusterMap.remove(clusters[i].getName());
clusters[i] = null;
}
} else {
clusters = Arrays.copyOf(clusters, clusters.length + 1);
clusters[i] = null;
}
}
restoreIfNeeded();
} catch (Exception e) {
close(true);
throw new OStorageException("Cannot open local storage '" + url + "' with mode=" + mode, e);
} finally {
lock.releaseExclusiveLock();
}
}
private void restoreIfNeeded() throws IOException {
boolean wasSoftlyClosed = true;
for (OCluster cluster : clusters)
if (cluster != null && !cluster.wasSoftlyClosed())
wasSoftlyClosed = false;
if (!wasSoftlyClosed) {
OLogManager.instance().warn(this, "Storage " + name + " was not closed properly. Will try to restore from write ahead log.");
try {
restoreFromWAL();
makeFullCheckpoint();
} catch (Exception e) {
OLogManager.instance().error(this, "Exception during storage data restore.", e);
} finally {
OLogManager.instance().info(this, "Storage data restore was completed");
}
}
}
private void restoreFromWAL() throws IOException {
if (writeAheadLog == null) {
OLogManager.instance().error(this, "Restore is not possible because write ahead logging is switched off.");
return;
}
if (writeAheadLog.begin() == null) {
OLogManager.instance().error(this, "Restore is not possible because write ahead log is empty.");
return;
}
OLogManager.instance().info(this, "Try to find last checkpoint.");
OLogSequenceNumber lastCheckPoint = writeAheadLog.getLastCheckpoint();
if (lastCheckPoint == null) {
OLogManager.instance().info(this, "Checkpoints are absent will restore from beginning.");
restoreFromBegging();
return;
}
OWALRecord checkPointRecord = writeAheadLog.read(lastCheckPoint);
if (checkPointRecord == null) {
OLogManager.instance().info(this, "Checkpoints are absent will restore from beginning.");
restoreFromBegging();
return;
}
if (checkPointRecord instanceof OFuzzyCheckpointStartRecord) {
OLogManager.instance().info(this, "Found checkpoint is fuzzy checkpoint.");
boolean fuzzyCheckPointIsComplete = checkFuzzyCheckPointIsComplete(lastCheckPoint);
if (!fuzzyCheckPointIsComplete) {
OLogManager.instance().warn(this, "Fuzzy checkpoint is not complete.");
OLogSequenceNumber previousCheckpoint = ((OFuzzyCheckpointStartRecord) checkPointRecord).getPreviousCheckpoint();
checkPointRecord = null;
if (previousCheckpoint != null)
checkPointRecord = writeAheadLog.read(previousCheckpoint);
if (checkPointRecord != null) {
OLogManager.instance().warn(this, "Will restore from previous checkpoint.");
restoreFromCheckPoint((OAbstractCheckPointStartRecord) checkPointRecord);
} else {
OLogManager.instance().warn(this, "Will restore from beginning.");
restoreFromBegging();
}
} else
restoreFromCheckPoint((OAbstractCheckPointStartRecord) checkPointRecord);
return;
}
if (checkPointRecord instanceof OFullCheckpointStartRecord) {
OLogManager.instance().info(this, "Found checkpoint is full checkpoint.");
boolean fullCheckPointIsComplete = checkFullCheckPointIsComplete(lastCheckPoint);
if (!fullCheckPointIsComplete) {
OLogManager.instance().warn(this, "Full checkpoint is not complete.");
OLogSequenceNumber previousCheckpoint = ((OFullCheckpointStartRecord) checkPointRecord).getPreviousCheckpoint();
checkPointRecord = null;
if (previousCheckpoint != null)
checkPointRecord = writeAheadLog.read(previousCheckpoint);
if (checkPointRecord != null) {
    OLogManager.instance().warn(this, "Will restore from previous checkpoint.");
    // mirror the fuzzy-checkpoint branch above: actually perform the restore
    restoreFromCheckPoint((OAbstractCheckPointStartRecord) checkPointRecord);
} else {
    OLogManager.instance().warn(this, "Will restore from the beginning.");
    restoreFromBeginning();
}
} else
restoreFromCheckPoint((OAbstractCheckPointStartRecord) checkPointRecord);
return;
}
throw new OStorageException("Unknown checkpoint record type " + checkPointRecord.getClass().getName());
}
private boolean checkFullCheckPointIsComplete(OLogSequenceNumber lastCheckPoint) throws IOException {
OLogSequenceNumber lsn = writeAheadLog.next(lastCheckPoint);
while (lsn != null) {
OWALRecord walRecord = writeAheadLog.read(lsn);
if (walRecord instanceof OCheckpointEndRecord)
return true;
lsn = writeAheadLog.next(lsn);
}
return false;
}
private boolean checkFuzzyCheckPointIsComplete(OLogSequenceNumber lastCheckPoint) throws IOException {
OLogSequenceNumber lsn = writeAheadLog.next(lastCheckPoint);
while (lsn != null) {
OWALRecord walRecord = writeAheadLog.read(lsn);
if (walRecord instanceof OFuzzyCheckpointEndRecord)
return true;
lsn = writeAheadLog.next(lsn);
}
return false;
}
private void restoreFromCheckPoint(OAbstractCheckPointStartRecord checkPointRecord) throws IOException {
if (checkPointRecord instanceof OFuzzyCheckpointStartRecord) {
restoreFromFuzzyCheckPoint((OFuzzyCheckpointStartRecord) checkPointRecord);
return;
}
if (checkPointRecord instanceof OFullCheckpointStartRecord) {
restoreFromFullCheckPoint((OFullCheckpointStartRecord) checkPointRecord);
return;
}
throw new OStorageException("Unknown checkpoint record type " + checkPointRecord.getClass().getName());
}
private void restoreFromFullCheckPoint(OFullCheckpointStartRecord checkPointRecord) throws IOException {
OLogManager.instance().info(this, "Data restore procedure from full checkpoint is started. Restore is performed from LSN %s",
checkPointRecord.getLsn());
final OLogSequenceNumber lsn = writeAheadLog.next(checkPointRecord.getLsn());
restoreFrom(lsn);
}
private void restoreFromFuzzyCheckPoint(OFuzzyCheckpointStartRecord checkPointRecord) throws IOException {
OLogManager.instance().info(this, "Data restore procedure from fuzzy checkpoint is started.");
OLogSequenceNumber dirtyPagesLSN = writeAheadLog.next(checkPointRecord.getLsn());
ODirtyPagesRecord dirtyPagesRecord = (ODirtyPagesRecord) writeAheadLog.read(dirtyPagesLSN);
OLogSequenceNumber startLSN;
Set<ODirtyPage> dirtyPages = dirtyPagesRecord.getDirtyPages();
if (dirtyPages.isEmpty()) {
startLSN = dirtyPagesLSN;
} else {
ODirtyPage[] pages = dirtyPages.toArray(new ODirtyPage[dirtyPages.size()]);
Arrays.sort(pages, new Comparator<ODirtyPage>() {
@Override
public int compare(ODirtyPage pageOne, ODirtyPage pageTwo) {
return pageOne.getLsn().compareTo(pageTwo.getLsn());
}
});
startLSN = pages[0].getLsn();
}
if (startLSN.compareTo(writeAheadLog.begin()) < 0)
startLSN = writeAheadLog.begin();
restoreFrom(startLSN);
}
private void restoreFromBeginning() throws IOException {
    OLogManager.instance().info(this, "Data restore procedure is started.");
OLogSequenceNumber lsn = writeAheadLog.begin();
restoreFrom(lsn);
}
private void restoreFrom(OLogSequenceNumber lsn) throws IOException {
wereDataRestoredAfterOpen = true;
Map<OOperationUnitId, List<OWALRecord>> operationUnits = new HashMap<OOperationUnitId, List<OWALRecord>>();
while (lsn != null) {
OWALRecord walRecord = writeAheadLog.read(lsn);
if (walRecord instanceof OAtomicUnitStartRecord) {
List<OWALRecord> operationList = new ArrayList<OWALRecord>();
operationUnits.put(((OAtomicUnitStartRecord) walRecord).getOperationUnitId(), operationList);
operationList.add(walRecord);
} else if (walRecord instanceof OOperationUnitRecord) {
OOperationUnitRecord operationUnitRecord = (OOperationUnitRecord) walRecord;
OOperationUnitId unitId = operationUnitRecord.getOperationUnitId();
List<OWALRecord> records = operationUnits.get(unitId);
assert records != null;
records.add(walRecord);
if (operationUnitRecord instanceof OAtomicUnitEndRecord) {
OAtomicUnitEndRecord atomicUnitEndRecord = (OAtomicUnitEndRecord) walRecord;
if (atomicUnitEndRecord.isRollback())
undoOperation(records);
else
redoOperation(records);
operationUnits.remove(unitId);
}
} else
OLogManager.instance().warn(this, "Record %s will be skipped during data restore.", walRecord);
lsn = writeAheadLog.next(lsn);
}
rollbackAllUnfinishedWALOperations(operationUnits);
}
private void redoOperation(List<OWALRecord> records) throws IOException {
for (int i = 0; i < records.size(); i++) {
OWALRecord record = records.get(i);
if (checkFirstAtomicUnitRecord(i, record))
continue;
if (checkLastAtomicUnitRecord(i, record, records.size()))
continue;
if (record instanceof OUpdatePageRecord) {
final OUpdatePageRecord updatePageRecord = (OUpdatePageRecord) record;
final long fileId = updatePageRecord.getFileId();
final long pageIndex = updatePageRecord.getPageIndex();
if (!diskCache.isOpen(fileId))
diskCache.openFile(fileId);
final OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, true);
final OCachePointer cachePointer = cacheEntry.getCachePointer();
cachePointer.acquireExclusiveLock();
try {
ODurablePage durablePage = new ODurablePage(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE);
durablePage.restoreChanges(updatePageRecord.getChanges());
durablePage.setLsn(updatePageRecord.getLsn());
cacheEntry.markDirty();
} finally {
cachePointer.releaseExclusiveLock();
diskCache.release(cacheEntry);
}
} else {
OLogManager.instance().error(this, "Invalid WAL record type was passed %s. Given record will be skipped.",
record.getClass());
assert false : "Invalid WAL record type was passed " + record.getClass().getName();
}
}
}
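// During redo the page-level diff stored in each OUpdatePageRecord is reapplied and
// the page LSN is advanced to that record's LSN, recording how far the page has been
// replayed before it is marked dirty and released back to the disk cache.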
private void rollbackAllUnfinishedWALOperations(Map<OOperationUnitId, List<OWALRecord>> operationUnits) throws IOException {
for (List<OWALRecord> operationUnit : operationUnits.values()) {
if (operationUnit.isEmpty())
continue;
final OAtomicUnitStartRecord atomicUnitStartRecord = (OAtomicUnitStartRecord) operationUnit.get(0);
if (!atomicUnitStartRecord.isRollbackSupported())
continue;
final OAtomicUnitEndRecord atomicUnitEndRecord = new OAtomicUnitEndRecord(atomicUnitStartRecord.getOperationUnitId(), true);
writeAheadLog.log(atomicUnitEndRecord);
operationUnit.add(atomicUnitEndRecord);
undoOperation(operationUnit);
}
}
public boolean wereDataRestoredAfterOpen() {
return wereDataRestoredAfterOpen;
}
public void create(final Map<String, Object> iProperties) {
lock.acquireExclusiveLock();
try {
if (status != STATUS.CLOSED)
throw new OStorageException("Cannot create new storage '" + name + "' because it is not closed");
addUser();
final File storageFolder = new File(storagePath);
if (!storageFolder.exists())
storageFolder.mkdirs();
if (exists())
throw new OStorageException("Cannot create new storage '" + name + "' because it already exists");
initWal();
status = STATUS.OPEN;
// ADD THE METADATA CLUSTER TO STORE INTERNAL STUFF
doAddCluster(OMetadataDefault.CLUSTER_INTERNAL_NAME, null, false, null);
// ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF
// INDEXING
doAddCluster(OMetadataDefault.CLUSTER_INDEX_NAME, null, false, null);
// ADD THE INDEX CLUSTER TO STORE, BY DEFAULT, ALL THE RECORDS OF
// INDEXING
doAddCluster(OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME, null, false, null);
// ADD THE DEFAULT CLUSTER
defaultClusterId = doAddCluster(CLUSTER_DEFAULT_NAME, null, false, null);
configuration.create();
if (OGlobalConfiguration.STORAGE_MAKE_FULL_CHECKPOINT_AFTER_CREATE.getValueAsBoolean())
makeFullCheckpoint();
} catch (OStorageException e) {
close();
throw e;
} catch (IOException e) {
close();
throw new OStorageException("Error on creation of storage '" + name + "'", e);
} finally {
lock.releaseExclusiveLock();
}
}
public void reload() {
}
public boolean exists() {
return exists(storagePath);
}
private boolean exists(String path) {
return new File(path + "/" + OMetadataDefault.CLUSTER_INTERNAL_NAME + OPaginatedCluster.DEF_EXTENSION).exists();
}
@Override
public void close(final boolean force) {
doClose(force, true);
}
private void doClose(boolean force, boolean flush) {
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
if (!checkForClose(force))
return;
status = STATUS.CLOSING;
makeFullCheckpoint();
if (writeAheadLog != null) {
fuzzyCheckpointExecutor.shutdown();
if (!fuzzyCheckpointExecutor.awaitTermination(
OGlobalConfiguration.WAL_FUZZY_CHECKPOINT_SHUTDOWN_TIMEOUT.getValueAsInteger(), TimeUnit.SECONDS))
throw new OStorageException("Can not terminate fuzzy checkpoint task");
checkpointExecutor.shutdown();
if (!checkpointExecutor.awaitTermination(OGlobalConfiguration.WAL_FULL_CHECKPOINT_SHUTDOWN_TIMEOUT.getValueAsInteger(),
TimeUnit.SECONDS))
throw new OStorageException("Can not terminate full checkpoint task");
}
for (OCluster cluster : clusters)
if (cluster != null)
cluster.close(flush);
clusters = new OCluster[0];
clusterMap.clear();
if (configuration != null)
configuration.close();
level2Cache.shutdown();
super.close(force);
diskCache.close();
if (writeAheadLog != null)
writeAheadLog.delete();
Orient.instance().unregisterStorage(this);
status = STATUS.CLOSED;
} catch (InterruptedException ie) {
OLogManager.instance().error(this, "Error on closing of storage '" + name, ie, OStorageException.class);
Thread.interrupted();
} catch (IOException e) {
OLogManager.instance().error(this, "Error on closing of storage '" + name, e, OStorageException.class);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".close", "Close a database", timer, "db.*.close");
}
}
public void delete() {
// CLOSE THE DATABASE BY REMOVING THE CURRENT USER
if (status != STATUS.CLOSED) {
if (getUsers() > 0) {
while (removeUser() > 0)
;
}
}
doClose(true, false);
try {
Orient.instance().unregisterStorage(this);
} catch (Exception e) {
OLogManager.instance().error(this, "Cannot unregister storage", e);
}
final long timer = Orient.instance().getProfiler().startChrono();
// GET REAL DIRECTORY
File dbDir = new File(OIOUtils.getPathFromDatabaseName(OSystemVariableResolver.resolveSystemVariables(url)));
if (!dbDir.exists() || !dbDir.isDirectory())
dbDir = dbDir.getParentFile();
lock.acquireExclusiveLock();
try {
if (writeAheadLog != null)
writeAheadLog.delete();
if (diskCache != null)
diskCache.delete();
// RETRIES
for (int i = 0; i < DELETE_MAX_RETRIES; ++i) {
if (dbDir.exists() && dbDir.isDirectory()) {
int notDeletedFiles = 0;
// TRY TO DELETE ALL THE FILES
for (File f : dbDir.listFiles()) {
// DELETE ONLY THE SUPPORTED FILES
for (String ext : ALL_FILE_EXTENSIONS)
if (f.getPath().endsWith(ext)) {
if (!f.delete()) {
notDeletedFiles++;
}
break;
}
}
if (notDeletedFiles == 0) {
// TRY TO DELETE ALSO THE DIRECTORY IF IT'S EMPTY
dbDir.delete();
return;
}
} else
return;
OLogManager
.instance()
.debug(
this,
"Cannot delete database files because they are still locked by the OrientDB process: waiting %d ms and retrying %d/%d...",
DELETE_WAIT_TIME, i, DELETE_MAX_RETRIES);
// FORCE FINALIZATION TO COLLECT ALL THE PENDING BUFFERS
OMemoryWatchDog.freeMemoryForResourceCleanup(DELETE_WAIT_TIME);
}
throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ". Database files seem locked");
} catch (IOException e) {
throw new OStorageException("Cannot delete database '" + name + "' located in: " + dbDir + ".", e);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".drop", "Drop a database", timer, "db.*.drop");
}
}
public boolean check(final boolean verbose, final OCommandOutputListener listener) {
lock.acquireExclusiveLock();
try {
final long start = System.currentTimeMillis();
OPageDataVerificationError[] pageErrors = diskCache.checkStoredPages(verbose ? listener : null);
listener.onMessage("Check of storage completed in " + (System.currentTimeMillis() - start) + "ms. "
+ (pageErrors.length > 0 ? pageErrors.length + " with errors." : " without errors."));
return pageErrors.length == 0;
} finally {
lock.releaseExclusiveLock();
}
}
public ODataLocal getDataSegmentById(final int dataSegmentId) {
OLogManager.instance().error(
this,
"getDataSegmentById: Local paginated storage does not support data segments. "
+ "null will be returned for data segment %d.", dataSegmentId);
return null;
}
public int getDataSegmentIdByName(final String dataSegmentName) {
OLogManager.instance().error(
this,
"getDataSegmentIdByName: Local paginated storage does not support data segments. "
+ "-1 will be returned for data segment %s.", dataSegmentName);
return -1;
}
/**
* Add a new data segment in the default segment directory and with filename equals to the cluster name.
*/
public int addDataSegment(final String iDataSegmentName) {
return addDataSegment(iDataSegmentName, null);
}
public void enableFullCheckPointAfterClusterCreate() {
checkOpeness();
lock.acquireExclusiveLock();
try {
makeFullCheckPointAfterClusterCreate = true;
} finally {
lock.releaseExclusiveLock();
}
}
public void disableFullCheckPointAfterClusterCreate() {
checkOpeness();
lock.acquireExclusiveLock();
try {
makeFullCheckPointAfterClusterCreate = false;
} finally {
lock.releaseExclusiveLock();
}
}
public boolean isMakeFullCheckPointAfterClusterCreate() {
checkOpeness();
lock.acquireSharedLock();
try {
return makeFullCheckPointAfterClusterCreate;
} finally {
lock.releaseSharedLock();
}
}
public int addDataSegment(String segmentName, final String directory) {
OLogManager.instance().error(
this,
"addDataSegment: Local paginated storage does not support data"
+ " segments, segment %s will not be added in directory %s.", segmentName, directory);
return -1;
}
public int addCluster(final String clusterType, String clusterName, final String location, final String dataSegmentName,
boolean forceListBased, final Object... parameters) {
checkOpeness();
lock.acquireExclusiveLock();
try {
return doAddCluster(clusterName, location, true, parameters);
} catch (Exception e) {
OLogManager.instance().exception("Error in creation of new cluster '" + clusterName + "' of type: " + clusterType, e,
OStorageException.class);
} finally {
lock.releaseExclusiveLock();
}
return -1;
}
private int doAddCluster(String clusterName, String location, boolean fullCheckPoint, Object[] parameters) throws IOException {
// FIND THE FIRST AVAILABLE CLUSTER ID
int clusterPos = clusters.length;
for (int i = 0; i < clusters.length; ++i) {
if (clusters[i] == null) {
clusterPos = i;
break;
}
}
return addClusterInternal(clusterName, clusterPos, location, fullCheckPoint, parameters);
}
public int addCluster(String clusterType, String clusterName, int requestedId, String location, String dataSegmentName,
boolean forceListBased, Object... parameters) {
lock.acquireExclusiveLock();
try {
if (requestedId < 0) {
throw new OConfigurationException("Cluster id must be positive!");
}
if (requestedId < clusters.length && clusters[requestedId] != null) {
throw new OConfigurationException("Requested cluster ID [" + requestedId + "] is occupied by cluster with name ["
+ clusters[requestedId].getName() + "]");
}
return addClusterInternal(clusterName, requestedId, location, true, parameters);
} catch (Exception e) {
OLogManager.instance().exception("Error in creation of new cluster '" + clusterName + "' of type: " + clusterType, e,
OStorageException.class);
} finally {
lock.releaseExclusiveLock();
}
return -1;
}
private int addClusterInternal(String clusterName, int clusterPos, String location, boolean fullCheckPoint, Object... parameters)
throws IOException {
final OCluster cluster;
if (clusterName != null) {
clusterName = clusterName.toLowerCase();
cluster = OPaginatedClusterFactory.INSTANCE.createCluster(configuration.version);
cluster.configure(this, clusterPos, clusterName, location, -1, parameters);
if (clusterName.equals(OMVRBTreeRIDProvider.PERSISTENT_CLASS_NAME.toLowerCase())) {
cluster.set(OCluster.ATTRIBUTES.USE_WAL, false);
cluster.set(OCluster.ATTRIBUTES.RECORD_GROW_FACTOR, 5);
cluster.set(OCluster.ATTRIBUTES.RECORD_OVERFLOW_GROW_FACTOR, 2);
}
} else {
cluster = null;
}
final int createdClusterId = registerCluster(cluster);
if (cluster != null) {
if (!cluster.exists()) {
cluster.create(-1);
if (makeFullCheckPointAfterClusterCreate && fullCheckPoint)
makeFullCheckpoint();
} else {
cluster.open();
}
configuration.update();
}
return createdClusterId;
}
public boolean dropCluster(final int iClusterId, final boolean iTruncate) {
lock.acquireExclusiveLock();
try {
if (iClusterId < 0 || iClusterId >= clusters.length)
throw new IllegalArgumentException("Cluster id '" + iClusterId + "' is outside the of range of configured clusters (0-"
+ (clusters.length - 1) + ") in database '" + name + "'");
final OCluster cluster = clusters[iClusterId];
if (cluster == null)
return false;
getLevel2Cache().freeCluster(iClusterId);
if (iTruncate)
cluster.truncate();
cluster.delete();
clusterMap.remove(cluster.getName());
clusters[iClusterId] = null;
// UPDATE CONFIGURATION
configuration.dropCluster(iClusterId);
makeFullCheckpoint();
return true;
} catch (Exception e) {
OLogManager.instance().exception("Error while removing cluster '" + iClusterId + "'", e, OStorageException.class);
} finally {
lock.releaseExclusiveLock();
}
return false;
}
public boolean dropDataSegment(final String iName) {
throw new UnsupportedOperationException("dropDataSegment");
}
public long count(final int iClusterId) {
return count(iClusterId, false);
}
@Override
public long count(int iClusterId, boolean countTombstones) {
if (iClusterId == -1)
throw new OStorageException("Cluster Id " + iClusterId + " is invalid in database '" + name + "'");
// COUNT PHYSICAL CLUSTER IF ANY
checkOpeness();
lock.acquireSharedLock();
try {
final OCluster cluster = clusters[iClusterId];
if (cluster == null)
return 0;
if (countTombstones)
return cluster.getEntries();
return cluster.getEntries() - cluster.getTombstonesCount();
} finally {
lock.releaseSharedLock();
}
}
public OClusterPosition[] getClusterDataRange(final int iClusterId) {
if (iClusterId == -1)
return new OClusterPosition[] { OClusterPosition.INVALID_POSITION, OClusterPosition.INVALID_POSITION };
checkOpeness();
lock.acquireSharedLock();
try {
return clusters[iClusterId] != null ? new OClusterPosition[] { clusters[iClusterId].getFirstPosition(),
clusters[iClusterId].getLastPosition() } : new OClusterPosition[0];
} catch (IOException ioe) {
throw new OStorageException("Can not retrieve information about data range", ioe);
} finally {
lock.releaseSharedLock();
}
}
public long count(final int[] iClusterIds) {
return count(iClusterIds, false);
}
@Override
public long count(int[] iClusterIds, boolean countTombstones) {
checkOpeness();
lock.acquireSharedLock();
try {
long tot = 0;
for (int iClusterId : iClusterIds) {
if (iClusterId >= clusters.length)
throw new OConfigurationException("Cluster id " + iClusterId + " was not found in database '" + name + "'");
if (iClusterId > -1) {
final OCluster c = clusters[iClusterId];
if (c != null)
tot += c.getEntries() - (countTombstones ? 0L : c.getTombstonesCount());
}
}
return tot;
} finally {
lock.releaseSharedLock();
}
}
public OStorageOperationResult<OPhysicalPosition> createRecord(final int dataSegmentId, final ORecordId rid,
final byte[] content, ORecordVersion recordVersion, final byte recordType, final int mode,
final ORecordCallback<OClusterPosition> callback) {
checkOpeness();
final long timer = Orient.instance().getProfiler().startChrono();
final OCluster cluster = getClusterById(rid.clusterId);
cluster.getExternalModificationLock().requestModificationLock();
try {
modificationLock.requestModificationLock();
try {
checkOpeness();
if (content == null)
throw new IllegalArgumentException("Record is null");
OPhysicalPosition ppos = new OPhysicalPosition(-1, -1, recordType);
try {
lock.acquireSharedLock();
try {
if (recordVersion.getCounter() > -1)
recordVersion.increment();
else
recordVersion = OVersionFactory.instance().createVersion();
ppos = cluster.createRecord(content, recordVersion, recordType);
rid.clusterPosition = ppos.clusterPosition;
if (callback != null)
callback.call(rid, ppos.clusterPosition);
return new OStorageOperationResult<OPhysicalPosition>(ppos);
} finally {
lock.releaseSharedLock();
}
} catch (IOException ioe) {
try {
if (ppos.clusterPosition != null && ppos.clusterPosition.compareTo(OClusterPosition.INVALID_POSITION) != 0)
cluster.deleteRecord(ppos.clusterPosition);
} catch (IOException e) {
OLogManager.instance().error(this, "Error on removing record in cluster: " + cluster, e);
}
OLogManager.instance().error(this, "Error on creating record in cluster: " + cluster, ioe);
return null;
}
} finally {
modificationLock.releaseModificationLock();
}
} finally {
cluster.getExternalModificationLock().releaseModificationLock();
Orient.instance().getProfiler().stopChrono(PROFILER_CREATE_RECORD, "Create a record in database", timer, "db.*.createRecord");
}
}
@Override
public ORecordMetadata getRecordMetadata(ORID rid) {
if (rid.isNew())
throw new OStorageException("Passed record with id " + rid + " is new and can not be stored.");
checkOpeness();
final OCluster cluster = getClusterById(rid.getClusterId());
lock.acquireSharedLock();
try {
lockManager.acquireLock(Thread.currentThread(), rid, OLockManager.LOCK.SHARED);
try {
final OPhysicalPosition ppos = cluster.getPhysicalPosition(new OPhysicalPosition(rid.getClusterPosition()));
if (ppos == null)
return null;
return new ORecordMetadata(rid, ppos.recordVersion);
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, OLockManager.LOCK.SHARED);
}
} catch (IOException ioe) {
OLogManager.instance().error(this, "Retrieval of record '" + rid + "' cause: " + ioe.getMessage(), ioe);
} finally {
lock.releaseSharedLock();
}
return null;
}
@Override
public OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRid, final String iFetchPlan, boolean iIgnoreCache,
ORecordCallback<ORawBuffer> iCallback, boolean loadTombstones) {
checkOpeness();
return new OStorageOperationResult<ORawBuffer>(readRecord(getClusterById(iRid.clusterId), iRid, true, loadTombstones));
}
@Override
protected ORawBuffer readRecord(final OCluster clusterSegment, final ORecordId rid, boolean atomicLock, boolean loadTombstones) {
checkOpeness();
if (!rid.isPersistent())
throw new IllegalArgumentException("Cannot read record " + rid + " since the position is invalid in database '" + name + '\'');
final long timer = Orient.instance().getProfiler().startChrono();
clusterSegment.getExternalModificationLock().requestModificationLock();
try {
if (atomicLock)
lock.acquireSharedLock();
try {
lockManager.acquireLock(Thread.currentThread(), rid, OLockManager.LOCK.SHARED);
try {
return clusterSegment.readRecord(rid.clusterPosition);
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, OLockManager.LOCK.SHARED);
}
} catch (IOException e) {
OLogManager.instance().error(this, "Error on reading record " + rid + " (cluster: " + clusterSegment + ')', e);
return null;
} finally {
if (atomicLock)
lock.releaseSharedLock();
}
} finally {
clusterSegment.getExternalModificationLock().releaseModificationLock();
Orient.instance().getProfiler().stopChrono(PROFILER_READ_RECORD, "Read a record from database", timer, "db.*.readRecord");
}
}
public OStorageOperationResult<ORecordVersion> updateRecord(final ORecordId rid, final byte[] content,
final ORecordVersion version, final byte recordType, final int mode, ORecordCallback<ORecordVersion> callback) {
checkOpeness();
final long timer = Orient.instance().getProfiler().startChrono();
final OCluster cluster = getClusterById(rid.clusterId);
cluster.getExternalModificationLock().requestModificationLock();
try {
modificationLock.requestModificationLock();
try {
lock.acquireSharedLock();
try {
// GET THE SHARED LOCK AND GET AN EXCLUSIVE LOCK AGAINST THE RECORD
lockManager.acquireLock(Thread.currentThread(), rid, OLockManager.LOCK.EXCLUSIVE);
try {
// UPDATE IT
final OPhysicalPosition ppos = cluster.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition));
if (!checkForRecordValidity(ppos)) {
final ORecordVersion recordVersion = OVersionFactory.instance().createUntrackedVersion();
if (callback != null)
callback.call(rid, recordVersion);
return new OStorageOperationResult<ORecordVersion>(recordVersion);
}
// VERSION CONTROL CHECK
switch (version.getCounter()) {
// DOCUMENT UPDATE, NO VERSION CONTROL
case -1:
ppos.recordVersion.increment();
break;
// DOCUMENT UPDATE, NO VERSION CONTROL, NO VERSION UPDATE
case -2:
ppos.recordVersion.setCounter(-2);
break;
default:
// MVCC CONTROL AND RECORD UPDATE OR WRONG VERSION VALUE
// MVCC TRANSACTION: CHECK IF VERSION IS THE SAME
if (!version.equals(ppos.recordVersion))
if (OFastConcurrentModificationException.enabled())
throw OFastConcurrentModificationException.instance();
else
throw new OConcurrentModificationException(rid, ppos.recordVersion, version, ORecordOperation.UPDATED);
ppos.recordVersion.increment();
}
cluster.updateRecord(rid.clusterPosition, content, ppos.recordVersion, recordType);
if (callback != null)
callback.call(rid, ppos.recordVersion);
return new OStorageOperationResult<ORecordVersion>(ppos.recordVersion);
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, OLockManager.LOCK.EXCLUSIVE);
}
} catch (IOException e) {
OLogManager.instance().error(this, "Error on updating record " + rid + " (cluster: " + cluster + ")", e);
final ORecordVersion recordVersion = OVersionFactory.instance().createUntrackedVersion();
if (callback != null)
callback.call(rid, recordVersion);
return new OStorageOperationResult<ORecordVersion>(recordVersion);
} finally {
lock.releaseSharedLock();
}
} finally {
modificationLock.releaseModificationLock();
}
} finally {
cluster.getExternalModificationLock().releaseModificationLock();
Orient.instance().getProfiler().stopChrono(PROFILER_UPDATE_RECORD, "Update a record to database", timer, "db.*.updateRecord");
}
}
@Override
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId rid, final ORecordVersion version, final int mode,
ORecordCallback<Boolean> callback) {
checkOpeness();
final long timer = Orient.instance().getProfiler().startChrono();
final OCluster cluster = getClusterById(rid.clusterId);
cluster.getExternalModificationLock().requestModificationLock();
try {
modificationLock.requestModificationLock();
try {
lock.acquireSharedLock();
try {
lockManager.acquireLock(Thread.currentThread(), rid, OLockManager.LOCK.EXCLUSIVE);
try {
final OPhysicalPosition ppos = cluster.getPhysicalPosition(new OPhysicalPosition(rid.clusterPosition));
if (ppos == null)
// ALREADY DELETED
return new OStorageOperationResult<Boolean>(false);
// MVCC TRANSACTION: CHECK IF VERSION IS THE SAME
if (version.getCounter() > -1 && !ppos.recordVersion.equals(version))
if (OFastConcurrentModificationException.enabled())
throw OFastConcurrentModificationException.instance();
else
throw new OConcurrentModificationException(rid, ppos.recordVersion, version, ORecordOperation.DELETED);
cluster.deleteRecord(ppos.clusterPosition);
return new OStorageOperationResult<Boolean>(true);
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, OLockManager.LOCK.EXCLUSIVE);
}
} finally {
lock.releaseSharedLock();
}
} catch (IOException e) {
OLogManager.instance().error(this, "Error on deleting record " + rid + "( cluster: " + cluster + ")", e);
} finally {
modificationLock.releaseModificationLock();
}
} finally {
cluster.getExternalModificationLock().releaseModificationLock();
Orient.instance().getProfiler()
.stopChrono(PROFILER_DELETE_RECORD, "Delete a record from database", timer, "db.*.deleteRecord");
}
return new OStorageOperationResult<Boolean>(false);
}
public boolean updateReplica(final int dataSegmentId, final ORecordId rid, final byte[] content,
final ORecordVersion recordVersion, final byte recordType) throws IOException {
throw new OStorageException("Support of hash based clusters is required.");
}
@Override
public <V> V callInLock(Callable<V> iCallable, boolean iExclusiveLock) {
if (iExclusiveLock) {
modificationLock.requestModificationLock();
try {
return super.callInLock(iCallable, iExclusiveLock);
} finally {
modificationLock.releaseModificationLock();
}
} else {
return super.callInLock(iCallable, iExclusiveLock);
}
}
@Override
public <V> V callInRecordLock(Callable<V> callable, ORID rid, boolean exclusiveLock) {
if (exclusiveLock)
modificationLock.requestModificationLock();
try {
if (exclusiveLock) {
lock.acquireExclusiveLock();
} else
lock.acquireSharedLock();
try {
lockManager
.acquireLock(Thread.currentThread(), rid, exclusiveLock ? OLockManager.LOCK.EXCLUSIVE : OLockManager.LOCK.SHARED);
try {
return callable.call();
} finally {
lockManager.releaseLock(Thread.currentThread(), rid, exclusiveLock ? OLockManager.LOCK.EXCLUSIVE
: OLockManager.LOCK.SHARED);
}
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new OException("Error on nested call in lock", e);
} finally {
if (exclusiveLock) {
lock.releaseExclusiveLock();
} else
lock.releaseSharedLock();
}
} finally {
if (exclusiveLock)
modificationLock.releaseModificationLock();
}
}
public Set<String> getClusterNames() {
checkOpeness();
lock.acquireSharedLock();
try {
return clusterMap.keySet();
} finally {
lock.releaseSharedLock();
}
}
public int getClusterIdByName(final String iClusterName) {
checkOpeness();
if (iClusterName == null)
throw new IllegalArgumentException("Cluster name is null");
if (iClusterName.length() == 0)
throw new IllegalArgumentException("Cluster name is empty");
if (Character.isDigit(iClusterName.charAt(0)))
return Integer.parseInt(iClusterName);
// SEARCH IT BETWEEN PHYSICAL CLUSTERS
lock.acquireSharedLock();
try {
final OCluster segment = clusterMap.get(iClusterName.toLowerCase());
if (segment != null)
return segment.getId();
} finally {
lock.releaseSharedLock();
}
return -1;
}
public String getClusterTypeByName(final String iClusterName) {
checkOpeness();
if (iClusterName == null)
throw new IllegalArgumentException("Cluster name is null");
// SEARCH IT BETWEEN PHYSICAL CLUSTERS
lock.acquireSharedLock();
try {
final OCluster segment = clusterMap.get(iClusterName.toLowerCase());
if (segment != null)
return segment.getType();
} finally {
lock.releaseSharedLock();
}
return null;
}
public void commit(final OTransaction clientTx, Runnable callback) {
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
if (writeAheadLog == null)
throw new OStorageException("WAL mode is not active. Transactions are not supported in given mode");
startStorageTx(clientTx);
final List<ORecordOperation> tmpEntries = new ArrayList<ORecordOperation>();
while (clientTx.getCurrentRecordEntries().iterator().hasNext()) {
for (ORecordOperation txEntry : clientTx.getCurrentRecordEntries())
tmpEntries.add(txEntry);
clientTx.clearRecordEntries();
for (ORecordOperation txEntry : tmpEntries)
// COMMIT ALL THE SINGLE ENTRIES ONE BY ONE
commitEntry(clientTx, txEntry);
}
if (callback != null)
callback.run();
endStorageTx();
OTransactionAbstract.updateCacheFromEntries(clientTx, clientTx.getAllRecordEntries(), false);
} catch (Exception e) {
// WE NEED TO CALL ROLLBACK HERE, IN THE LOCK
OLogManager.instance().debug(this, "Error during transaction commit, transaction will be rolled back (tx-id=%d)", e,
clientTx.getId());
rollback(clientTx);
if (e instanceof OException)
throw ((OException) e);
else
throw new OStorageException("Error during transaction commit.", e);
} finally {
transaction = null;
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
private void commitEntry(final OTransaction clientTx, final ORecordOperation txEntry) throws IOException {
if (txEntry.type != ORecordOperation.DELETED && !txEntry.getRecord().isDirty())
return;
final ORecordId rid = (ORecordId) txEntry.getRecord().getIdentity();
if (rid.clusterId == ORID.CLUSTER_ID_INVALID && txEntry.getRecord() instanceof ODocument
&& ((ODocument) txEntry.getRecord()).getSchemaClass() != null) {
// TRY TO FIX CLUSTER ID TO THE DEFAULT CLUSTER ID DEFINED IN SCHEMA CLASS
rid.clusterId = ((ODocument) txEntry.getRecord()).getSchemaClass().getDefaultClusterId();
}
final OCluster cluster = getClusterById(rid.clusterId);
if (cluster.getName().equals(OMetadataDefault.CLUSTER_INDEX_NAME)
|| cluster.getName().equals(OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME))
// AVOID TO COMMIT INDEX STUFF
return;
if (txEntry.getRecord() instanceof OTxListener)
((OTxListener) txEntry.getRecord()).onEvent(txEntry, OTxListener.EVENT.BEFORE_COMMIT);
switch (txEntry.type) {
case ORecordOperation.LOADED:
break;
case ORecordOperation.CREATED: {
// CHECK 2 TIMES TO ASSURE THAT IT'S A CREATE OR AN UPDATE BASED ON RECURSIVE TO-STREAM METHOD
byte[] stream = txEntry.getRecord().toStream();
if (stream == null) {
OLogManager.instance().warn(this, "Null serialization on committing new record %s in transaction", rid);
break;
}
final ORecordId oldRID = rid.isNew() ? rid.copy() : rid;
if (rid.isNew()) {
rid.clusterId = cluster.getId();
final OPhysicalPosition ppos;
ppos = createRecord(-1, rid, stream, txEntry.getRecord().getRecordVersion(), txEntry.getRecord().getRecordType(), -1, null)
.getResult();
rid.clusterPosition = ppos.clusterPosition;
txEntry.getRecord().getRecordVersion().copyFrom(ppos.recordVersion);
clientTx.updateIdentityAfterCommit(oldRID, rid);
} else {
txEntry
.getRecord()
.getRecordVersion()
.copyFrom(
updateRecord(rid, stream, txEntry.getRecord().getRecordVersion(), txEntry.getRecord().getRecordType(), -1, null)
.getResult());
}
break;
}
case ORecordOperation.UPDATED: {
byte[] stream = txEntry.getRecord().toStream();
if (stream == null) {
OLogManager.instance().warn(this, "Null serialization on committing updated record %s in transaction", rid);
break;
}
txEntry
.getRecord()
.getRecordVersion()
.copyFrom(
updateRecord(rid, stream, txEntry.getRecord().getRecordVersion(), txEntry.getRecord().getRecordType(), -1, null)
.getResult());
break;
}
case ORecordOperation.DELETED: {
deleteRecord(rid, txEntry.getRecord().getRecordVersion(), -1, null);
break;
}
}
txEntry.getRecord().unsetDirty();
if (txEntry.getRecord() instanceof OTxListener)
((OTxListener) txEntry.getRecord()).onEvent(txEntry, OTxListener.EVENT.AFTER_COMMIT);
}
public void rollback(final OTransaction clientTx) {
checkOpeness();
modificationLock.requestModificationLock();
try {
lock.acquireExclusiveLock();
try {
if (transaction == null)
return;
if (writeAheadLog == null)
throw new OStorageException("WAL mode is not active. Transactions are not supported in given mode");
if (transaction.getClientTx().getId() != clientTx.getId())
throw new OStorageException(
"The passed-in transaction differs from the active transaction and cannot be rolled back.");
rollbackStorageTx();
OTransactionAbstract.updateCacheFromEntries(clientTx, clientTx.getAllRecordEntries(), false);
} catch (IOException e) {
throw new OStorageException("Error during transaction rollback.", e);
} finally {
transaction = null;
lock.releaseExclusiveLock();
}
} finally {
modificationLock.releaseModificationLock();
}
}
@Override
public boolean checkForRecordValidity(final OPhysicalPosition ppos) {
return ppos != null && !ppos.recordVersion.isTombstone();
}
public void synch() {
checkOpeness();
final long timer = Orient.instance().getProfiler().startChrono();
lock.acquireExclusiveLock();
try {
if (writeAheadLog != null) {
makeFullCheckpoint();
return;
}
diskCache.flushBuffer();
if (configuration != null)
configuration.synch();
} catch (IOException e) {
throw new OStorageException("Error on synch storage '" + name + "'", e);
} finally {
lock.releaseExclusiveLock();
Orient.instance().getProfiler().stopChrono("db." + name + ".synch", "Synch a database", timer, "db.*.synch");
}
}
public void setDefaultClusterId(final int defaultClusterId) {
this.defaultClusterId = defaultClusterId;
}
public String getPhysicalClusterNameById(final int iClusterId) {
checkOpeness();
lock.acquireSharedLock();
try {
if (iClusterId >= clusters.length)
return null;
return clusters[iClusterId] != null ? clusters[iClusterId].getName() : null;
} finally {
lock.releaseSharedLock();
}
}
@Override
public OStorageConfiguration getConfiguration() {
return configuration;
}
public int getDefaultClusterId() {
return defaultClusterId;
}
public OCluster getClusterById(int iClusterId) {
lock.acquireSharedLock();
try {
if (iClusterId == ORID.CLUSTER_ID_INVALID)
// GET THE DEFAULT CLUSTER
iClusterId = defaultClusterId;
checkClusterSegmentIndexRange(iClusterId);
final OCluster cluster = clusters[iClusterId];
if (cluster == null)
throw new IllegalArgumentException("Cluster " + iClusterId + " is null");
return cluster;
} finally {
lock.releaseSharedLock();
}
}
private void checkClusterSegmentIndexRange(final int iClusterId) {
if (iClusterId > clusters.length - 1)
throw new IllegalArgumentException("Cluster segment #" + iClusterId + " does not exist in database '" + name + "'");
}
@Override
public OCluster getClusterByName(final String iClusterName) {
lock.acquireSharedLock();
try {
final OCluster cluster = clusterMap.get(iClusterName.toLowerCase());
if (cluster == null)
throw new IllegalArgumentException("Cluster " + iClusterName + " does not exist in database '" + name + "'");
return cluster;
} finally {
lock.releaseSharedLock();
}
}
@Override
public String getURL() {
return OEngineLocalPaginated.NAME + ":" + url;
}
public long getSize() {
lock.acquireSharedLock();
try {
long size = 0;
for (OCluster c : clusters)
if (c != null)
size += c.getRecordsSize();
return size;
} catch (IOException ioe) {
throw new OStorageException("Can not calculate records size");
} finally {
lock.releaseSharedLock();
}
}
public String getStoragePath() {
return storagePath;
}
@Override
protected OPhysicalPosition updateRecord(OCluster cluster, ORecordId rid, byte[] recordContent, ORecordVersion recordVersion,
byte recordType) {
throw new UnsupportedOperationException("updateRecord");
}
@Override
protected OPhysicalPosition createRecord(ODataLocal dataSegment, OCluster cluster, byte[] recordContent, byte recordType,
ORecordId rid, ORecordVersion recordVersion) {
throw new UnsupportedOperationException("createRecord");
}
public String getMode() {
return mode;
}
public OStorageVariableParser getVariableParser() {
return variableParser;
}
public int getClusters() {
lock.acquireSharedLock();
try {
return clusterMap.size();
} finally {
lock.releaseSharedLock();
}
}
public Set<OCluster> getClusterInstances() {
final Set<OCluster> result = new HashSet<OCluster>();
lock.acquireSharedLock();
try {
// ADD ALL THE CLUSTERS
for (OCluster c : clusters)
if (c != null)
result.add(c);
} finally {
lock.releaseSharedLock();
}
return result;
}
/**
* Method that completes the cluster rename operation. <strong>IT WILL NOT RENAME A CLUSTER, IT JUST CHANGES THE NAME IN THE
* INTERNAL MAPPING</strong>
*/
public void renameCluster(final String iOldName, final String iNewName) {
clusterMap.put(iNewName, clusterMap.remove(iOldName));
}
@Override
public boolean cleanOutRecord(ORecordId recordId, ORecordVersion recordVersion, int iMode, ORecordCallback<Boolean> callback) {
return deleteRecord(recordId, recordVersion, iMode, callback).getResult();
}
public void freeze(boolean throwException) {
modificationLock.prohibitModifications(throwException);
synch();
try {
diskCache.setSoftlyClosed(true);
if (configuration != null)
configuration.setSoftlyClosed(true);
} catch (IOException e) {
throw new OStorageException("Error on freeze of storage '" + name + "'", e);
}
}
public void release() {
try {
diskCache.setSoftlyClosed(false);
if (configuration != null)
configuration.setSoftlyClosed(false);
} catch (IOException e) {
throw new OStorageException("Error on release of storage '" + name + "'", e);
}
modificationLock.allowModifications();
}
public boolean wasClusterSoftlyClosed(String clusterName) {
lock.acquireSharedLock();
try {
final OCluster indexCluster = clusterMap.get(clusterName);
return indexCluster.wasSoftlyClosed();
} catch (IOException ioe) {
throw new OStorageException("Error during index consistency check", ioe);
} finally {
lock.releaseSharedLock();
}
}
public void makeFuzzyCheckpoint() {
// if (writeAheadLog == null)
// return;
//
// try {
// lock.acquireExclusiveLock();
// try {
// writeAheadLog.flush();
//
// writeAheadLog.logFuzzyCheckPointStart();
//
// diskCache.forceSyncStoredChanges();
// diskCache.logDirtyPagesTable();
//
// writeAheadLog.logFuzzyCheckPointEnd();
//
// writeAheadLog.flush();
// } finally {
// lock.releaseExclusiveLock();
// }
// } catch (IOException ioe) {
// throw new OStorageException("Error during fuzzy checkpoint creation for storage " + name, ioe);
// }
}
public void makeFullCheckpoint() {
if (writeAheadLog == null)
return;
lock.acquireExclusiveLock();
try {
writeAheadLog.flush();
if (configuration != null)
configuration.synch();
writeAheadLog.logFullCheckpointStart();
diskCache.flushBuffer();
writeAheadLog.logFullCheckpointEnd();
writeAheadLog.flush();
} catch (IOException ioe) {
throw new OStorageException("Error during checkpoint creation for storage " + name, ioe);
} finally {
lock.releaseExclusiveLock();
}
}
public void scheduleFullCheckpoint() {
if (checkpointExecutor != null)
checkpointExecutor.execute(new Runnable() {
@Override
public void run() {
try {
makeFullCheckpoint();
} catch (Throwable t) {
OLogManager.instance().error(this, "Error during background checkpoint creation for storage " + name, t);
}
}
});
}
@Override
public String getType() {
return OEngineLocalPaginated.NAME;
}
private int createClusterFromConfig(final OStorageClusterConfiguration iConfig) throws IOException {
OCluster cluster = clusterMap.get(iConfig.getName());
if (cluster != null) {
cluster.configure(this, iConfig);
return -1;
}
cluster = OPaginatedClusterFactory.INSTANCE.createCluster(configuration.version);
cluster.configure(this, iConfig);
return registerCluster(cluster);
}
/**
* Register the cluster internally.
*
* @param cluster
* OCluster implementation
* @return The id (physical position into the array) of the new cluster just created. First is 0.
* @throws IOException
*/
private int registerCluster(final OCluster cluster) throws IOException {
final int id;
if (cluster != null) {
// CHECK FOR DUPLICATION OF NAMES
if (clusterMap.containsKey(cluster.getName()))
throw new OConfigurationException("Cannot add segment '" + cluster.getName()
+ "' because it is already registered in database '" + name + "'");
// CREATE AND ADD THE NEW REF SEGMENT
clusterMap.put(cluster.getName(), cluster);
id = cluster.getId();
} else {
id = clusters.length;
}
if (id >= clusters.length) {
clusters = OArrays.copyOf(clusters, id + 1);
}
clusters[id] = cluster;
return id;
}
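// For instance, registering a cluster whose id is 7 while clusters.length is 5 grows
// the array to length 8 and stores the cluster at index 7; passing null instead
// reserves the next free slot (id == the old clusters.length) as a placeholder.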
private void addDefaultClusters() throws IOException {
configuration.load();
final String storageCompression = OGlobalConfiguration.STORAGE_COMPRESSION_METHOD.getValueAsString();
createClusterFromConfig(new OStoragePaginatedClusterConfiguration(configuration, clusters.length,
OMetadataDefault.CLUSTER_INTERNAL_NAME, null, true, 20, 4, storageCompression));
createClusterFromConfig(new OStoragePaginatedClusterConfiguration(configuration, clusters.length,
OMetadataDefault.CLUSTER_INDEX_NAME, null, false, OStoragePaginatedClusterConfiguration.DEFAULT_GROW_FACTOR,
OStoragePaginatedClusterConfiguration.DEFAULT_GROW_FACTOR, storageCompression));
createClusterFromConfig(new OStoragePaginatedClusterConfiguration(configuration, clusters.length,
OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME, null, false, 1, 1, storageCompression));
defaultClusterId = createClusterFromConfig(new OStoragePaginatedClusterConfiguration(configuration, clusters.length,
CLUSTER_DEFAULT_NAME, null, true, OStoragePaginatedClusterConfiguration.DEFAULT_GROW_FACTOR,
OStoragePaginatedClusterConfiguration.DEFAULT_GROW_FACTOR, storageCompression));
}
public ODiskCache getDiskCache() {
return diskCache;
}
public void freeze(boolean throwException, int clusterId) {
final OCluster cluster = getClusterById(clusterId);
final String name = cluster.getName();
if (OMetadataDefault.CLUSTER_INDEX_NAME.equals(name) || OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME.equals(name)) {
throw new IllegalArgumentException("It is impossible to freeze and release index or manual index cluster!");
}
cluster.getExternalModificationLock().prohibitModifications(throwException);
try {
cluster.synch();
cluster.setSoftlyClosed(true);
} catch (IOException e) {
throw new OStorageException("Error on synch cluster '" + name + "'", e);
}
}
public void release(int clusterId) {
final OCluster cluster = getClusterById(clusterId);
final String name = cluster.getName();
if (OMetadataDefault.CLUSTER_INDEX_NAME.equals(name) || OMetadataDefault.CLUSTER_MANUAL_INDEX_NAME.equals(name)) {
throw new IllegalArgumentException("It is impossible to freeze and release index or manualindex cluster!");
}
try {
cluster.setSoftlyClosed(false);
} catch (IOException e) {
throw new OStorageException("Error on unfreeze storage '" + name + "'", e);
}
cluster.getExternalModificationLock().allowModifications();
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_OLocalPaginatedStorage.java
|
88 |
@Service("blStaticAssetService")
public class StaticAssetServiceImpl extends AbstractContentService implements StaticAssetService {
private static final Log LOG = LogFactory.getLog(StaticAssetServiceImpl.class);
@Value("${asset.server.url.prefix.internal}")
protected String staticAssetUrlPrefix;
@Value("${asset.server.url.prefix}")
protected String staticAssetEnvironmentUrlPrefix;
@Resource(name = "blImageArtifactProcessor")
protected ImageArtifactProcessor imageArtifactProcessor;
@Value("${asset.use.filesystem.storage}")
protected boolean storeAssetsOnFileSystem = false;
@Value("${asset.server.url.prefix.secure}")
protected String staticAssetEnvironmentSecureUrlPrefix;
@Value("${automatically.approve.static.assets}")
protected boolean automaticallyApproveAndPromoteStaticAssets=true;
@Resource(name="blStaticAssetDao")
protected StaticAssetDao staticAssetDao;
@Resource(name="blSandBoxItemDao")
protected SandBoxItemDao sandBoxItemDao;
@Resource(name="blStaticAssetStorageService")
protected StaticAssetStorageService staticAssetStorageService;
private final Random random = new Random();
private static final String FILE_NAME_CHARS = "0123456789abcdef";
@Override
public StaticAsset findStaticAssetById(Long id) {
return staticAssetDao.readStaticAssetById(id);
}
@Override
public List<StaticAsset> readAllStaticAssets() {
return staticAssetDao.readAllStaticAssets();
}
static {
MimeUtil.registerMimeDetector(ExtensionMimeDetector.class.getName());
MimeUtil.registerMimeDetector(MagicMimeMimeDetector.class.getName());
}
protected String getFileExtension(String fileName) {
int pos = fileName.lastIndexOf(".");
if (pos > 0) {
return fileName.substring(pos + 1).toLowerCase();
} else {
LOG.warn("No extension provided for asset : " + fileName);
return null;
}
}
/**
* Generates a filename as a set of Hex digits.
* @param size
* @return
*/
protected String generateFileName(int size) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < size; i++) {
int pos = random.nextInt(FILE_NAME_CHARS.length());
sb = sb.append(FILE_NAME_CHARS.charAt(pos));
}
return sb.toString();
}
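// Illustrative only: generateFileName(8) yields an 8-character lowercase hex string
// such as "3f09ab7c"; the value is random, so this example output is hypothetical.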
/**
* Will assemble the url from the passed in properties as
* /{entityType}/{fileName}
* /product/7001-ab12
*
* If the properties above are not set, it will generate the fileName randomly.
*
* @param url
* @param asset
* @param assetProperties
* @return
*/
protected String buildAssetURL(Map<String, String> assetProperties, String originalFilename) {
StringBuilder path = new StringBuilder("/");
String entityType = assetProperties.get("entityType");
String entityId = assetProperties.get("entityId");
String fileName = assetProperties.get("fileName");
if (entityType != null) {
path = path.append(entityType).append("/");
}
if (entityId != null) {
path = path.append(entityId).append("/");
}
if (fileName != null) {
int pos = fileName.indexOf(":");
if (pos > 0) {
if (LOG.isTraceEnabled()) {
LOG.trace("Removing protocol from URL name" + fileName);
}
fileName = fileName.substring(pos + 1);
}
} else {
fileName = originalFilename;
}
return path.append(fileName).toString();
}
@Override
@Transactional(TransactionUtils.DEFAULT_TRANSACTION_MANAGER)
public StaticAsset createStaticAssetFromFile(MultipartFile file, Map<String, String> properties) {
if (properties == null) {
properties = new HashMap<String, String>();
}
String fullUrl = buildAssetURL(properties, file.getOriginalFilename());
StaticAsset newAsset = staticAssetDao.readStaticAssetByFullUrl(fullUrl, null);
int count = 0;
while (newAsset != null) {
count++;
//try the new format first, then the old
newAsset = staticAssetDao.readStaticAssetByFullUrl(getCountUrl(fullUrl, count, false), null);
if (newAsset == null) {
newAsset = staticAssetDao.readStaticAssetByFullUrl(getCountUrl(fullUrl, count, true), null);
}
}
if (count > 0) {
fullUrl = getCountUrl(fullUrl, count, false);
}
try {
ImageMetadata metadata = imageArtifactProcessor.getImageMetadata(file.getInputStream());
newAsset = new ImageStaticAssetImpl();
((ImageStaticAsset) newAsset).setWidth(metadata.getWidth());
((ImageStaticAsset) newAsset).setHeight(metadata.getHeight());
} catch (Exception e) {
//must not be an image stream
newAsset = new StaticAssetImpl();
}
if (storeAssetsOnFileSystem) {
newAsset.setStorageType(StorageType.FILESYSTEM);
} else {
newAsset.setStorageType(StorageType.DATABASE);
}
newAsset.setName(file.getOriginalFilename());
getMimeType(file, newAsset);
newAsset.setFileExtension(getFileExtension(file.getOriginalFilename()));
newAsset.setFileSize(file.getSize());
newAsset.setFullUrl(fullUrl);
return staticAssetDao.addOrUpdateStaticAsset(newAsset, false);
}
/**
* Gets the count URL based on the original fullUrl. If requested in legacy format this will return URLs like:
*
* /path/to/image.jpg-1
* /path/to/image.jpg-2
*
* Whereas if this is in non-legacy format (<b>legacy</b> == false):
*
* /path/to/image-1.jpg
* /path/to/image-2.jpg
*
* Used to deal with duplicate URLs of uploaded assets
*
*/
protected String getCountUrl(String fullUrl, int count, boolean legacyFormat) {
String countUrl = fullUrl + '-' + count;
int dotIndex = fullUrl.lastIndexOf('.');
if (dotIndex != -1 && !legacyFormat) {
countUrl = fullUrl.substring(0, dotIndex) + '-' + count + '.' + fullUrl.substring(dotIndex + 1);
}
return countUrl;
}
protected void getMimeType(MultipartFile file, StaticAsset newAsset) {
Collection mimeTypes = MimeUtil.getMimeTypes(file.getOriginalFilename());
if (!mimeTypes.isEmpty()) {
MimeType mimeType = (MimeType) mimeTypes.iterator().next();
newAsset.setMimeType(mimeType.toString());
} else {
try {
mimeTypes = MimeUtil.getMimeTypes(file.getInputStream());
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
if (!mimeTypes.isEmpty()) {
MimeType mimeType = (MimeType) mimeTypes.iterator().next();
newAsset.setMimeType(mimeType.toString());
}
}
}
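// Detection order above: extension-based matching on the original filename first,
// falling back to magic-byte detection on the stream only when the extension yields
// no candidates; if both fail, the asset's MIME type is simply left unset.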
@Override
public StaticAsset findStaticAssetByFullUrl(String fullUrl, SandBox targetSandBox) {
try {
fullUrl = URLDecoder.decode(fullUrl, "UTF-8");
//strip out the jsessionid if it's there
fullUrl = fullUrl.replaceAll(";jsessionid=.*?(?=\\?|$)", "");
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("Unsupported encoding to decode fullUrl", e);
}
return staticAssetDao.readStaticAssetByFullUrl(fullUrl, targetSandBox);
}
@Override
@Transactional(TransactionUtils.DEFAULT_TRANSACTION_MANAGER)
public StaticAsset addStaticAsset(StaticAsset staticAsset, SandBox destinationSandbox) {
if (automaticallyApproveAndPromoteStaticAssets) {
if (destinationSandbox != null && destinationSandbox.getSite() != null) {
destinationSandbox = destinationSandbox.getSite().getProductionSandbox();
} else {
// Null means production for single-site installations.
destinationSandbox = null;
}
}
staticAsset.setSandbox(destinationSandbox);
staticAsset.setDeletedFlag(false);
staticAsset.setArchivedFlag(false);
StaticAsset newAsset = staticAssetDao.addOrUpdateStaticAsset(staticAsset, true);
if (! isProductionSandBox(destinationSandbox)) {
sandBoxItemDao.addSandBoxItem(destinationSandbox.getId(), SandBoxOperationType.ADD, SandBoxItemType.STATIC_ASSET, newAsset.getFullUrl(), newAsset.getId(), null);
}
return newAsset;
}
@Override
@Transactional(TransactionUtils.DEFAULT_TRANSACTION_MANAGER)
public StaticAsset updateStaticAsset(StaticAsset staticAsset, SandBox destSandbox) {
if (staticAsset.getLockedFlag()) {
throw new IllegalArgumentException("Unable to update a locked record");
}
if (automaticallyApproveAndPromoteStaticAssets) {
if (destSandbox != null && destSandbox.getSite() != null) {
destSandbox = destSandbox.getSite().getProductionSandbox();
} else {
// Null means production for single-site installations.
destSandbox = null;
}
}
if (checkForSandboxMatch(staticAsset.getSandbox(), destSandbox)) {
if (staticAsset.getDeletedFlag()) {
SandBoxItem item = sandBoxItemDao.retrieveBySandboxAndTemporaryItemId(staticAsset.getSandbox()==null?null:staticAsset.getSandbox().getId(), SandBoxItemType.STATIC_ASSET, staticAsset.getId());
if (staticAsset.getOriginalAssetId() == null && item != null) {
// This item was added in this sandbox and now needs to be deleted.
staticAsset.setArchivedFlag(true);
item.setArchivedFlag(true);
} else if (item != null) {
// This item was being updated but now is being deleted - so change the
// sandbox operation type to deleted
item.setSandBoxOperationType(SandBoxOperationType.DELETE);
sandBoxItemDao.updateSandBoxItem(item);
} else if (automaticallyApproveAndPromoteStaticAssets) {
staticAsset.setArchivedFlag(true);
}
}
return staticAssetDao.addOrUpdateStaticAsset(staticAsset, true);
} else if (isProductionSandBox(staticAsset.getSandbox())) {
// Move from production to destSandbox
StaticAsset clonedAsset = staticAsset.cloneEntity();
clonedAsset.setOriginalAssetId(staticAsset.getId());
clonedAsset.setSandbox(destSandbox);
StaticAsset returnAsset = staticAssetDao.addOrUpdateStaticAsset(clonedAsset, true);
StaticAsset prod = findStaticAssetById(staticAsset.getId());
prod.setLockedFlag(true);
staticAssetDao.addOrUpdateStaticAsset(prod, false);
SandBoxOperationType type = SandBoxOperationType.UPDATE;
if (clonedAsset.getDeletedFlag()) {
type = SandBoxOperationType.DELETE;
}
sandBoxItemDao.addSandBoxItem(destSandbox.getId(), type, SandBoxItemType.STATIC_ASSET, returnAsset.getFullUrl(), returnAsset.getId(), returnAsset.getOriginalAssetId());
return returnAsset;
} else {
// This should happen via a promote, revert, or reject in the sandbox service
throw new IllegalArgumentException("Update called when promote or reject was expected.");
}
}
// Returns true if the src and dest sandbox are the same.
private boolean checkForSandboxMatch(SandBox src, SandBox dest) {
if (src != null) {
if (dest != null) {
return src.getId().equals(dest.getId());
}
}
return (src == null && dest == null);
}
// // Returns true if the dest sandbox is production.
// private boolean checkForProductionSandbox(SandBox dest) {
// boolean productionSandbox = false;
//
// if (dest == null) {
// productionSandbox = true;
// } else {
// if (dest.getSite() != null && dest.getSite().getProductionSandbox() != null && dest.getSite().getProductionSandbox().getId() != null) {
// productionSandbox = dest.getSite().getProductionSandbox().getId().equals(dest.getId());
// }
// }
//
// return productionSandbox;
// }
// Returns true if the dest sandbox is production.
private boolean isProductionSandBox(SandBox dest) {
return dest == null || SandBoxType.PRODUCTION.equals(dest.getSandBoxType());
}
@Override
@Transactional("blTransactionManager")
public void deleteStaticAsset(StaticAsset staticAsset, SandBox destinationSandbox) {
staticAsset.setDeletedFlag(true);
updateStaticAsset(staticAsset, destinationSandbox);
}
@Override
public List<StaticAsset> findAssets(SandBox sandbox, Criteria c) {
return findItems(sandbox, c, StaticAsset.class, StaticAssetImpl.class, "originalAssetId");
}
@Override
public Long countAssets(SandBox sandbox, Criteria c) {
return countItems(sandbox, c, StaticAssetImpl.class, "originalAssetId");
}
@Override
public void itemPromoted(SandBoxItem sandBoxItem, SandBox destinationSandBox) {
if (! SandBoxItemType.STATIC_ASSET.equals(sandBoxItem.getSandBoxItemType())) {
return;
}
StaticAsset asset = staticAssetDao.readStaticAssetById(sandBoxItem.getTemporaryItemId());
if (asset == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Asset not found " + sandBoxItem.getTemporaryItemId());
}
} else {
boolean productionSandBox = isProductionSandBox(destinationSandBox);
if (productionSandBox) {
asset.setLockedFlag(false);
} else {
asset.setLockedFlag(true);
}
if (productionSandBox && asset.getOriginalAssetId() != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Asset promoted to production. " + asset.getId() + ". Archiving original asset " + asset.getOriginalAssetId());
}
StaticAsset originalAsset = staticAssetDao.readStaticAssetById(asset.getOriginalAssetId());
originalAsset.setArchivedFlag(Boolean.TRUE);
staticAssetDao.addOrUpdateStaticAsset(originalAsset, false);
asset.setOriginalAssetId(null);
if (asset.getDeletedFlag()) {
asset.setArchivedFlag(Boolean.TRUE);
}
}
if (asset.getOriginalSandBox() == null) {
asset.setOriginalSandBox(asset.getSandbox());
}
asset.setSandbox(destinationSandBox);
staticAssetDao.addOrUpdateStaticAsset(asset, false);
}
}
@Override
public void itemRejected(SandBoxItem sandBoxItem, SandBox destinationSandBox) {
if (! SandBoxItemType.STATIC_ASSET.equals(sandBoxItem.getSandBoxItemType())) {
return;
}
StaticAsset asset = staticAssetDao.readStaticAssetById(sandBoxItem.getTemporaryItemId());
if (asset != null) {
asset.setSandbox(destinationSandBox);
asset.setOriginalSandBox(null);
asset.setLockedFlag(false);
staticAssetDao.addOrUpdateStaticAsset(asset, false);
}
}
@Override
public void itemReverted(SandBoxItem sandBoxItem) {
if (! SandBoxItemType.STATIC_ASSET.equals(sandBoxItem.getSandBoxItemType())) {
return;
}
StaticAsset asset = staticAssetDao.readStaticAssetById(sandBoxItem.getTemporaryItemId());
if (asset != null) {
asset.setArchivedFlag(Boolean.TRUE);
asset.setLockedFlag(false);
staticAssetDao.addOrUpdateStaticAsset(asset, false);
StaticAsset originalAsset = staticAssetDao.readStaticAssetById(sandBoxItem.getOriginalItemId());
originalAsset.setLockedFlag(false);
staticAssetDao.addOrUpdateStaticAsset(originalAsset, false);
}
}
@Override
public String getStaticAssetUrlPrefix() {
return staticAssetUrlPrefix;
}
@Override
public void setStaticAssetUrlPrefix(String staticAssetUrlPrefix) {
this.staticAssetUrlPrefix = staticAssetUrlPrefix;
}
@Override
public String getStaticAssetEnvironmentUrlPrefix() {
return fixEnvironmentUrlPrefix(staticAssetEnvironmentUrlPrefix);
}
@Override
public void setStaticAssetEnvironmentUrlPrefix(String staticAssetEnvironmentUrlPrefix) {
this.staticAssetEnvironmentUrlPrefix = staticAssetEnvironmentUrlPrefix;
}
@Override
public String getStaticAssetEnvironmentSecureUrlPrefix() {
if (StringUtils.isEmpty(staticAssetEnvironmentSecureUrlPrefix)) {
if (!StringUtils.isEmpty(staticAssetEnvironmentUrlPrefix) && staticAssetEnvironmentUrlPrefix.indexOf("http:") >= 0) {
staticAssetEnvironmentSecureUrlPrefix = staticAssetEnvironmentUrlPrefix.replace("http:", "https:");
}
}
return fixEnvironmentUrlPrefix(staticAssetEnvironmentSecureUrlPrefix);
}
public void setStaticAssetEnvironmentSecureUrlPrefix(String staticAssetEnvironmentSecureUrlPrefix) {
this.staticAssetEnvironmentSecureUrlPrefix = staticAssetEnvironmentSecureUrlPrefix;
}
@Override
public boolean getAutomaticallyApproveAndPromoteStaticAssets() {
return automaticallyApproveAndPromoteStaticAssets;
}
@Override
public void setAutomaticallyApproveAndPromoteStaticAssets(boolean automaticallyApproveAndPromoteStaticAssets) {
this.automaticallyApproveAndPromoteStaticAssets = automaticallyApproveAndPromoteStaticAssets;
}
/**
* Trims whitespace. If the value is the same as the internal url prefix, then return
* null.
*
* @param urlPrefix
* @return
*/
private String fixEnvironmentUrlPrefix(String urlPrefix) {
if (urlPrefix != null) {
urlPrefix = urlPrefix.trim();
if ("".equals(urlPrefix)) {
// The value was not set.
urlPrefix = null;
} else if (urlPrefix.equals(staticAssetUrlPrefix)) {
// The value is the same as the default, so no processing needed.
urlPrefix = null;
}
}
if (urlPrefix != null && !urlPrefix.endsWith("/")) {
urlPrefix = urlPrefix + "/";
}
return urlPrefix;
}
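// Worked example (hypothetical values): with staticAssetUrlPrefix = "cmsstatic",
// fixEnvironmentUrlPrefix(" cmsstatic ") returns null because the trimmed value
// matches the internal prefix, while fixEnvironmentUrlPrefix("http://cdn.example.com")
// returns "http://cdn.example.com/" with the trailing slash appended.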
/**
* This method will take in an assetPath (think image url) and prepend the
* staticAssetUrlPrefix if one exists.
*
* Will append any contextPath onto the request. If the incoming assetPath contains
* the internalStaticAssetPrefix and the image is being prepended, the prepend will be
* removed.
*
* @param assetPath - The path to rewrite if it is a cms managed asset
* @param contextPath - The context path of the web application (if applicable)
* @param secureRequest - True if the request is being served over https
* @return
* @see org.broadleafcommerce.cms.file.service.StaticAssetService#getStaticAssetUrlPrefix()
* @see org.broadleafcommerce.cms.file.service.StaticAssetService#getStaticAssetEnvironmentUrlPrefix()
*/
@Override
public String convertAssetPath(String assetPath, String contextPath, boolean secureRequest) {
String returnValue = assetPath;
if (assetPath != null && getStaticAssetEnvironmentUrlPrefix() != null && ! "".equals(getStaticAssetEnvironmentUrlPrefix())) {
final String envPrefix;
if (secureRequest) {
envPrefix = getStaticAssetEnvironmentSecureUrlPrefix();
} else {
envPrefix = getStaticAssetEnvironmentUrlPrefix();
}
if (envPrefix != null) {
// remove the starting "/" if it exists.
if (returnValue.startsWith("/")) {
returnValue = returnValue.substring(1);
}
// Also, remove the "cmsstatic" from the URL before prepending the staticAssetUrlPrefix.
if (returnValue.startsWith(getStaticAssetUrlPrefix())) {
returnValue = returnValue.substring(getStaticAssetUrlPrefix().trim().length());
// remove the starting "/" if it exists.
if (returnValue.startsWith("/")) {
returnValue = returnValue.substring(1);
}
}
returnValue = envPrefix + returnValue;
}
} else {
if (returnValue != null && ! ImportSupport.isAbsoluteUrl(returnValue)) {
if (! returnValue.startsWith("/")) {
returnValue = "/" + returnValue;
}
// Add context path
if (contextPath != null && ! contextPath.equals("")) {
if (! contextPath.equals("/")) {
// Shouldn't be the case, but let's handle it anyway
if (contextPath.endsWith("/")) {
returnValue = returnValue.substring(1);
}
if (contextPath.startsWith("/")) {
returnValue = contextPath + returnValue; // normal case
} else {
returnValue = "/" + contextPath + returnValue;
}
}
}
}
}
return returnValue;
}
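// Worked example (hypothetical values, assuming staticAssetUrlPrefix == "cmsstatic"
// and an environment prefix of "http://cdn.example.com/"):
//   convertAssetPath("/cmsstatic/img/logo.png", "/store", false)
//       -> "http://cdn.example.com/img/logo.png" (internal prefix stripped, env prefix prepended)
//   With no environment prefix configured, the same call returns
//       "/store/cmsstatic/img/logo.png" (context path prepended instead)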
}
| 1no label
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_service_StaticAssetServiceImpl.java
|
290 |
Thread t = new Thread(new Runnable() {
public void run() {
lock.lock();
try {
if (lock.isLockedByCurrentThread()) {
count.incrementAndGet();
}
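// await() atomically releases the lock and re-acquires it before returning,
// so the thread still owns the lock when the second check runs.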
condition.await();
if (lock.isLockedByCurrentThread()) {
count.incrementAndGet();
}
} catch (InterruptedException ignored) {
} finally {
lock.unlock();
}
}
});
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientConditionTest.java
|
122 |
public interface OProfilerHookValue {
public Object getValue();
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_profiler_OAbstractProfiler.java
|
403 |
public class OomeOnClientShutdownMain {
public static void main(String[] args) {
Hazelcast.newHazelcastInstance();
ClientConfig clientConfig = new ClientConfig();
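// Repeatedly create and shut down clients; if shutdown() leaks resources,
// this loop will eventually fail with an OutOfMemoryError.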
for (int k = 0; k < 1000000; k++) {
System.out.println("At:" + k);
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
client.shutdown();
}
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_oome_OomeOnClientShutdownMain.java
|
156 |
public class OIntegerSerializer implements OBinarySerializer<Integer> {
private static final OBinaryConverter CONVERTER = OBinaryConverterFactory.getConverter();
public static OIntegerSerializer INSTANCE = new OIntegerSerializer();
public static final byte ID = 8;
/**
* size of int value in bytes
*/
public static final int INT_SIZE = 4;
public int getObjectSize(Integer object, Object... hints) {
return INT_SIZE;
}
public void serialize(Integer object, byte[] stream, int startPosition, Object... hints) {
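// Write the int big-endian: most significant byte first.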
final int value = object;
stream[startPosition] = (byte) ((value >>> 24) & 0xFF);
stream[startPosition + 1] = (byte) ((value >>> 16) & 0xFF);
stream[startPosition + 2] = (byte) ((value >>> 8) & 0xFF);
stream[startPosition + 3] = (byte) ((value >>> 0) & 0xFF);
}
public Integer deserialize(byte[] stream, int startPosition) {
return (stream[startPosition]) << 24 | (0xff & stream[startPosition + 1]) << 16 | (0xff & stream[startPosition + 2]) << 8
| ((0xff & stream[startPosition + 3]));
}
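// Round-trip example: serialize(0x01020304, stream, 0) writes the big-endian bytes
// {0x01, 0x02, 0x03, 0x04}, and deserialize(stream, 0) reassembles them into 0x01020304.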
public int getObjectSize(byte[] stream, int startPosition) {
return INT_SIZE;
}
public byte getId() {
return ID;
}
public int getObjectSizeNative(byte[] stream, int startPosition) {
return INT_SIZE;
}
public void serializeNative(Integer object, byte[] stream, int startPosition, Object... hints) {
CONVERTER.putInt(stream, startPosition, object, ByteOrder.nativeOrder());
}
public Integer deserializeNative(byte[] stream, int startPosition) {
return CONVERTER.getInt(stream, startPosition, ByteOrder.nativeOrder());
}
@Override
public void serializeInDirectMemory(Integer object, ODirectMemoryPointer pointer, long offset, Object... hints) {
pointer.setInt(offset, object);
}
@Override
public Integer deserializeFromDirectMemory(ODirectMemoryPointer pointer, long offset) {
return pointer.getInt(offset);
}
@Override
public int getObjectSizeInDirectMemory(ODirectMemoryPointer pointer, long offset) {
return INT_SIZE;
}
public boolean isFixedLength() {
return true;
}
public int getFixedLength() {
return INT_SIZE;
}
@Override
public Integer preprocess(Integer value, Object... hints) {
return value;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_serialization_types_OIntegerSerializer.java
|
1,589 |
public class SafeMapperOutputs {
private final MultipleOutputs outputs;
private final Mapper.Context context;
private final boolean testing;
public SafeMapperOutputs(final Mapper.Context context) {
this.context = context;
this.outputs = new MultipleOutputs(this.context);
this.testing = this.context.getConfiguration().getBoolean(HadoopCompiler.TESTING, false);
}
public void write(final String type, final Writable key, final Writable value) throws IOException, InterruptedException {
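// In testing mode, only SIDEEFFECT records are written (to the primary context);
// otherwise every record is routed to its named output.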
if (this.testing) {
if (type.equals(Tokens.SIDEEFFECT))
this.context.write(key, value);
} else
this.outputs.write(type, key, value);
}
public void close() throws IOException, InterruptedException {
this.outputs.close();
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_util_SafeMapperOutputs.java
|
506 |
public class TransportCreateIndexAction extends TransportMasterNodeOperationAction<CreateIndexRequest, CreateIndexResponse> {
private final MetaDataCreateIndexService createIndexService;
@Inject
public TransportCreateIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataCreateIndexService createIndexService) {
super(settings, transportService, clusterService, threadPool);
this.createIndexService = createIndexService;
}
@Override
protected String executor() {
// we go async right away
return ThreadPool.Names.SAME;
}
@Override
protected String transportAction() {
return CreateIndexAction.NAME;
}
@Override
protected CreateIndexRequest newRequest() {
return new CreateIndexRequest();
}
@Override
protected CreateIndexResponse newResponse() {
return new CreateIndexResponse();
}
@Override
protected ClusterBlockException checkBlock(CreateIndexRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, request.index());
}
@Override
protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener<CreateIndexResponse> listener) throws ElasticsearchException {
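// Default the request cause to "api" when the caller did not supply one.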
String cause = request.cause();
if (cause.length() == 0) {
cause = "api";
}
CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(cause, request.index())
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.settings(request.settings()).mappings(request.mappings())
.customs(request.customs());
createIndexService.createIndex(updateRequest, new ClusterStateUpdateListener() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new CreateIndexResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Throwable t) {
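// An index-already-exists failure is an expected race, so log it at trace;
// anything else is logged at debug before propagating to the listener.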
if (t instanceof IndexAlreadyExistsException) {
logger.trace("[{}] failed to create", t, request.index());
} else {
logger.debug("[{}] failed to create", t, request.index());
}
listener.onFailure(t);
}
});
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_admin_indices_create_TransportCreateIndexAction.java
|
286 |
public abstract class ActionResponse extends TransportResponse {
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_ActionResponse.java
|