Unnamed: 0
int64 0
6.45k
| func
stringlengths 37
161k
| target
class label 2
classes | project
stringlengths 33
167
|
---|---|---|---|
1,098 |
/**
 * SQL DISTINCT function: returns each non-null value only the first time it is
 * seen, and null for every repeat. Combined with {@link #filterResult()}
 * returning true, the null results are filtered out of the final result set.
 */
public class OSQLFunctionDistinct extends OSQLFunctionAbstract {
  public static final String NAME = "distinct";

  // LinkedHashSet preserves first-occurrence order of the distinct values.
  private Set<Object> context = new LinkedHashSet<Object>();

  public OSQLFunctionDistinct() {
    super(NAME, 1, 1); // exactly one argument: the field to deduplicate
  }

  public Object execute(final OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters,
      OCommandContext iContext) {
    final Object value = iParameters[0];
    // Set.add() returns true only when the value was not already present, so a
    // single hash lookup replaces the previous contains()-then-add() pair.
    if (value != null && context.add(value)) {
      return value;
    }
    return null;
  }

  @Override
  public boolean filterResult() {
    return true;
  }

  public String getSyntax() {
    return "Syntax error: distinct(<field>)";
  }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_coll_OSQLFunctionDistinct.java
|
330 |
/**
 * Holds the plugin information of a node. Serializable over the transport
 * layer via {@link #readFrom}/{@link #writeTo} and renderable to XContent as
 * a "plugins" array.
 */
public class PluginsInfo implements Streamable, Serializable, ToXContent {

    static final class Fields {
        static final XContentBuilderString PLUGINS = new XContentBuilderString("plugins");
    }

    private List<PluginInfo> infos;

    public PluginsInfo() {
        infos = new ArrayList<PluginInfo>();
    }

    /**
     * @param size expected number of plugins, used to presize the backing list
     */
    public PluginsInfo(int size) {
        infos = new ArrayList<PluginInfo>(size);
    }

    public List<PluginInfo> getInfos() {
        return infos;
    }

    public void add(PluginInfo info) {
        infos.add(info);
    }

    /** Reads and returns a new {@link PluginsInfo} from the given stream. */
    public static PluginsInfo readPluginsInfo(StreamInput in) throws IOException {
        PluginsInfo infos = new PluginsInfo();
        infos.readFrom(in);
        return infos;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        // camelCase local (was plugins_size) per Java naming conventions.
        int pluginsSize = in.readInt();
        for (int i = 0; i < pluginsSize; i++) {
            infos.add(PluginInfo.readPluginInfo(in));
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeInt(infos.size());
        for (PluginInfo plugin : infos) {
            plugin.writeTo(out);
        }
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startArray(Fields.PLUGINS);
        for (PluginInfo pluginInfo : infos) {
            pluginInfo.toXContent(builder, params);
        }
        builder.endArray();
        return builder;
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_info_PluginsInfo.java
|
199 |
/**
 * Test helper for creating, dropping, freezing and releasing OrientDB
 * databases, handling both remote ("remote:...") and local storages. For
 * remote operations the root password is extracted from the server
 * configuration file located by {@link #getConfigurationFile(String)}.
 */
public class ODatabaseHelper {

  public static void createDatabase(ODatabase database, final String url) throws IOException {
    createDatabase(database, url, "server", "plocal");
  }

  public static void createDatabase(ODatabase database, final String url, String type) throws IOException {
    createDatabase(database, url, "server", type);
  }

  /**
   * Creates the database at the given URL: through OServerAdmin when the URL
   * uses the remote engine, otherwise directly on the local database object.
   */
  public static void createDatabase(ODatabase database, final String url, String directory, String type) throws IOException {
    if (url.startsWith(OEngineRemote.NAME)) {
      new OServerAdmin(url).connect("root", getServerRootPassword(directory)).createDatabase("document", type).close();
    } else {
      database.create();
      database.close();
    }
  }

  public static void deleteDatabase(final ODatabase database, String storageType) throws IOException {
    deleteDatabase(database, "server", storageType);
  }

  /** @deprecated use {@link #dropDatabase(ODatabase, String, String)} instead. */
  @Deprecated
  public static void deleteDatabase(final ODatabase database, final String directory, String storageType) throws IOException {
    dropDatabase(database, directory, storageType);
  }

  public static void dropDatabase(final ODatabase database, String storageType) throws IOException {
    dropDatabase(database, "server", storageType);
  }

  /**
   * Drops the database if it exists: remotely via OServerAdmin, or locally by
   * (re)opening the database when closed and dropping it.
   */
  public static void dropDatabase(final ODatabase database, final String directory, String storageType) throws IOException {
    if (existsDatabase(database, storageType)) {
      if (database.getURL().startsWith("remote:")) {
        new OServerAdmin(database.getURL()).connect("root", getServerRootPassword(directory)).dropDatabase(storageType);
      } else {
        if (database.isClosed())
          database.open("admin", "admin");
        database.drop();
      }
    }
  }

  public static boolean existsDatabase(final ODatabase database, String storageType) throws IOException {
    if (database.getURL().startsWith("remote")) {
      return new OServerAdmin(database.getURL()).connect("root", getServerRootPassword()).existsDatabase(storageType);
    } else {
      return database.exists();
    }
  }

  public static void freezeDatabase(final ODatabase database) throws IOException {
    if (database.getURL().startsWith("remote")) {
      final OServerAdmin serverAdmin = new OServerAdmin(database.getURL());
      serverAdmin.connect("root", getServerRootPassword()).freezeDatabase("plocal");
      serverAdmin.close();
    } else {
      database.freeze();
    }
  }

  public static void releaseDatabase(final ODatabase database) throws IOException {
    if (database.getURL().startsWith("remote")) {
      final OServerAdmin serverAdmin = new OServerAdmin(database.getURL());
      serverAdmin.connect("root", getServerRootPassword()).releaseDatabase("plocal");
      serverAdmin.close();
    } else {
      database.release();
    }
  }

  public static File getConfigurationFile() {
    return getConfigurationFile(null);
  }

  protected static String getServerRootPassword() throws IOException {
    return getServerRootPassword("server");
  }

  /**
   * Reads the server configuration file and extracts the first password
   * attribute found, assumed to belong to the root user.
   */
  protected static String getServerRootPassword(final String iDirectory) throws IOException {
    final File file = getConfigurationFile(iDirectory);
    final char[] buffer = new char[(int) file.length()];
    final FileReader f = new FileReader(file);
    try {
      f.read(buffer);
    } finally {
      // FIX: previously the reader leaked if read() threw; always close it.
      f.close();
    }
    String fileContent = new String(buffer);
    // TODO search is wrong because if first user is not root tests will fail
    int pos = fileContent.indexOf("password=\"");
    pos += "password=\"".length();
    return fileContent.substring(pos, fileContent.indexOf('"', pos));
  }

  /**
   * Locates orientdb-server-config.xml by probing, in order: the
   * "orientdb.config.file" system property, the CONFIG_FILE environment
   * variable, known release directories relative to the working directory,
   * the optional iDirectory argument, and finally ORIENTDB_HOME. Throws
   * OConfigurationException when no candidate exists.
   */
  protected static File getConfigurationFile(final String iDirectory) {
    // LOAD SERVER CONFIG FILE TO EXTRACT THE ROOT'S PASSWORD
    String sysProperty = System.getProperty("orientdb.config.file");
    File file = new File(sysProperty != null ? sysProperty : "");
    if (!file.exists()) {
      sysProperty = System.getenv("CONFIG_FILE");
      file = new File(sysProperty != null ? sysProperty : "");
    }
    if (!file.exists())
      file = new File("../releases/orientdb-" + OConstants.ORIENT_VERSION + "/config/orientdb-server-config.xml");
    if (!file.exists())
      file = new File("../releases/orientdb-community-" + OConstants.ORIENT_VERSION + "/config/orientdb-server-config.xml");
    if (!file.exists())
      file = new File("../../releases/orientdb-" + OConstants.ORIENT_VERSION + "/config/orientdb-server-config.xml");
    if (!file.exists())
      file = new File("../../releases/orientdb-community-" + OConstants.ORIENT_VERSION + "/config/orientdb-server-config.xml");
    if (!file.exists() && iDirectory != null) {
      file = new File(iDirectory + "/config/orientdb-server-config.xml");
      if (!file.exists())
        file = new File("../" + iDirectory + "/config/orientdb-server-config.xml");
    }
    if (!file.exists())
      file = new File(OSystemVariableResolver.resolveSystemVariables("${" + Orient.ORIENTDB_HOME
          + "}/config/orientdb-server-config.xml"));
    if (!file.exists())
      throw new OConfigurationException(
          "Cannot load file orientdb-server-config.xml to execute remote tests. Current directory is "
              + new File(".").getAbsolutePath());
    return file;
  }
}
| 0true
|
client_src_main_java_com_orientechnologies_orient_client_db_ODatabaseHelper.java
|
190 |
/**
 * Test utilities for reading and writing simple long keys and String
 * columns/values against a {@link KeyColumnValueStore}.
 */
public class KeyColumnValueStoreUtil {

    public static void delete(KeyColumnValueStore store, StoreTransaction txn, long key, String col) throws BackendException {
        StaticBuffer k = longToByteBuffer(key);
        StaticBuffer c = stringToByteBuffer(col);
        store.mutate(k, KeyColumnValueStore.NO_ADDITIONS, Arrays.asList(c), txn);
    }

    /** Returns the value stored under (key, col) decoded as UTF-8, or null if absent. */
    public static String get(KeyColumnValueStore store, StoreTransaction txn, long key, String col) throws BackendException {
        StaticBuffer k = longToByteBuffer(key);
        StaticBuffer c = stringToByteBuffer(col);
        StaticBuffer valBytes = KCVSUtil.get(store, k, c, txn);
        if (null == valBytes)
            return null;
        return byteBufferToString(valBytes);
    }

    public static void insert(KeyColumnValueStore store, StoreTransaction txn, long key, String col, String val) throws BackendException {
        StaticBuffer k = longToByteBuffer(key);
        StaticBuffer c = stringToByteBuffer(col);
        StaticBuffer v = stringToByteBuffer(val);
        store.mutate(k, Arrays.<Entry>asList(StaticArrayEntry.of(c, v)), KeyColumnValueStore.NO_DELETIONS, txn);
    }

    // TODO rename as "bufferToString" after syntax errors are resolved
    public static String byteBufferToString(StaticBuffer b) {
        ByteBuffer bb = b.asByteBuffer();
        // The Charset overload cannot throw UnsupportedEncodingException, so the
        // previous dead try/catch around the charset-name overload is gone.
        return new String(bb.array(), bb.position() + bb.arrayOffset(), bb.remaining(),
                java.nio.charset.StandardCharsets.UTF_8);
    }

    // TODO rename as "stringToBuffer" after syntax errors are resolved
    public static StaticBuffer stringToByteBuffer(String s) {
        byte[] b = s.getBytes(java.nio.charset.StandardCharsets.UTF_8);
        ByteBuffer bb = ByteBuffer.allocate(b.length);
        bb.put(b);
        bb.flip();
        return StaticArrayBuffer.of(bb);
    }

    // TODO rename as "longToBuffer" after syntax errors are resolved
    public static StaticBuffer longToByteBuffer(long l) {
        return new WriteByteBuffer(8).putLong(l).getStaticBuffer();
    }

    public static long bufferToLong(StaticBuffer b) {
        return b.getLong(0);
    }
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_diskstorage_KeyColumnValueStoreUtil.java
|
335 |
/**
 * Request that restarts the targeted nodes after an optional delay. When no
 * node ids are supplied, <b>all</b> nodes in the cluster are restarted.
 */
public class NodesRestartRequest extends NodesOperationRequest<NodesRestartRequest> {

    TimeValue delay = TimeValue.timeValueSeconds(1);

    protected NodesRestartRequest() {
    }

    /**
     * Creates a restart request for the given node ids; an empty list means
     * every node.
     */
    public NodesRestartRequest(String... nodesIds) {
        super(nodesIds);
    }

    /**
     * Sets how long to wait before the restart occurs. Defaults to <tt>1s</tt>.
     */
    public NodesRestartRequest delay(TimeValue delay) {
        this.delay = delay;
        return this;
    }

    /**
     * Sets the restart delay from a time-value string. Defaults to <tt>1s</tt>.
     */
    public NodesRestartRequest delay(String delay) {
        this.delay = TimeValue.parseTimeValue(delay, null);
        return this;
    }

    /** Returns the configured restart delay. */
    public TimeValue delay() {
        return this.delay;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        this.delay = readTimeValue(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        this.delay.writeTo(out);
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_restart_NodesRestartRequest.java
|
655 |
/**
 * A request to put (create or update) an index template. Holds the template
 * name, the index-name pattern, a merge order, settings, per-type mappings and
 * custom index metadata. It can be populated either through the fluent setters
 * or from a whole template document via the source(...) overloads.
 */
public class PutIndexTemplateRequest extends MasterNodeOperationRequest<PutIndexTemplateRequest> {

    private String name;

    private String cause = "";

    private String template;

    private int order;

    private boolean create;

    private Settings settings = EMPTY_SETTINGS;

    // mapping type -> mapping source (JSON string)
    private Map<String, String> mappings = newHashMap();

    // custom index metadata keyed by its type
    private Map<String, IndexMetaData.Custom> customs = newHashMap();

    PutIndexTemplateRequest() {
    }

    /**
     * Constructs a new put index template request with the provided name.
     */
    public PutIndexTemplateRequest(String name) {
        this.name = name;
    }

    /** Validation fails unless both the template name and pattern are set. */
    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (name == null) {
            validationException = addValidationError("name is missing", validationException);
        }
        if (template == null) {
            validationException = addValidationError("template is missing", validationException);
        }
        return validationException;
    }

    /**
     * Sets the name of the index template.
     */
    public PutIndexTemplateRequest name(String name) {
        this.name = name;
        return this;
    }

    /**
     * The name of the index template.
     */
    public String name() {
        return this.name;
    }

    /** Sets the index-name pattern this template applies to. */
    public PutIndexTemplateRequest template(String template) {
        this.template = template;
        return this;
    }

    public String template() {
        return this.template;
    }

    /** Sets the merge order of this template relative to other templates. */
    public PutIndexTemplateRequest order(int order) {
        this.order = order;
        return this;
    }

    public int order() {
        return this.order;
    }

    /**
     * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
     * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.
     */
    public PutIndexTemplateRequest create(boolean create) {
        this.create = create;
        return this;
    }

    public boolean create() {
        return create;
    }

    /**
     * The settings to create the index template with.
     */
    public PutIndexTemplateRequest settings(Settings settings) {
        this.settings = settings;
        return this;
    }

    /**
     * The settings to create the index template with.
     */
    public PutIndexTemplateRequest settings(Settings.Builder settings) {
        this.settings = settings.build();
        return this;
    }

    /**
     * The settings to create the index template with (either json/yaml/properties format).
     */
    public PutIndexTemplateRequest settings(String source) {
        this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
        return this;
    }

    /**
     * The settings to create the index template with (either json/yaml/properties format).
     */
    public PutIndexTemplateRequest settings(Map<String, Object> source) {
        try {
            // Serialize the map to JSON and re-parse through the String overload.
            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
            builder.map(source);
            settings(builder.string());
        } catch (IOException e) {
            throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
        }
        return this;
    }

    Settings settings() {
        return this.settings;
    }

    /**
     * Adds mapping that will be added when the index gets created.
     *
     * @param type   The mapping type
     * @param source The mapping source
     */
    public PutIndexTemplateRequest mapping(String type, String source) {
        mappings.put(type, source);
        return this;
    }

    /**
     * The cause for this index template creation.
     */
    public PutIndexTemplateRequest cause(String cause) {
        this.cause = cause;
        return this;
    }

    public String cause() {
        return this.cause;
    }

    /**
     * Adds mapping that will be added when the index gets created.
     *
     * @param type   The mapping type
     * @param source The mapping source
     */
    public PutIndexTemplateRequest mapping(String type, XContentBuilder source) {
        try {
            mappings.put(type, source.string());
        } catch (IOException e) {
            throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e);
        }
        return this;
    }

    /**
     * Adds mapping that will be added when the index gets created.
     *
     * @param type   The mapping type
     * @param source The mapping source
     */
    public PutIndexTemplateRequest mapping(String type, Map<String, Object> source) {
        // wrap it in a type map if its not
        if (source.size() != 1 || !source.containsKey(type)) {
            source = MapBuilder.<String, Object>newMapBuilder().put(type, source).map();
        }
        try {
            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
            builder.map(source);
            return mapping(type, builder.string());
        } catch (IOException e) {
            throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
        }
    }

    Map<String, String> mappings() {
        return this.mappings;
    }

    /**
     * The template source definition.
     */
    public PutIndexTemplateRequest source(XContentBuilder templateBuilder) {
        try {
            return source(templateBuilder.bytes());
        } catch (Exception e) {
            throw new ElasticsearchIllegalArgumentException("Failed to build json for template request", e);
        }
    }

    /**
     * The template source definition. Recognized top-level keys are
     * "template", "order", "settings" and "mappings"; any other key is tried
     * as a custom index-metadata type and silently ignored if unknown.
     */
    public PutIndexTemplateRequest source(Map templateSource) {
        // Unchecked casts below assume templateSource is a parsed JSON/YAML map
        // (String keys, nested Maps), as produced by the other source(...) overloads.
        Map<String, Object> source = templateSource;
        for (Map.Entry<String, Object> entry : source.entrySet()) {
            String name = entry.getKey();
            if (name.equals("template")) {
                template(entry.getValue().toString());
            } else if (name.equals("order")) {
                order(XContentMapValues.nodeIntegerValue(entry.getValue(), order()));
            } else if (name.equals("settings")) {
                if (!(entry.getValue() instanceof Map)) {
                    throw new ElasticsearchIllegalArgumentException("Malformed settings section, should include an inner object");
                }
                settings((Map<String, Object>) entry.getValue());
            } else if (name.equals("mappings")) {
                Map<String, Object> mappings = (Map<String, Object>) entry.getValue();
                for (Map.Entry<String, Object> entry1 : mappings.entrySet()) {
                    if (!(entry1.getValue() instanceof Map)) {
                        throw new ElasticsearchIllegalArgumentException("Malformed mappings section for type [" + entry1.getKey() + "], should include an inner object describing the mapping");
                    }
                    mapping(entry1.getKey(), (Map<String, Object>) entry1.getValue());
                }
            } else {
                // maybe custom?
                IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name);
                if (factory != null) {
                    try {
                        customs.put(name, factory.fromMap((Map<String, Object>) entry.getValue()));
                    } catch (IOException e) {
                        throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]");
                    }
                }
            }
        }
        return this;
    }

    /**
     * The template source definition.
     */
    public PutIndexTemplateRequest source(String templateSource) {
        try {
            return source(XContentFactory.xContent(templateSource).createParser(templateSource).mapOrderedAndClose());
        } catch (Exception e) {
            throw new ElasticsearchIllegalArgumentException("failed to parse template source [" + templateSource + "]", e);
        }
    }

    /**
     * The template source definition.
     */
    public PutIndexTemplateRequest source(byte[] source) {
        return source(source, 0, source.length);
    }

    /**
     * The template source definition.
     */
    public PutIndexTemplateRequest source(byte[] source, int offset, int length) {
        try {
            return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
        } catch (IOException e) {
            throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
        }
    }

    /**
     * The template source definition.
     */
    public PutIndexTemplateRequest source(BytesReference source) {
        try {
            return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
        } catch (IOException e) {
            throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
        }
    }

    /** Adds (or replaces) a custom index-metadata entry, keyed by its type. */
    public PutIndexTemplateRequest custom(IndexMetaData.Custom custom) {
        customs.put(custom.type(), custom);
        return this;
    }

    Map<String, IndexMetaData.Custom> customs() {
        return this.customs;
    }

    // NOTE: readFrom/writeTo must stay in exactly the same field order; they
    // define the wire format of this request.
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        cause = in.readString();
        name = in.readString();
        template = in.readString();
        order = in.readInt();
        create = in.readBoolean();
        settings = readSettingsFromStream(in);
        int size = in.readVInt();
        for (int i = 0; i < size; i++) {
            mappings.put(in.readString(), in.readString());
        }
        int customSize = in.readVInt();
        for (int i = 0; i < customSize; i++) {
            String type = in.readString();
            IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in);
            customs.put(type, customIndexMetaData);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(cause);
        out.writeString(name);
        out.writeString(template);
        out.writeInt(order);
        out.writeBoolean(create);
        writeSettingsToStream(settings, out);
        out.writeVInt(mappings.size());
        for (Map.Entry<String, String> entry : mappings.entrySet()) {
            out.writeString(entry.getKey());
            out.writeString(entry.getValue());
        }
        out.writeVInt(customs.size());
        for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
            out.writeString(entry.getKey());
            IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out);
        }
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_template_put_PutIndexTemplateRequest.java
|
911 |
/**
 * An action future that supports listeners notified when the future completes.
 * Listeners may be plain {@code Runnable}s or {@code ActionListener}s. To
 * avoid allocating a list in the common single-listener case, the
 * {@code listeners} field holds either {@code null}, a single listener object,
 * or a {@code List} of listeners.
 */
public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> {

    // If true, listeners are dispatched on the generic thread pool instead of
    // the registering/completing thread.
    final boolean listenerThreaded;
    final ThreadPool threadPool;
    // null | single listener | List of listeners (see class comment).
    volatile Object listeners;
    // Guarded by synchronized(this). Once true, already-registered listeners
    // have been (or are being) fired by done(); any later registration must
    // execute its listener immediately instead of queueing it.
    boolean executedListeners = false;

    protected AbstractListenableActionFuture(boolean listenerThreaded, ThreadPool threadPool) {
        this.listenerThreaded = listenerThreaded;
        this.threadPool = threadPool;
    }

    public boolean listenerThreaded() {
        return false; // we control execution of the listener
    }

    public ThreadPool threadPool() {
        return threadPool;
    }

    public void addListener(final ActionListener<T> listener) {
        internalAddListener(listener);
    }

    public void addListener(final Runnable listener) {
        internalAddListener(listener);
    }

    /**
     * Registers a listener: queued if the future has not completed yet,
     * executed immediately (outside the lock) otherwise.
     */
    public void internalAddListener(Object listener) {
        boolean executeImmediate = false;
        synchronized (this) {
            if (executedListeners) {
                // Future already completed and fired its listeners; run this
                // one right away (after releasing the lock).
                executeImmediate = true;
            } else {
                // Grow the null -> single -> List encoding as needed.
                Object listeners = this.listeners;
                if (listeners == null) {
                    listeners = listener;
                } else if (listeners instanceof List) {
                    ((List) this.listeners).add(listener);
                } else {
                    // Promote the single listener to a two-element list.
                    Object orig = listeners;
                    listeners = Lists.newArrayListWithCapacity(2);
                    ((List) listeners).add(orig);
                    ((List) listeners).add(listener);
                }
                this.listeners = listeners;
            }
        }
        if (executeImmediate) {
            executeListener(listener);
        }
    }

    @Override
    protected void done() {
        super.done();
        // Flip the flag under the lock so racing internalAddListener calls
        // either enqueue before this point or self-execute after it.
        synchronized (this) {
            executedListeners = true;
        }
        // Read outside the lock: no further mutation can happen once
        // executedListeners is true.
        Object listeners = this.listeners;
        if (listeners != null) {
            if (listeners instanceof List) {
                List list = (List) listeners;
                for (Object listener : list) {
                    executeListener(listener);
                }
            } else {
                executeListener(listeners);
            }
        }
    }

    /**
     * Runs a single listener, on the generic thread pool when
     * listenerThreaded is set, otherwise inline. ActionListeners are fed the
     * result of actionGet() or its failure.
     */
    private void executeListener(final Object listener) {
        if (listenerThreaded) {
            if (listener instanceof Runnable) {
                threadPool.generic().execute((Runnable) listener);
            } else {
                threadPool.generic().execute(new Runnable() {
                    @Override
                    public void run() {
                        ActionListener<T> lst = (ActionListener<T>) listener;
                        try {
                            lst.onResponse(actionGet());
                        } catch (ElasticsearchException e) {
                            lst.onFailure(e);
                        }
                    }
                });
            }
        } else {
            if (listener instanceof Runnable) {
                ((Runnable) listener).run();
            } else {
                ActionListener<T> lst = (ActionListener<T>) listener;
                try {
                    lst.onResponse(actionGet());
                } catch (ElasticsearchException e) {
                    lst.onFailure(e);
                }
            }
        }
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_support_AbstractListenableActionFuture.java
|
1,382 |
public class IndexTemplateMetaData {
private final String name;
private final int order;
private final String template;
private final Settings settings;
// the mapping source should always include the type as top level
private final ImmutableOpenMap<String, CompressedString> mappings;
private final ImmutableOpenMap<String, IndexMetaData.Custom> customs;
public IndexTemplateMetaData(String name, int order, String template, Settings settings, ImmutableOpenMap<String, CompressedString> mappings, ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
this.name = name;
this.order = order;
this.template = template;
this.settings = settings;
this.mappings = mappings;
this.customs = customs;
}
public String name() {
return this.name;
}
public int order() {
return this.order;
}
public int getOrder() {
return order();
}
public String getName() {
return this.name;
}
public String template() {
return this.template;
}
public String getTemplate() {
return this.template;
}
public Settings settings() {
return this.settings;
}
public Settings getSettings() {
return settings();
}
public ImmutableOpenMap<String, CompressedString> mappings() {
return this.mappings;
}
public ImmutableOpenMap<String, CompressedString> getMappings() {
return this.mappings;
}
public ImmutableOpenMap<String, IndexMetaData.Custom> customs() {
return this.customs;
}
public ImmutableOpenMap<String, IndexMetaData.Custom> getCustoms() {
return this.customs;
}
public <T extends IndexMetaData.Custom> T custom(String type) {
return (T) customs.get(type);
}
public static Builder builder(String name) {
return new Builder(name);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
IndexTemplateMetaData that = (IndexTemplateMetaData) o;
if (order != that.order) return false;
if (!mappings.equals(that.mappings)) return false;
if (!name.equals(that.name)) return false;
if (!settings.equals(that.settings)) return false;
if (!template.equals(that.template)) return false;
return true;
}
@Override
public int hashCode() {
int result = name.hashCode();
result = 31 * result + order;
result = 31 * result + template.hashCode();
result = 31 * result + settings.hashCode();
result = 31 * result + mappings.hashCode();
return result;
}
public static class Builder {
private static final Set<String> VALID_FIELDS = Sets.newHashSet("template", "order", "mappings", "settings");
static {
VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet());
}
private String name;
private int order;
private String template;
private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
private final ImmutableOpenMap.Builder<String, CompressedString> mappings;
private final ImmutableOpenMap.Builder<String, IndexMetaData.Custom> customs;
public Builder(String name) {
this.name = name;
mappings = ImmutableOpenMap.builder();
customs = ImmutableOpenMap.builder();
}
public Builder(IndexTemplateMetaData indexTemplateMetaData) {
this.name = indexTemplateMetaData.name();
order(indexTemplateMetaData.order());
template(indexTemplateMetaData.template());
settings(indexTemplateMetaData.settings());
mappings = ImmutableOpenMap.builder(indexTemplateMetaData.mappings());
customs = ImmutableOpenMap.builder(indexTemplateMetaData.customs());
}
public Builder order(int order) {
this.order = order;
return this;
}
public Builder template(String template) {
this.template = template;
return this;
}
public String template() {
return template;
}
public Builder settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
public Builder settings(Settings settings) {
this.settings = settings;
return this;
}
public Builder removeMapping(String mappingType) {
mappings.remove(mappingType);
return this;
}
public Builder putMapping(String mappingType, CompressedString mappingSource) throws IOException {
mappings.put(mappingType, mappingSource);
return this;
}
public Builder putMapping(String mappingType, String mappingSource) throws IOException {
mappings.put(mappingType, new CompressedString(mappingSource));
return this;
}
public Builder putCustom(String type, IndexMetaData.Custom customIndexMetaData) {
this.customs.put(type, customIndexMetaData);
return this;
}
public Builder removeCustom(String type) {
this.customs.remove(type);
return this;
}
public IndexMetaData.Custom getCustom(String type) {
return this.customs.get(type);
}
public IndexTemplateMetaData build() {
return new IndexTemplateMetaData(name, order, template, settings, mappings.build(), customs.build());
}
public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(indexTemplateMetaData.name(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("order", indexTemplateMetaData.order());
builder.field("template", indexTemplateMetaData.template());
builder.startObject("settings");
for (Map.Entry<String, String> entry : indexTemplateMetaData.settings().getAsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
builder.endObject();
if (params.paramAsBoolean("reduce_mappings", false)) {
builder.startObject("mappings");
for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
byte[] mappingSource = cursor.value.uncompressed();
XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
Map<String, Object> mapping = parser.map();
if (mapping.size() == 1 && mapping.containsKey(cursor.key)) {
// the type name is the root value, reduce it
mapping = (Map<String, Object>) mapping.get(cursor.key);
}
builder.field(cursor.key);
builder.map(mapping);
}
builder.endObject();
} else {
builder.startArray("mappings");
for (ObjectObjectCursor<String, CompressedString> cursor : indexTemplateMetaData.mappings()) {
byte[] data = cursor.value.uncompressed();
XContentParser parser = XContentFactory.xContent(data).createParser(data);
Map<String, Object> mapping = parser.mapOrderedAndClose();
builder.map(mapping);
}
builder.endArray();
}
for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : indexTemplateMetaData.customs()) {
builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
IndexMetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
builder.endObject();
}
public static IndexTemplateMetaData fromXContentStandalone(XContentParser parser) throws IOException {
XContentParser.Token token = parser.nextToken();
if (token == null) {
throw new IOException("no data");
}
if (token != XContentParser.Token.START_OBJECT) {
throw new IOException("should start object");
}
token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME) {
throw new IOException("the first field should be the template name");
}
return fromXContent(parser);
}
public static IndexTemplateMetaData fromXContent(XContentParser parser) throws IOException {
Builder builder = new Builder(parser.currentName());
String currentFieldName = skipTemplateName(parser);
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("settings".equals(currentFieldName)) {
ImmutableSettings.Builder templateSettingsBuilder = ImmutableSettings.settingsBuilder();
for (Map.Entry<String, String> entry : SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered()).entrySet()) {
if (!entry.getKey().startsWith("index.")) {
templateSettingsBuilder.put("index." + entry.getKey(), entry.getValue());
} else {
templateSettingsBuilder.put(entry.getKey(), entry.getValue());
}
}
builder.settings(templateSettingsBuilder.build());
} else if ("mappings".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
String mappingType = currentFieldName;
Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
builder.putMapping(mappingType, XContentFactory.jsonBuilder().map(mappingSource).string());
}
}
} else {
// check if its a custom index metadata
IndexMetaData.Custom.Factory<IndexMetaData.Custom> factory = IndexMetaData.lookupFactory(currentFieldName);
if (factory == null) {
//TODO warn
parser.skipChildren();
} else {
builder.putCustom(factory.type(), factory.fromXContent(parser));
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("mappings".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Map<String, Object> mapping = parser.mapOrdered();
if (mapping.size() == 1) {
String mappingType = mapping.keySet().iterator().next();
String mappingSource = XContentFactory.jsonBuilder().map(mapping).string();
if (mappingSource == null) {
// crap, no mapping source, warn?
} else {
builder.putMapping(mappingType, mappingSource);
}
}
}
}
} else if (token.isValue()) {
if ("template".equals(currentFieldName)) {
builder.template(parser.text());
} else if ("order".equals(currentFieldName)) {
builder.order(parser.intValue());
}
}
}
return builder.build();
}
private static String skipTemplateName(XContentParser parser) throws IOException {
XContentParser.Token token = parser.nextToken();
if (token != null && token == XContentParser.Token.START_OBJECT) {
token = parser.nextToken();
if (token == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();
if (VALID_FIELDS.contains(currentFieldName)) {
return currentFieldName;
} else {
// we just hit the template name, which should be ignored and we move on
parser.nextToken();
}
}
}
return null;
}
public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException {
Builder builder = new Builder(in.readString());
builder.order(in.readInt());
builder.template(in.readString());
builder.settings(ImmutableSettings.readSettingsFromStream(in));
int mappingsSize = in.readVInt();
for (int i = 0; i < mappingsSize; i++) {
builder.putMapping(in.readString(), CompressedString.readCompressedString(in));
}
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
return builder.build();
}
/**
 * Serializes the given template metadata to the stream. The write order must
 * mirror {@code readFrom} exactly: name, order, template, settings, then the
 * size-prefixed mappings and customs.
 */
public static void writeTo(IndexTemplateMetaData indexTemplateMetaData, StreamOutput out) throws IOException {
    out.writeString(indexTemplateMetaData.name());
    out.writeInt(indexTemplateMetaData.order());
    out.writeString(indexTemplateMetaData.template());
    ImmutableSettings.writeSettingsToStream(indexTemplateMetaData.settings(), out);
    out.writeVInt(indexTemplateMetaData.mappings().size());
    for (ObjectObjectCursor<String, CompressedString> mapping : indexTemplateMetaData.mappings()) {
        out.writeString(mapping.key);
        mapping.value.writeTo(out);
    }
    out.writeVInt(indexTemplateMetaData.customs().size());
    for (ObjectObjectCursor<String, IndexMetaData.Custom> custom : indexTemplateMetaData.customs()) {
        out.writeString(custom.key);
        // delegate encoding to the factory registered for this custom type
        IndexMetaData.lookupFactorySafe(custom.key).writeTo(custom.value, out);
    }
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_metadata_IndexTemplateMetaData.java
|
205 |
/**
 * Cleanup task run after a connection goes away: waits (bounded) for in-flight
 * packets to drain, then releases the connection's resources with a
 * {@link TargetDisconnectedException} for the remote endpoint.
 */
private class CleanResourcesTask implements Runnable {

    /** Upper bound on how long to wait for pending packets before giving up. */
    private static final long WAIT_TIMEOUT_MILLIS = 5000L;

    @Override
    public void run() {
        waitForPacketsProcessed();
        cleanResources(new TargetDisconnectedException(remoteEndpoint));
    }

    /**
     * Polls {@code packetCount} until it reaches zero, sleeping between polls.
     * Gives up (with a warning) after {@link #WAIT_TIMEOUT_MILLIS} or when the
     * thread is interrupted.
     */
    private void waitForPacketsProcessed() {
        final long begin = System.currentTimeMillis();
        int count = packetCount.get();
        while (count != 0) {
            try {
                Thread.sleep(SLEEP_TIME);
            } catch (InterruptedException e) {
                logger.warning(e);
                // restore the interrupt status so code further up the stack can observe it
                Thread.currentThread().interrupt();
                break;
            }
            long elapsed = System.currentTimeMillis() - begin;
            if (elapsed > WAIT_TIMEOUT_MILLIS) {
                logger.warning("There are packets which are not processed " + count);
                break;
            }
            count = packetCount.get();
        }
    }
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_connection_nio_ClientConnection.java
|
609 |
/**
 * Nightly cluster-membership tests. Each test deliberately forces the member
 * list on one node out of sync with the rest of the cluster and then asserts
 * that the merge / heartbeat machinery converges all nodes back to a single
 * consistent membership view.
 */
@RunWith(HazelcastSerialClassRunner.class)
@Category(NightlyTest.class)
public class MemberListTest {

    // Run before AND after each test so no instances leak between tests.
    @Before
    @After
    public void killAllHazelcastInstances() throws IOException {
        Hazelcast.shutdownAll();
    }

    /*
     * Sets up a situation where node3 removes the master and sets node2 as the
     * master but none of the other nodes do. This means that node3 thinks node2
     * is master but node2 thinks node1 is master.
     */
    @Test
    public void testOutOfSyncMemberList() throws Exception {
        Config c1 = buildConfig(false);
        Config c2 = buildConfig(false);
        Config c3 = buildConfig(false);

        c1.getNetworkConfig().setPort(25701);
        c2.getNetworkConfig().setPort(25702);
        c3.getNetworkConfig().setPort(25703);

        // NOTE(review): single comma-separated string rather than one element per
        // address — presumably Hazelcast splits on commas internally; TODO confirm
        // (testSwitchingMasters below uses one string per address).
        List<String> allMembers = Arrays.asList("127.0.0.1:25701, 127.0.0.1:25702, 127.0.0.1:25703");
        c1.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c2.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c3.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);

        final HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
        final HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
        final HazelcastInstance h3 = Hazelcast.newHazelcastInstance(c3);

        // All three nodes join into one cluster
        assertEquals(3, h1.getCluster().getMembers().size());
        assertEquals(3, h2.getCluster().getMembers().size());
        assertEquals(3, h3.getCluster().getMembers().size());

        // This simulates each node reading from the other nodes in the list at regular intervals
        // This prevents the heart beat code from timing out
        final HazelcastInstance[] instances = new HazelcastInstance[]{h1, h2, h3};
        final AtomicBoolean doingWork = new AtomicBoolean(true);
        Thread[] workThreads = new Thread[instances.length];
        for (int i = 0; i < instances.length; i++) {
            final int threadNum = i;
            workThreads[threadNum] = new Thread(new Runnable() {
                public void run() {
                    while (doingWork.get()) {
                        final HazelcastInstance hz = instances[threadNum];

                        // ping every other member of the cluster, ignoring members
                        // that leave mid-flight
                        Set<Member> members = new HashSet<Member>(hz.getCluster().getMembers());
                        members.remove(hz.getCluster().getLocalMember());

                        final Map<Member, Future<String>> futures = hz.getExecutorService("test")
                                .submitToMembers(new PingCallable(), members);

                        for (Future<String> f : futures.values()) {
                            try {
                                f.get();
                            } catch (MemberLeftException ignored) {
                            } catch (Exception e) {
                                e.printStackTrace();
                            }
                        }

                        try {
                            Thread.sleep(2000);
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                        }
                    }
                }
            });
            workThreads[threadNum].start();
        }

        final Node n3 = TestUtil.getNode(h3);
        // Only node3 drops node1 from its member list -> inconsistent views.
        n3.clusterService.removeAddress(((MemberImpl) h1.getCluster().getLocalMember()).getAddress());

        // Give the cluster some time to figure things out. The merge and heartbeat code should have kicked in by this point
        Thread.sleep(30 * 1000);

        doingWork.set(false);
        for (Thread t : workThreads) {
            t.join();
        }

        assertEquals(3, h1.getCluster().getMembers().size());
        assertEquals(3, h2.getCluster().getMembers().size());
        assertEquals(3, h3.getCluster().getMembers().size());
    }

    /** Trivial remotely-executed task used to keep heartbeats alive. */
    private static class PingCallable implements Callable<String>, Serializable {
        public String call() throws Exception {
            return "ping response";
        }
    }

    /*
     * Sets up a situation where the member list is out of order on node2. Both
     * node2 and node1 think they are masters and both think each other are in
     * their clusters.
     */
    @Test
    public void testOutOfSyncMemberListTwoMasters() throws Exception {
        Config c1 = buildConfig(false);
        Config c2 = buildConfig(false);
        Config c3 = buildConfig(false);

        c1.getNetworkConfig().setPort(35701);
        c2.getNetworkConfig().setPort(35702);
        c3.getNetworkConfig().setPort(35703);

        List<String> allMembers = Arrays.asList("127.0.0.1:35701, 127.0.0.1:35702, 127.0.0.1:35703");
        c1.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c2.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c3.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);

        final HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
        final HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
        final HazelcastInstance h3 = Hazelcast.newHazelcastInstance(c3);

        final MemberImpl m1 = (MemberImpl) h1.getCluster().getLocalMember();
        final MemberImpl m2 = (MemberImpl) h2.getCluster().getLocalMember();
        final MemberImpl m3 = (MemberImpl) h3.getCluster().getLocalMember();

        // All three nodes join into one cluster
        assertEquals(3, h1.getCluster().getMembers().size());
        assertEquals(3, h2.getCluster().getMembers().size());
        assertEquals(3, h3.getCluster().getMembers().size());

        final Node n2 = TestUtil.getNode(h2);

        // Simulates node2 getting an out of order member list. That causes node2 to think it's the master.
        List<MemberInfo> members = new ArrayList<MemberInfo>();
        members.add(new MemberInfo(m2.getAddress(), m2.getUuid(), Collections.<String, Object>emptyMap()));
        members.add(new MemberInfo(m3.getAddress(), m3.getUuid(), Collections.<String, Object>emptyMap()));
        members.add(new MemberInfo(m1.getAddress(), m1.getUuid(), Collections.<String, Object>emptyMap()));
        n2.clusterService.updateMembers(members);
        n2.setMasterAddress(m2.getAddress());

        // Give the cluster some time to figure things out. The merge and heartbeat code should have kicked in by this point
        Thread.sleep(30 * 1000);

        // m1 must have been re-established as the single master everywhere
        assertEquals(m1, h1.getCluster().getMembers().iterator().next());
        assertEquals(m1, h2.getCluster().getMembers().iterator().next());
        assertEquals(m1, h3.getCluster().getMembers().iterator().next());

        assertEquals(3, h1.getCluster().getMembers().size());
        assertEquals(3, h2.getCluster().getMembers().size());
        assertEquals(3, h3.getCluster().getMembers().size());
    }

    /*
     * Sets up situation where all nodes have the same master, but node 2's list
     * doesn't contain node 3.
     */
    @Test
    public void testSameMasterDifferentMemberList() throws Exception {
        Config c1 = buildConfig(false);
        Config c2 = buildConfig(false);
        Config c3 = buildConfig(false);

        c1.getNetworkConfig().setPort(45701);
        c2.getNetworkConfig().setPort(45702);
        c3.getNetworkConfig().setPort(45703);

        List<String> allMembers = Arrays.asList("127.0.0.1:45701, 127.0.0.1:45702, 127.0.0.1:45703");
        c1.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c2.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c3.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);

        final HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
        final HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
        final HazelcastInstance h3 = Hazelcast.newHazelcastInstance(c3);

        final MemberImpl m1 = (MemberImpl) h1.getCluster().getLocalMember();
        final MemberImpl m2 = (MemberImpl) h2.getCluster().getLocalMember();

        // All three nodes join into one cluster
        assertEquals(3, h1.getCluster().getMembers().size());
        assertEquals(3, h2.getCluster().getMembers().size());
        assertEquals(3, h3.getCluster().getMembers().size());

        final Node n2 = TestUtil.getNode(h2);

        // Simulates node2 getting an out of order member list. That causes node2 to think it's the master.
        List<MemberInfo> members = new ArrayList<MemberInfo>();
        members.add(new MemberInfo(m1.getAddress(), m1.getUuid(), Collections.<String, Object>emptyMap()));
        members.add(new MemberInfo(m2.getAddress(), m2.getUuid(), Collections.<String, Object>emptyMap()));
        n2.clusterService.updateMembers(members);

        // Give the cluster some time to figure things out. The merge and heartbeat code should have kicked in by this point
        Thread.sleep(30 * 1000);

        assertEquals(m1, h1.getCluster().getMembers().iterator().next());
        assertEquals(m1, h2.getCluster().getMembers().iterator().next());
        assertEquals(m1, h3.getCluster().getMembers().iterator().next());

        assertEquals(3, h1.getCluster().getMembers().size());
        assertEquals(3, h2.getCluster().getMembers().size());
        assertEquals(3, h3.getCluster().getMembers().size());
    }

    // Verifies that when the current master shuts down, the next member in the
    // list takes over as master on every remaining node and stays stable.
    @Test
    public void testSwitchingMasters() throws Exception {
        Config c1 = buildConfig(false);
        Config c2 = buildConfig(false);
        Config c3 = buildConfig(false);
        Config c4 = buildConfig(false);
        Config c5 = buildConfig(false);

        c1.getNetworkConfig().setPort(55701);
        c2.getNetworkConfig().setPort(55702);
        c3.getNetworkConfig().setPort(55703);
        c4.getNetworkConfig().setPort(55704);
        c5.getNetworkConfig().setPort(55705);

        List<String> allMembers = Arrays.asList("127.0.0.1:55701", "127.0.0.1:55702",
                "127.0.0.1:55703", "127.0.0.1:55704", "127.0.0.1:55705");
        c1.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c2.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c3.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c4.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
        c5.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);

        final HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
        final HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
        final HazelcastInstance h3 = Hazelcast.newHazelcastInstance(c3);
        final HazelcastInstance h4 = Hazelcast.newHazelcastInstance(c4);
        final HazelcastInstance h5 = Hazelcast.newHazelcastInstance(c5);

        assertEquals(5, h1.getCluster().getMembers().size());
        assertEquals(5, h2.getCluster().getMembers().size());
        assertEquals(5, h3.getCluster().getMembers().size());
        assertEquals(5, h4.getCluster().getMembers().size());
        assertEquals(5, h5.getCluster().getMembers().size());

        // Need to wait for at least as long as PROP_MAX_NO_MASTER_CONFIRMATION_SECONDS
        Thread.sleep(15 * 1000);

        Member master = h1.getCluster().getLocalMember();
        assertEquals(master, h2.getCluster().getMembers().iterator().next());
        assertEquals(master, h3.getCluster().getMembers().iterator().next());
        assertEquals(master, h4.getCluster().getMembers().iterator().next());
        assertEquals(master, h5.getCluster().getMembers().iterator().next());

        h1.shutdown();

        assertEquals(4, h2.getCluster().getMembers().size());
        assertEquals(4, h3.getCluster().getMembers().size());
        assertEquals(4, h4.getCluster().getMembers().size());
        assertEquals(4, h5.getCluster().getMembers().size());

        // h2 should now be the first member everywhere, i.e. the new master
        master = h2.getCluster().getLocalMember();
        assertEquals(master, h2.getCluster().getMembers().iterator().next());
        assertEquals(master, h3.getCluster().getMembers().iterator().next());
        assertEquals(master, h4.getCluster().getMembers().iterator().next());
        assertEquals(master, h5.getCluster().getMembers().iterator().next());

        Thread.sleep(10 * 1000);

        // and the view must remain stable after the settle period
        assertEquals(4, h2.getCluster().getMembers().size());
        assertEquals(4, h3.getCluster().getMembers().size());
        assertEquals(4, h4.getCluster().getMembers().size());
        assertEquals(4, h5.getCluster().getMembers().size());

        assertEquals(master, h2.getCluster().getMembers().iterator().next());
        assertEquals(master, h3.getCluster().getMembers().iterator().next());
        assertEquals(master, h4.getCluster().getMembers().iterator().next());
        assertEquals(master, h5.getCluster().getMembers().iterator().next());
    }

    /**
     * Builds a TCP/IP (or multicast) cluster config with aggressive merge /
     * heartbeat timings so the tests converge quickly.
     */
    private static Config buildConfig(boolean multicastEnabled) {
        Config c = new Config();
        c.getGroupConfig().setName("group").setPassword("pass");
        c.setProperty(GroupProperties.PROP_MERGE_FIRST_RUN_DELAY_SECONDS, "10");
        c.setProperty(GroupProperties.PROP_MERGE_NEXT_RUN_DELAY_SECONDS, "5");
        c.setProperty(GroupProperties.PROP_MAX_NO_HEARTBEAT_SECONDS, "10");
        c.setProperty(GroupProperties.PROP_MASTER_CONFIRMATION_INTERVAL_SECONDS, "2");
        c.setProperty(GroupProperties.PROP_MAX_NO_MASTER_CONFIRMATION_SECONDS, "10");
        c.setProperty(GroupProperties.PROP_MEMBER_LIST_PUBLISH_INTERVAL_SECONDS, "10");
        final NetworkConfig networkConfig = c.getNetworkConfig();
        networkConfig.getJoin().getMulticastConfig().setEnabled(multicastEnabled);
        networkConfig.getJoin().getTcpIpConfig().setEnabled(!multicastEnabled);
        networkConfig.setPortAutoIncrement(false);
        return c;
    }
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_cluster_MemberListTest.java
|
625 |
// Count down the latch once this node reports that a split-brain merge completed.
h3.getLifecycleService().addLifecycleListener(new LifecycleListener() {
    public void stateChanged(LifecycleEvent event) {
        if (event.getState() == LifecycleState.MERGED) {
            latch.countDown();
        }
    }
});
| 0true
|
hazelcast_src_test_java_com_hazelcast_cluster_SplitBrainHandlerTest.java
|
346 |
/**
 * Base worker thread for concurrent map tests: runs {@link #doRun()} a fixed
 * number of times, and offers {@link #work()}, a non-atomic read-modify-write
 * that moves a random amount from one map entry to another.
 */
static abstract class TestHelper extends Thread {

    protected static final int ITERATIONS = 1000 * 10;

    protected final Random random = new Random();
    protected final IMap<String, Integer> map;
    protected final String upKey;
    protected final String downKey;

    public TestHelper(IMap map, String upKey, String downKey) {
        this.map = map;
        this.upKey = upKey;
        this.downKey = downKey;
    }

    public void run() {
        try {
            int remaining = ITERATIONS;
            while (remaining-- > 0) {
                doRun();
            }
        } catch (Exception e) {
            // surface any failure loudly so the test fails rather than hangs
            throw new RuntimeException("Test Thread crashed with ", e);
        }
    }

    /** One unit of work, performed {@link #ITERATIONS} times. */
    abstract void doRun() throws Exception;

    /** Transfers a random amount (0..999) from downKey's value to upKey's value. */
    public void work() {
        int upTotal = map.get(upKey);
        int downTotal = map.get(downKey);
        int delta = random.nextInt(1000);
        map.put(upKey, upTotal + delta);
        map.put(downKey, downTotal - delta);
    }
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTryLockConcurrentTests.java
|
611 |
/**
 * Base class for indexes that map one key to a set of record identities
 * ({@code Set<OIdentifiable>}). Read paths take the shared lock; write paths
 * take the modification lock and then the exclusive lock. Most entry points
 * call {@code checkForRebuild()} first and normalize the key through
 * {@code getCollatingValue(...)}.
 */
public abstract class OIndexMultiValues extends OIndexAbstract<Set<OIdentifiable>> {
  public OIndexMultiValues(final String type, String algorithm, OIndexEngine<Set<OIdentifiable>> indexEngine,
      String valueContainerAlgorithm) {
    super(type, algorithm, indexEngine, valueContainerAlgorithm);
  }

  /**
   * Returns a defensive copy of the identities stored under the key, or an
   * empty set when the key is absent.
   */
  public Set<OIdentifiable> get(Object key) {
    checkForRebuild();

    key = getCollatingValue(key);

    acquireSharedLock();
    try {
      final Set<OIdentifiable> values = indexEngine.get(key);

      if (values == null)
        return Collections.emptySet();

      // copy so callers cannot mutate the engine's live container
      return new HashSet<OIdentifiable>(values);
    } finally {
      releaseSharedLock();
    }
  }

  /** Number of identities stored under the key (0 when absent). */
  public long count(Object key) {
    checkForRebuild();

    key = getCollatingValue(key);

    acquireSharedLock();
    try {
      final Set<OIdentifiable> values = indexEngine.get(key);

      if (values == null)
        return 0;

      return values.size();
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * Adds a single identity under the key, creating the value container on
   * first use. Unsaved records are saved first so a valid RID is stored.
   */
  public OIndexMultiValues put(Object key, final OIdentifiable iSingleValue) {
    checkForRebuild();

    key = getCollatingValue(key);

    modificationLock.requestModificationLock();

    try {
      acquireExclusiveLock();
      try {
        checkForKeyType(key);

        Set<OIdentifiable> values = indexEngine.get(key);

        if (values == null) {
          // container type depends on the configured value-container algorithm
          if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm)) {
            values = new OIndexRIDContainer(getName());
          } else {
            values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
            ((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
          }
        }

        if (!iSingleValue.getIdentity().isValid())
          ((ORecord<?>) iSingleValue).save();

        values.add(iSingleValue.getIdentity());

        indexEngine.put(key, values);
        return this;

      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  /**
   * Records an addition in a tx snapshot without touching the engine. A
   * {@code RemovedValue.INSTANCE} marker in the snapshot means "deleted".
   */
  @Override
  protected void putInSnapshot(Object key, OIdentifiable value, final Map<Object, Object> snapshot) {
    key = getCollatingValue(key);

    Object snapshotValue = snapshot.get(key);

    Set<OIdentifiable> values;
    if (snapshotValue == null)
      values = indexEngine.get(key);
    else if (snapshotValue.equals(RemovedValue.INSTANCE))
      values = null;
    else
      values = (Set<OIdentifiable>) snapshotValue;

    if (values == null) {
      if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm)) {
        values = new OIndexRIDContainer(getName());
      } else {
        values = new OMVRBTreeRIDSet(OGlobalConfiguration.MVRBTREE_RID_BINARY_THRESHOLD.getValueAsInteger());
        ((OMVRBTreeRIDSet) values).setAutoConvertToRecord(false);
      }

      snapshot.put(key, values);
    }

    values.add(value.getIdentity());

    // NOTE(review): embedded RID containers are re-put so the snapshot keeps
    // the (possibly replaced) container instance — confirm against container impl
    if (values instanceof OIndexRIDContainer && ((OIndexRIDContainer) values).isEmbedded())
      snapshot.put(key, values);
  }

  /**
   * Removes one identity from the key's set; drops the key entirely when the
   * set becomes empty. Returns true when something was removed.
   */
  @Override
  public boolean remove(Object key, final OIdentifiable value) {
    checkForRebuild();

    key = getCollatingValue(key);

    modificationLock.requestModificationLock();

    try {
      acquireExclusiveLock();
      try {

        Set<OIdentifiable> recs = indexEngine.get(key);
        if (recs == null)
          return false;

        if (recs.remove(value)) {
          if (recs.isEmpty())
            indexEngine.remove(key);
          else
            indexEngine.put(key, recs);
          return true;
        }
        return false;

      } finally {
        releaseExclusiveLock();
      }
    } finally {
      modificationLock.releaseModificationLock();
    }
  }

  /** Snapshot counterpart of {@link #remove(Object, OIdentifiable)}. */
  @Override
  protected void removeFromSnapshot(Object key, final OIdentifiable value, final Map<Object, Object> snapshot) {
    key = getCollatingValue(key);

    final Object snapshotValue = snapshot.get(key);

    Set<OIdentifiable> values;
    if (snapshotValue == null)
      values = indexEngine.get(key);
    else if (snapshotValue.equals(RemovedValue.INSTANCE))
      values = null;
    else
      values = (Set<OIdentifiable>) snapshotValue;

    if (values == null)
      return;

    if (values.remove(value)) {
      if (values.isEmpty())
        // mark the key deleted rather than removing it from the snapshot map
        snapshot.put(key, RemovedValue.INSTANCE);
      else
        snapshot.put(key, values);
    }
  }

  /** Applies a completed snapshot to the engine: removals then replacements. */
  @Override
  protected void commitSnapshot(Map<Object, Object> snapshot) {
    for (Map.Entry<Object, Object> snapshotEntry : snapshot.entrySet()) {
      Object key = snapshotEntry.getKey();
      Object value = snapshotEntry.getValue();
      checkForKeyType(key);

      if (value.equals(RemovedValue.INSTANCE))
        indexEngine.remove(key);
      else
        indexEngine.put(key, (Set<OIdentifiable>) value);
    }
  }

  /**
   * Creates the index, choosing the stream serializer that matches the
   * configured value-container algorithm.
   */
  public OIndexMultiValues create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
      final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
    final OStreamSerializer serializer;
    if (ODefaultIndexFactory.SBTREEBONSAI_VALUE_CONTAINER.equals(valueContainerAlgorithm))
      serializer = OStreamSerializerSBTreeIndexRIDContainer.INSTANCE;
    else
      serializer = OStreamSerializerListRID.INSTANCE;

    return (OIndexMultiValues) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
        serializer);
  }

  /** Streams identities whose keys fall in [from, to] (bounds per inclusivity flags). */
  public void getValuesBetween(Object iRangeFrom, final boolean fromInclusive, Object iRangeTo, final boolean toInclusive,
      final IndexValuesResultListener resultListener) {
    checkForRebuild();

    iRangeFrom = getCollatingValue(iRangeFrom);
    iRangeTo = getCollatingValue(iRangeTo);

    acquireSharedLock();
    try {
      indexEngine.getValuesBetween(iRangeFrom, fromInclusive, iRangeTo, toInclusive, MultiValuesTransformer.INSTANCE,
          new OIndexEngine.ValuesResultListener() {
            @Override
            public boolean addResult(OIdentifiable identifiable) {
              return resultListener.addResult(identifiable);
            }
          });
    } finally {
      releaseSharedLock();
    }
  }

  /** Streams identities for keys greater than (or equal to) the given bound. */
  public void getValuesMajor(Object iRangeFrom, final boolean isInclusive, final IndexValuesResultListener valuesResultListener) {
    checkForRebuild();

    iRangeFrom = getCollatingValue(iRangeFrom);

    acquireSharedLock();
    try {
      indexEngine.getValuesMajor(iRangeFrom, isInclusive, MultiValuesTransformer.INSTANCE, new OIndexEngine.ValuesResultListener() {
        @Override
        public boolean addResult(OIdentifiable identifiable) {
          return valuesResultListener.addResult(identifiable);
        }
      });
    } finally {
      releaseSharedLock();
    }
  }

  /** Streams identities for keys less than (or equal to) the given bound. */
  public void getValuesMinor(Object iRangeTo, final boolean isInclusive, final IndexValuesResultListener resultListener) {
    checkForRebuild();

    iRangeTo = getCollatingValue(iRangeTo);

    acquireSharedLock();
    try {
      indexEngine.getValuesMinor(iRangeTo, isInclusive, MultiValuesTransformer.INSTANCE, new OIndexEngine.ValuesResultListener() {
        @Override
        public boolean addResult(OIdentifiable identifiable) {
          return resultListener.addResult(identifiable);
        }
      });
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * Streams identities for an explicit key collection; keys are visited in
   * sorted order and the listener may stop iteration by returning false.
   */
  public void getValues(final Collection<?> iKeys, final IndexValuesResultListener resultListener) {
    checkForRebuild();

    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);

    acquireSharedLock();
    try {
      for (Object key : sortedKeys) {
        key = getCollatingValue(key);

        final Set<OIdentifiable> values = indexEngine.get(key);

        if (values == null)
          continue;

        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            if (!resultListener.addResult(value))
              return;
          }
        }
      }
    } finally {
      releaseSharedLock();
    }
  }

  /** Entry-document variant of {@link #getValuesMajor}. */
  public void getEntriesMajor(Object iRangeFrom, final boolean isInclusive, final IndexEntriesResultListener entriesResultListener) {
    checkForRebuild();

    iRangeFrom = getCollatingValue(iRangeFrom);

    acquireSharedLock();
    try {
      indexEngine.getEntriesMajor(iRangeFrom, isInclusive, MultiValuesTransformer.INSTANCE,
          new OIndexEngine.EntriesResultListener() {
            @Override
            public boolean addResult(ODocument entry) {
              return entriesResultListener.addResult(entry);
            }
          });
    } finally {
      releaseSharedLock();
    }
  }

  /** Entry-document variant of {@link #getValuesMinor}. */
  public void getEntriesMinor(Object iRangeTo, boolean isInclusive, final IndexEntriesResultListener entriesResultListener) {
    checkForRebuild();

    iRangeTo = getCollatingValue(iRangeTo);

    acquireSharedLock();
    try {
      indexEngine.getEntriesMinor(iRangeTo, isInclusive, MultiValuesTransformer.INSTANCE, new OIndexEngine.EntriesResultListener() {
        @Override
        public boolean addResult(ODocument entry) {
          return entriesResultListener.addResult(entry);
        }
      });
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * Entry-document range query. When the index has a single key type, both
   * bounds are converted to that type before querying.
   */
  public void getEntriesBetween(Object iRangeFrom, Object iRangeTo, boolean inclusive,
      final IndexEntriesResultListener indexEntriesResultListener) {
    checkForRebuild();

    iRangeFrom = getCollatingValue(iRangeFrom);
    iRangeTo = getCollatingValue(iRangeTo);

    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      iRangeFrom = OType.convert(iRangeFrom, types[0].getDefaultJavaType());
      iRangeTo = OType.convert(iRangeTo, types[0].getDefaultJavaType());
    }

    acquireSharedLock();
    try {
      indexEngine.getEntriesBetween(iRangeFrom, iRangeTo, inclusive, MultiValuesTransformer.INSTANCE,
          new OIndexEngine.EntriesResultListener() {
            @Override
            public boolean addResult(ODocument entry) {
              return indexEntriesResultListener.addResult(entry);
            }
          });
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * Counts identities in a key range, optionally capped at maxValuesToFetch.
   * Both bounds must be of the same type.
   */
  public long count(Object iRangeFrom, final boolean fromInclusive, Object iRangeTo, final boolean toInclusive,
      final int maxValuesToFetch) {
    checkForRebuild();

    iRangeFrom = getCollatingValue(iRangeFrom);
    iRangeTo = getCollatingValue(iRangeTo);

    final OType[] types = getDefinition().getTypes();
    if (types.length == 1) {
      iRangeFrom = OType.convert(iRangeFrom, types[0].getDefaultJavaType());
      iRangeTo = OType.convert(iRangeTo, types[0].getDefaultJavaType());
    }

    if (iRangeFrom != null && iRangeTo != null && iRangeFrom.getClass() != iRangeTo.getClass())
      throw new IllegalArgumentException("Range from-to parameters are of different types");

    acquireSharedLock();
    try {
      return indexEngine.count(iRangeFrom, fromInclusive, iRangeTo, toInclusive, maxValuesToFetch, MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * Streams key/rid documents for an explicit key collection, visiting keys in
   * sorted order; the listener may stop iteration by returning false.
   */
  public void getEntries(Collection<?> iKeys, IndexEntriesResultListener resultListener) {
    checkForRebuild();

    final List<Object> sortedKeys = new ArrayList<Object>(iKeys);
    Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);

    acquireSharedLock();
    try {
      for (Object key : sortedKeys) {
        key = getCollatingValue(key);

        final Set<OIdentifiable> values = indexEngine.get(key);

        if (values == null)
          continue;

        if (!values.isEmpty()) {
          for (final OIdentifiable value : values) {
            final ODocument document = new ODocument();
            document.field("key", key);
            document.field("rid", value.getIdentity());
            document.unsetDirty();

            if (!resultListener.addResult(document))
              return;
          }
        }
      }
    } finally {
      releaseSharedLock();
    }
  }

  /** Total number of stored identities across all keys. */
  public long getSize() {
    checkForRebuild();

    acquireSharedLock();
    try {
      return indexEngine.size(MultiValuesTransformer.INSTANCE);
    } finally {
      releaseSharedLock();
    }
  }

  /** Number of distinct keys (no value transformer applied). */
  public long getKeySize() {
    checkForRebuild();

    acquireSharedLock();
    try {
      return indexEngine.size(null);
    } finally {
      releaseSharedLock();
    }
  }

  /** Iterator over all stored identities, in engine value order. */
  public Iterator<OIdentifiable> valuesIterator() {
    checkForRebuild();

    acquireSharedLock();
    try {
      return new OSharedResourceIterator<OIdentifiable>(this, new OMultiCollectionIterator<OIdentifiable>(
          indexEngine.valuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  /** Iterator over all stored identities in reverse engine value order. */
  @SuppressWarnings({ "rawtypes", "unchecked" })
  public Iterator<OIdentifiable> valuesInverseIterator() {
    checkForRebuild();

    acquireSharedLock();
    try {
      return new OSharedResourceIterator(this, new OMultiCollectionIterator<OIdentifiable>(indexEngine.inverseValuesIterator()));
    } finally {
      releaseSharedLock();
    }
  }

  /**
   * Identity transformer between the engine's stored {@code Set<OIdentifiable>}
   * and the collection view used by range/count operations.
   */
  private static final class MultiValuesTransformer implements OIndexEngine.ValuesTransformer<Set<OIdentifiable>> {
    private static final MultiValuesTransformer INSTANCE = new MultiValuesTransformer();

    @Override
    public Collection<OIdentifiable> transformFromValue(Set<OIdentifiable> value) {
      return value;
    }

    @Override
    public Set<OIdentifiable> transformToValue(Collection<OIdentifiable> collection) {
      return (Set<OIdentifiable>) collection;
    }
  }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_index_OIndexMultiValues.java
|
31 |
/**
 * Completion proposal that inserts a literal value into the argument at
 * position {@code index} of a named-argument list starting near {@code loc}.
 * The insertion offsets are recomputed at apply time because the document may
 * have changed since the proposal was created.
 */
final class NestedLiteralCompletionProposal implements ICompletionProposal,
        ICompletionProposalExtension2 {

    private final int loc;     // offset where the argument list region begins
    private final int index;   // zero-based index of the target argument
    private final String value; // literal text to insert

    NestedLiteralCompletionProposal(String value, int loc,
            int index) {
        this.value = value;
        this.loc = loc;
        this.index = index;
    }

    public String getAdditionalProposalInfo() {
        return null;
    }

    @Override
    public void apply(IDocument document) {
        //the following awfulness is necessary because the
        //insertion point may have changed (and even its
        //text may have changed, since the proposal was
        //instantiated).
        try {
            IRegion li = document.getLineInformationOfOffset(loc);
            int endOfLine = li.getOffset() + li.getLength();
            int startOfArgs = getFirstPosition();
            // start of the index-th argument: just past the index-th ',' or ';'
            int offset = findCharCount(index, document,
                    loc+startOfArgs, endOfLine,
                    ",;", "", true)+1;
            if (offset>0 && document.getChar(offset)==' ') {
                offset++;
            }
            int nextOffset = findCharCount(index+1, document,
                    loc+startOfArgs, endOfLine,
                    ",;", "", true);
            // if the argument is named ("name = value" / "name => value"),
            // replace only the value part after the '=' / '=>'
            int middleOffset = findCharCount(1, document,
                    offset, nextOffset,
                    "=", "", true)+1;
            if (middleOffset>0 &&
                    document.getChar(middleOffset)=='>') {
                middleOffset++;
            }
            while (middleOffset>0 &&
                    document.getChar(middleOffset)==' ') {
                middleOffset++;
            }
            if (middleOffset>offset &&
                    middleOffset<nextOffset) {
                offset = middleOffset;
            }
            String str = value;
            if (nextOffset==-1) {
                nextOffset = offset;
            }
            if (document.getChar(nextOffset)=='}') {
                // keep a space before a closing brace
                str += " ";
            }
            document.replace(offset, nextOffset-offset, str);
        }
        catch (BadLocationException e) {
            e.printStackTrace();
        }
        //adding imports drops us out of linked mode :(
        /*try {
            DocumentChange tc = new DocumentChange("imports", document);
            tc.setEdit(new MultiTextEdit());
            HashSet<Declaration> decs = new HashSet<Declaration>();
            CompilationUnit cu = cpc.getRootNode();
            importDeclaration(decs, d, cu);
            if (d instanceof Functional) {
                List<ParameterList> pls = ((Functional) d).getParameterLists();
                if (!pls.isEmpty()) {
                    for (Parameter p: pls.get(0).getParameters()) {
                        MethodOrValue pm = p.getModel();
                        if (pm instanceof Method) {
                            for (ParameterList ppl: ((Method) pm).getParameterLists()) {
                                for (Parameter pp: ppl.getParameters()) {
                                    importSignatureTypes(pp.getModel(), cu, decs);
                                }
                            }
                        }
                    }
                }
            }
            applyImports(tc, decs, cu, document);
            tc.perform(new NullProgressMonitor());
        }
        catch (Exception e) {
            e.printStackTrace();
        }*/
    }

    @Override
    public Point getSelection(IDocument document) {
        return null;
    }

    @Override
    public String getDisplayString() {
        return value;
    }

    @Override
    public Image getImage() {
        return getDecoratedImage(CEYLON_LITERAL, 0, false);
    }

    @Override
    public IContextInformation getContextInformation() {
        return null;
    }

    @Override
    public void apply(ITextViewer viewer, char trigger,
            int stateMask, int offset) {
        apply(viewer.getDocument());
    }

    @Override
    public void selected(ITextViewer viewer, boolean smartToggle) {}

    @Override
    public void unselected(ITextViewer viewer) {}

    /**
     * Keeps the proposal visible while the text typed so far (ignoring an
     * optional "name =" prefix) is a case-insensitive prefix of the value.
     */
    @Override
    public boolean validate(IDocument document, int currentOffset,
            DocumentEvent event) {
        if (event==null) {
            return true;
        }
        else {
            try {
                IRegion li = document.getLineInformationOfOffset(loc);
                int endOfLine = li.getOffset() + li.getLength();
                int startOfArgs = getFirstPosition();
                int offset = findCharCount(index, document,
                        loc+startOfArgs, endOfLine,
                        ",;", "", true)+1;
                String content = document.get(offset, currentOffset - offset);
                int eq = content.indexOf("=");
                if (eq>0) {
                    content = content.substring(eq+1);
                }
                String filter = content.trim().toLowerCase();
                if (value.toLowerCase().startsWith(filter)) {
                    return true;
                }
            }
            catch (BadLocationException e) {
                // ignore concurrently modified document
            }
            return false;
        }
    }
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_InvocationCompletionProposal.java
|
317 |
// Acquire the map lock from a separate thread, then signal the main thread
// via the latch so it can assert behavior while the lock is held elsewhere.
new Thread() {
    public void run() {
        map.lock(key);
        lockedLatch.countDown();
    }
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapLockTest.java
|
777 |
public class MoreLikeThisRequestBuilder extends ActionRequestBuilder<MoreLikeThisRequest, SearchResponse, MoreLikeThisRequestBuilder> {
/**
 * Creates a builder with an empty request; index, type and id must be set on
 * the request before execution.
 */
public MoreLikeThisRequestBuilder(Client client) {
    super((InternalClient) client, new MoreLikeThisRequest());
}

/**
 * Creates a builder targeting the document identified by index/type/id.
 */
public MoreLikeThisRequestBuilder(Client client, String index, String type, String id) {
    super((InternalClient) client, new MoreLikeThisRequest(index).type(type).id(id));
}

/**
 * The fields of the document to use in order to find documents "like" this one. Defaults to run
 * against all the document fields.
 */
public MoreLikeThisRequestBuilder setField(String... fields) {
    request.fields(fields);
    return this;
}

/**
 * Sets the routing. Required if routing isn't id based.
 */
public MoreLikeThisRequestBuilder setRouting(String routing) {
    request.routing(routing);
    return this;
}

/**
 * The percent of the terms to match for each field. Defaults to <tt>0.3f</tt>.
 */
public MoreLikeThisRequestBuilder setPercentTermsToMatch(float percentTermsToMatch) {
    request.percentTermsToMatch(percentTermsToMatch);
    return this;
}

/**
 * The frequency below which terms will be ignored in the source doc. Defaults to <tt>2</tt>.
 */
public MoreLikeThisRequestBuilder setMinTermFreq(int minTermFreq) {
    request.minTermFreq(minTermFreq);
    return this;
}

/**
 * The maximum number of query terms that will be included in any generated query. Defaults to <tt>25</tt>.
 */
// NOTE(review): lacks the "set" prefix used by the sibling setters; kept as-is
// because renaming would break existing callers.
public MoreLikeThisRequestBuilder maxQueryTerms(int maxQueryTerms) {
    request.maxQueryTerms(maxQueryTerms);
    return this;
}

/**
 * Any word in this set is considered "uninteresting" and ignored.
 * <p/>
 * <p>Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as
 * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting".
 * <p/>
 * <p>Defaults to no stop words.
 */
public MoreLikeThisRequestBuilder setStopWords(String... stopWords) {
    request.stopWords(stopWords);
    return this;
}

/**
 * The frequency at which words will be ignored which do not occur in at least this
 * many docs. Defaults to <tt>5</tt>.
 */
public MoreLikeThisRequestBuilder setMinDocFreq(int minDocFreq) {
    request.minDocFreq(minDocFreq);
    return this;
}

/**
 * The maximum frequency in which words may still appear. Words that appear
 * in more than this many docs will be ignored. Defaults to unbounded.
 */
public MoreLikeThisRequestBuilder setMaxDocFreq(int maxDocFreq) {
    request.maxDocFreq(maxDocFreq);
    return this;
}

/**
 * The minimum word length below which words will be ignored. Defaults to <tt>0</tt>.
 */
public MoreLikeThisRequestBuilder setMinWordLen(int minWordLen) {
    request.minWordLength(minWordLen);
    return this;
}
/**
* The maximum word length above which words will be ignored. Defaults to unbounded.
*/
public MoreLikeThisRequestBuilder setMaxWordLen(int maxWordLen) {
request().maxWordLength(maxWordLen);
return this;
}
/**
* The boost factor to use when boosting terms. Defaults to <tt>1</tt>.
*/
public MoreLikeThisRequestBuilder setBoostTerms(float boostTerms) {
request.boostTerms(boostTerms);
return this;
}
/**
* An optional search source request allowing to control the search request for the
* more like this documents.
*/
public MoreLikeThisRequestBuilder setSearchSource(SearchSourceBuilder sourceBuilder) {
request.searchSource(sourceBuilder);
return this;
}
/**
* An optional search source request allowing to control the search request for the
* more like this documents.
*/
public MoreLikeThisRequestBuilder setSearchSource(String searchSource) {
request.searchSource(searchSource);
return this;
}
/**
* An optional search source request allowing to control the search request for the
* more like this documents.
*/
public MoreLikeThisRequestBuilder setSearchSource(Map searchSource) {
request.searchSource(searchSource);
return this;
}
/**
* An optional search source request allowing to control the search request for the
* more like this documents.
*/
public MoreLikeThisRequestBuilder setSearchSource(XContentBuilder builder) {
request.searchSource(builder);
return this;
}
/**
* An optional search source request allowing to control the search request for the
* more like this documents.
*/
public MoreLikeThisRequestBuilder setSearchSource(byte[] searchSource) {
request.searchSource(searchSource);
return this;
}
/**
* The search type of the mlt search query.
*/
public MoreLikeThisRequestBuilder setSearchType(SearchType searchType) {
request.searchType(searchType);
return this;
}
/**
* The search type of the mlt search query.
*/
public MoreLikeThisRequestBuilder setSearchType(String searchType) throws ElasticsearchIllegalArgumentException {
request.searchType(searchType);
return this;
}
/**
* The indices the resulting mlt query will run against. If not set, will run
* against the index the document was fetched from.
*/
public MoreLikeThisRequestBuilder setSearchIndices(String... searchIndices) {
request.searchIndices(searchIndices);
return this;
}
/**
* The types the resulting mlt query will run against. If not set, will run
* against the type of the document fetched.
*/
public MoreLikeThisRequestBuilder setSearchTypes(String... searchTypes) {
request.searchTypes(searchTypes);
return this;
}
/**
* An optional search scroll request to be able to continue and scroll the search
* operation.
*/
public MoreLikeThisRequestBuilder setSearchScroll(Scroll searchScroll) {
request.searchScroll(searchScroll);
return this;
}
/**
* The number of documents to return, defaults to 10.
*/
public MoreLikeThisRequestBuilder setSearchSize(int size) {
request.searchSize(size);
return this;
}
/**
* From which search result set to return.
*/
public MoreLikeThisRequestBuilder setSearchFrom(int from) {
request.searchFrom(from);
return this;
}
@Override
protected void doExecute(ActionListener<SearchResponse> listener) {
((Client) client).moreLikeThis(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_mlt_MoreLikeThisRequestBuilder.java
|
447 |
// Background thread: waits ~2 seconds, then polls one element from the
// queue `q` — presumably to unblock a blocking take()/poll(timeout) issued
// by the main test thread (NOTE(review): confirm against the enclosing test).
final Thread t1 = new Thread() {
    public void run() {
        try {
            // Give the main thread time to block on the queue first.
            Thread.sleep(2 * 1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        q.poll();
    }
};
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_queue_ClientQueueTest.java
|
360 |
/**
 * Transport action that fans a {@link NodesStatsRequest} out to the selected
 * nodes and gathers per-node {@link NodeStats} responses into a single
 * {@link NodesStatsResponse}.
 */
public class TransportNodesStatsAction extends TransportNodesOperationAction<NodesStatsRequest, NodesStatsResponse, TransportNodesStatsAction.NodeStatsRequest, NodeStats> {

    // Service used on each node to collect the actual statistics.
    private final NodeService nodeService;

    @Inject
    public TransportNodesStatsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
                                     ClusterService clusterService, TransportService transportService,
                                     NodeService nodeService) {
        super(settings, clusterName, threadPool, clusterService, transportService);
        this.nodeService = nodeService;
    }

    /** Stats collection runs on the management thread pool. */
    @Override
    protected String executor() {
        return ThreadPool.Names.MANAGEMENT;
    }

    @Override
    protected String transportAction() {
        return NodesStatsAction.NAME;
    }

    /**
     * Aggregates per-node responses; entries that are not {@link NodeStats}
     * (i.e. failures) are silently dropped — see {@link #accumulateExceptions()}.
     */
    @Override
    protected NodesStatsResponse newResponse(NodesStatsRequest nodesInfoRequest, AtomicReferenceArray responses) {
        final List<NodeStats> nodeStats = Lists.newArrayList();
        for (int i = 0; i < responses.length(); i++) {
            Object resp = responses.get(i);
            if (resp instanceof NodeStats) {
                nodeStats.add((NodeStats) resp);
            }
        }
        return new NodesStatsResponse(clusterName, nodeStats.toArray(new NodeStats[nodeStats.size()]));
    }

    @Override
    protected NodesStatsRequest newRequest() {
        return new NodesStatsRequest();
    }

    @Override
    protected NodeStatsRequest newNodeRequest() {
        return new NodeStatsRequest();
    }

    @Override
    protected NodeStatsRequest newNodeRequest(String nodeId, NodesStatsRequest request) {
        return new NodeStatsRequest(nodeId, request);
    }

    @Override
    protected NodeStats newNodeResponse() {
        return new NodeStats();
    }

    /** Executed on each target node: collects exactly the stats flagged in the request. */
    @Override
    protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) throws ElasticsearchException {
        NodesStatsRequest request = nodeStatsRequest.request;
        return nodeService.stats(request.indices(), request.os(), request.process(), request.jvm(), request.threadPool(), request.network(),
                request.fs(), request.transport(), request.http(), request.breaker());
    }

    /** Per-node failures are not collected; failed nodes are simply absent from the response. */
    @Override
    protected boolean accumulateExceptions() {
        return false;
    }

    /**
     * Per-node wrapper that carries the original {@link NodesStatsRequest}
     * over the wire to a single node.
     */
    static class NodeStatsRequest extends NodeOperationRequest {

        NodesStatsRequest request;

        NodeStatsRequest() {
        }

        NodeStatsRequest(String nodeId, NodesStatsRequest request) {
            super(request, nodeId);
            this.request = request;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            request = new NodesStatsRequest();
            request.readFrom(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            request.writeTo(out);
        }
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_stats_TransportNodesStatsAction.java
|
136 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(QuickTest.class)
public class ClientTimeoutTest {

    /**
     * Connecting to an unreachable address (8.8.8.8:5701) must fail with an
     * IllegalStateException rather than hang; the 20s JUnit timeout guards
     * against the client blocking forever.
     */
    @Test(timeout = 20000, expected = IllegalStateException.class)
    public void testTimeoutToOutsideNetwork() throws Exception {
        ClientConfig clientConfig = new ClientConfig();
        clientConfig.getGroupConfig().setName( "dev" ).setPassword( "dev-pass" );
        clientConfig.getNetworkConfig().addAddress( "8.8.8.8:5701" );
        HazelcastInstance client = HazelcastClient.newHazelcastClient( clientConfig );
        // First proxy access triggers the connection attempt that is expected to fail.
        IList<Object> list = client.getList( "test" );
    }
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_ClientTimeoutTest.java
|
1,005 |
/**
 * Drives a single-shard operation: prefers a shard copy on the local node
 * (when requested), then falls back to iterating the remaining shard copies,
 * executing locally or via transport as appropriate. Failures on one copy
 * trigger a retry on the next; the last failure is reported if all copies fail.
 */
private class AsyncSingleAction {

    private final ActionListener<Response> listener;
    // Iterator over candidate shard copies; null means "execute locally without a shard".
    private final ShardsIterator shardsIt;
    private final Request request;
    private final DiscoveryNodes nodes;

    private AsyncSingleAction(Request request, ActionListener<Response> listener) {
        this.request = request;
        this.listener = listener;
        ClusterState clusterState = clusterService.state();
        nodes = clusterState.nodes();
        // Fail fast on cluster-level, then request-level blocks before resolving shards.
        ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
        if (blockException != null) {
            throw blockException;
        }
        blockException = checkRequestBlock(clusterState, request);
        if (blockException != null) {
            throw blockException;
        }
        this.shardsIt = shards(clusterState, request);
    }

    public void start() {
        performFirst();
    }

    // Records the failure and moves on to the next shard copy.
    private void onFailure(ShardRouting shardRouting, Throwable e) {
        if (logger.isTraceEnabled() && e != null) {
            logger.trace(shardRouting.shortSummary() + ": Failed to execute [" + request + "]", e);
        }
        perform(e);
    }

    /**
     * First get should try and use a shard that exists on a local node for better performance
     */
    private void performFirst() {
        if (shardsIt == null) {
            // just execute it on the local node
            if (request.operationThreaded()) {
                request.beforeLocalFork();
                threadPool.executor(executor()).execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            // -1 shard id: no specific shard targeted.
                            Response response = shardOperation(request, -1);
                            listener.onResponse(response);
                        } catch (Throwable e) {
                            onFailure(null, e);
                        }
                    }
                });
                return;
            } else {
                try {
                    final Response response = shardOperation(request, -1);
                    listener.onResponse(response);
                    return;
                } catch (Throwable e) {
                    onFailure(null, e);
                }
            }
            return;
        }
        if (request.preferLocalShard()) {
            boolean foundLocal = false;
            ShardRouting shardX;
            while ((shardX = shardsIt.nextOrNull()) != null) {
                final ShardRouting shard = shardX;
                if (shard.currentNodeId().equals(nodes.localNodeId())) {
                    foundLocal = true;
                    if (request.operationThreaded()) {
                        request.beforeLocalFork();
                        threadPool.executor(executor()).execute(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    Response response = shardOperation(request, shard.id());
                                    listener.onResponse(response);
                                } catch (Throwable e) {
                                    // Local attempt failed: restart iteration and retry remotely.
                                    shardsIt.reset();
                                    onFailure(shard, e);
                                }
                            }
                        });
                        return;
                    } else {
                        try {
                            final Response response = shardOperation(request, shard.id());
                            listener.onResponse(response);
                            return;
                        } catch (Throwable e) {
                            shardsIt.reset();
                            onFailure(shard, e);
                        }
                    }
                }
            }
            if (!foundLocal) {
                // no local node get, go remote
                shardsIt.reset();
                perform(null);
            }
        } else {
            perform(null);
        }
    }

    // Tries the next shard copy; when exhausted, fails the listener with the
    // last seen exception (or NoShardAvailableActionException if none).
    private void perform(final Throwable lastException) {
        final ShardRouting shard = shardsIt == null ? null : shardsIt.nextOrNull();
        if (shard == null) {
            Throwable failure = lastException;
            if (failure == null) {
                failure = new NoShardAvailableActionException(null, "No shard available for [" + request + "]");
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("failed to execute [" + request + "]", failure);
                }
            }
            listener.onFailure(failure);
        } else {
            if (shard.currentNodeId().equals(nodes.localNodeId())) {
                // we don't prefer local shard, so try and do it here
                if (!request.preferLocalShard()) {
                    try {
                        if (request.operationThreaded()) {
                            request.beforeLocalFork();
                            threadPool.executor(executor).execute(new Runnable() {
                                @Override
                                public void run() {
                                    try {
                                        Response response = shardOperation(request, shard.id());
                                        listener.onResponse(response);
                                    } catch (Throwable e) {
                                        onFailure(shard, e);
                                    }
                                }
                            });
                        } else {
                            final Response response = shardOperation(request, shard.id());
                            listener.onResponse(response);
                        }
                    } catch (Throwable e) {
                        onFailure(shard, e);
                    }
                } else {
                    // Local copies were already tried in performFirst(); skip this one.
                    perform(lastException);
                }
            } else {
                // Shard lives on a remote node: route the request over the transport layer.
                DiscoveryNode node = nodes.get(shard.currentNodeId());
                transportService.sendRequest(node, transportShardAction, new ShardSingleOperationRequest(request, shard.id()), new BaseTransportResponseHandler<Response>() {
                    @Override
                    public Response newInstance() {
                        return newResponse();
                    }
                    @Override
                    public String executor() {
                        return ThreadPool.Names.SAME;
                    }
                    @Override
                    public void handleResponse(final Response response) {
                        listener.onResponse(response);
                    }
                    @Override
                    public void handleException(TransportException exp) {
                        onFailure(shard, exp);
                    }
                });
            }
        }
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_support_single_custom_TransportSingleCustomOperationAction.java
|
1,118 |
/**
 * SQL {@code coalesce()} function: yields the first non-null argument,
 * or {@code null} when every argument is null.
 */
public class OSQLFunctionCoalesce extends OSQLFunctionAbstract {
    public static final String NAME = "coalesce";

    public OSQLFunctionCoalesce() {
        // Accepts between 1 and 1000 arguments.
        super(NAME, 1, 1000);
    }

    @Override
    public Object execute(OIdentifiable iCurrentRecord, Object iCurrentResult, final Object[] iParameters, OCommandContext iContext) {
        // Scan the arguments left to right and stop at the first non-null value.
        for (final Object candidate : iParameters) {
            if (candidate != null)
                return candidate;
        }
        // All arguments were null.
        return null;
    }

    @Override
    public String getSyntax() {
        return "Returns the first not-null parameter or null if all parameters are null. Syntax: coalesce(<field|value> [,<field|value>]*)";
    }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_misc_OSQLFunctionCoalesce.java
|
146 |
/**
 * Facade over a {@link HazelcastClient} instance. Every call is delegated
 * through {@link #getClient()}, which throws
 * {@link HazelcastInstanceNotActiveException} once the client has been shut
 * down (the {@code client} field is set to null / stops running), so stale
 * references fail fast instead of operating on a dead client.
 */
public final class HazelcastClientProxy
        implements HazelcastInstance {

    // Volatile: cleared on shutdown, read by every delegating method.
    volatile HazelcastClient client;

    HazelcastClientProxy(HazelcastClient client) {
        this.client = client;
    }

    @Override
    public Config getConfig() {
        return getClient().getConfig();
    }

    @Override
    public String getName() {
        return getClient().getName();
    }

    @Override
    public <E> IQueue<E> getQueue(String name) {
        return getClient().getQueue(name);
    }

    @Override
    public <E> ITopic<E> getTopic(String name) {
        return getClient().getTopic(name);
    }

    @Override
    public <E> ISet<E> getSet(String name) {
        return getClient().getSet(name);
    }

    @Override
    public <E> IList<E> getList(String name) {
        return getClient().getList(name);
    }

    @Override
    public <K, V> IMap<K, V> getMap(String name) {
        return getClient().getMap(name);
    }

    @Override
    public <K, V> MultiMap<K, V> getMultiMap(String name) {
        return getClient().getMultiMap(name);
    }

    @Override
    public <K, V> ReplicatedMap<K, V> getReplicatedMap(String name) {
        return getClient().getReplicatedMap(name);
    }

    @Override
    public JobTracker getJobTracker(String name) {
        return getClient().getJobTracker(name);
    }

    @Override
    public ILock getLock(Object key) {
        return getClient().getLock(key);
    }

    @Override
    public ILock getLock(String key) {
        return getClient().getLock(key);
    }

    @Override
    public Cluster getCluster() {
        return getClient().getCluster();
    }

    @Override
    public Client getLocalEndpoint() {
        return getClient().getLocalEndpoint();
    }

    @Override
    public IExecutorService getExecutorService(String name) {
        return getClient().getExecutorService(name);
    }

    @Override
    public <T> T executeTransaction(TransactionalTask<T> task)
            throws TransactionException {
        return getClient().executeTransaction(task);
    }

    @Override
    public <T> T executeTransaction(TransactionOptions options, TransactionalTask<T> task)
            throws TransactionException {
        return getClient().executeTransaction(options, task);
    }

    @Override
    public TransactionContext newTransactionContext() {
        return getClient().newTransactionContext();
    }

    @Override
    public TransactionContext newTransactionContext(TransactionOptions options) {
        return getClient().newTransactionContext(options);
    }

    @Override
    public IdGenerator getIdGenerator(String name) {
        return getClient().getIdGenerator(name);
    }

    @Override
    public IAtomicLong getAtomicLong(String name) {
        return getClient().getAtomicLong(name);
    }

    @Override
    public <E> IAtomicReference<E> getAtomicReference(String name) {
        return getClient().getAtomicReference(name);
    }

    @Override
    public ICountDownLatch getCountDownLatch(String name) {
        return getClient().getCountDownLatch(name);
    }

    @Override
    public ISemaphore getSemaphore(String name) {
        return getClient().getSemaphore(name);
    }

    @Override
    public Collection<DistributedObject> getDistributedObjects() {
        return getClient().getDistributedObjects();
    }

    @Override
    public String addDistributedObjectListener(DistributedObjectListener distributedObjectListener) {
        return getClient().addDistributedObjectListener(distributedObjectListener);
    }

    @Override
    public boolean removeDistributedObjectListener(String registrationId) {
        return getClient().removeDistributedObjectListener(registrationId);
    }

    @Override
    public PartitionService getPartitionService() {
        return getClient().getPartitionService();
    }

    @Override
    public ClientService getClientService() {
        return getClient().getClientService();
    }

    @Override
    public LoggingService getLoggingService() {
        return getClient().getLoggingService();
    }

    @Override
    public LifecycleService getLifecycleService() {
        // Unlike the other delegates, this must remain callable after shutdown,
        // so a terminated stand-in is returned instead of throwing.
        final HazelcastClient hz = client;
        return hz != null ? hz.getLifecycleService() : new TerminatedLifecycleService();
    }

    @Deprecated
    public <T extends DistributedObject> T getDistributedObject(String serviceName, Object id) {
        return getClient().getDistributedObject(serviceName, id);
    }

    @Override
    public <T extends DistributedObject> T getDistributedObject(String serviceName, String name) {
        return getClient().getDistributedObject(serviceName, name);
    }

    @Override
    public ConcurrentMap<String, Object> getUserContext() {
        return getClient().getUserContext();
    }

    public ClientConfig getClientConfig() {
        return getClient().getClientConfig();
    }

    @Override
    public void shutdown() {
        getLifecycleService().shutdown();
    }

    public SerializationService getSerializationService() {
        return getClient().getSerializationService();
    }

    /**
     * Returns the live client or throws if it has been shut down.
     *
     * @throws HazelcastInstanceNotActiveException when the client is gone or no longer running
     */
    private HazelcastClient getClient() {
        final HazelcastClient c = client;
        if (c == null || !c.getLifecycleService().isRunning()) {
            throw new HazelcastInstanceNotActiveException();
        }
        return c;
    }
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_HazelcastClientProxy.java
|
682 |
// Factory registered under the COLLECTION_REMOVE type id: produces an empty
// CollectionRemoveOperation to be populated during deserialization.
constructors[COLLECTION_REMOVE] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
    public IdentifiedDataSerializable createNew(Integer arg) {
        return new CollectionRemoveOperation();
    }
};
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java
|
1,496 |
/**
 * Immutable pairing of a node with a human-readable allocation explanation.
 */
public static class NodeExplanation {
    private final DiscoveryNode node;
    private final String description;

    /**
     * Creates a new {@link NodeExplanation}
     *
     * @param node node referenced by this {@link NodeExplanation}
     * @param description a message associated with the given node
     */
    public NodeExplanation(DiscoveryNode node, String description) {
        this.node = node;
        this.description = description;
    }

    /**
     * The node referenced by the explanation
     * @return referenced node
     */
    public DiscoveryNode node() {
        return node;
    }

    /**
     * Get the explanation for the node
     * @return explanation for the node
     */
    public String description() {
        return description;
    }
}
| 0true
|
src_main_java_org_elasticsearch_cluster_routing_allocation_AllocationExplanation.java
|
237 |
/**
 * Contract for modules that decide, based on their configuration, whether
 * they can handle a particular request.
 */
public interface ModuleProvider {

    /**
     * Indicates if, given the configuration, this module can respond to the particular request.
     *
     * @param config the module configuration to evaluate
     * @return {@code true} if this module can respond to the request, {@code false} otherwise
     */
    public boolean canRespond(ModuleConfiguration config);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_config_service_ModuleProvider.java
|
788 |
/**
 * Two-tier cache for schema lookups: a fast unbounded ConcurrentMap front
 * (dropped entirely when it grows past its limit — a rare event) backed by a
 * size-bounded Guava cache. Misses fall through to {@code retriever}.
 * Relation entries are keyed by a bit-packed long combining schema id,
 * system-type id and edge direction (see {@link #getIdentifier}).
 */
public class StandardSchemaCache implements SchemaCache {

    public static final int MAX_CACHED_TYPES_DEFAULT = 10000;

    private static final int INITIAL_CAPACITY = 128;
    private static final int INITIAL_CACHE_SIZE = 16;
    private static final int CACHE_RELATION_MULTIPLIER = 3; // 1) type-name, 2) type-definitions, 3) modifying edges [index, lock]
    private static final int CONCURRENCY_LEVEL = 2;

    //    private static final int SCHEMAID_FORW_SHIFT = 4; //Number of bits at the end to append the id of the system type
    private static final int SCHEMAID_TOTALFORW_SHIFT = 3; //Total number of bits appended - the 1 is for the 1 bit direction
    private static final int SCHEMAID_BACK_SHIFT = 2; //Number of bits to remove from end of schema id since its just the padding
    {
        // Sanity-check the bit-packing constants against the id manager's padding.
        assert IDManager.VertexIDType.Schema.removePadding(1l<<SCHEMAID_BACK_SHIFT)==1;
        assert SCHEMAID_TOTALFORW_SHIFT-SCHEMAID_BACK_SHIFT>=0;
    }

    private final int maxCachedTypes;
    private final int maxCachedRelations;
    private final StoreRetrieval retriever;

    // Front-tier maps; set to null when they overflow, which routes lookups to the backup caches.
    private volatile ConcurrentMap<String,Long> typeNames;
    private final Cache<String,Long> typeNamesBackup;

    private volatile ConcurrentMap<Long,EntryList> schemaRelations;
    private final Cache<Long,EntryList> schemaRelationsBackup;

    public StandardSchemaCache(final StoreRetrieval retriever) {
        this(MAX_CACHED_TYPES_DEFAULT,retriever);
    }

    public StandardSchemaCache(final int size, final StoreRetrieval retriever) {
        Preconditions.checkArgument(size>0,"Size must be positive");
        Preconditions.checkNotNull(retriever);
        maxCachedTypes = size;
        maxCachedRelations = maxCachedTypes *CACHE_RELATION_MULTIPLIER;
        this.retriever=retriever;

        typeNamesBackup = CacheBuilder.newBuilder()
                .concurrencyLevel(CONCURRENCY_LEVEL).initialCapacity(INITIAL_CACHE_SIZE)
                .maximumSize(maxCachedTypes).build();
        typeNames = new ConcurrentHashMap<String, Long>(INITIAL_CAPACITY,0.75f,CONCURRENCY_LEVEL);

        schemaRelationsBackup = CacheBuilder.newBuilder()
                .concurrencyLevel(CONCURRENCY_LEVEL).initialCapacity(INITIAL_CACHE_SIZE *CACHE_RELATION_MULTIPLIER)
                .maximumSize(maxCachedRelations).build();
//        typeRelations = new ConcurrentHashMap<Long, EntryList>(INITIAL_CAPACITY*CACHE_RELATION_MULTIPLIER,0.75f,CONCURRENCY_LEVEL);
        schemaRelations = new NonBlockingHashMapLong<EntryList>(INITIAL_CAPACITY*CACHE_RELATION_MULTIPLIER); //TODO: Is this data structure safe or should we go with ConcurrentHashMap (line above)?
    }

    /**
     * Resolves a schema name to its id, consulting the front map, then the
     * backup cache, then the store. Only existing types are cached.
     */
    @Override
    public Long getSchemaId(final String schemaName, final StandardTitanTx tx) {
        ConcurrentMap<String,Long> types = typeNames;
        Long id;
        if (types==null) {
            id = typeNamesBackup.getIfPresent(schemaName);
            if (id==null) {
                id = retriever.retrieveSchemaByName(schemaName, tx);
                if (id!=null) { //only cache if type exists
                    typeNamesBackup.put(schemaName,id);
                }
            }
        } else {
            id = types.get(schemaName);
            if (id==null) { //Retrieve it
                if (types.size()> maxCachedTypes) {
                    /* Safe guard against the concurrent hash map growing to large - this would be a VERY rare event
                    as it only happens for graph databases with thousands of types.
                     */
                    typeNames = null;
                    return getSchemaId(schemaName, tx);
                } else {
                    //Expand map
                    id = retriever.retrieveSchemaByName(schemaName, tx);
                    if (id!=null) { //only cache if type exists
                        types.put(schemaName,id);
                    }
                }
            }
        }
        return id;
    }

    // Packs (schemaId without padding, system-type id, edge direction) into one long cache key.
    private long getIdentifier(final long schemaId, final SystemRelationType type, final Direction dir) {
        int edgeDir = EdgeDirection.position(dir);
        assert edgeDir==0 || edgeDir==1;
        long typeid = (schemaId >>> SCHEMAID_BACK_SHIFT);
        int systemTypeId;
        if (type== BaseLabel.SchemaDefinitionEdge) systemTypeId=0;
        else if (type== BaseKey.SchemaName) systemTypeId=1;
        else if (type== BaseKey.SchemaCategory) systemTypeId=2;
        else if (type== BaseKey.SchemaDefinitionProperty) systemTypeId=3;
        else throw new AssertionError("Unexpected SystemType encountered in StandardSchemaCache: " + type.getName());

        //Ensure that there is enough padding
        assert (systemTypeId<(1<<2));
        return (((typeid<<2)+systemTypeId)<<1)+edgeDir;
    }

    /**
     * Looks up cached relation entries for a schema vertex, falling back to
     * the store on a miss. Same two-tier overflow strategy as getSchemaId,
     * except relation lists are cached even when empty (front tier only).
     */
    @Override
    public EntryList getSchemaRelations(final long schemaId, final BaseRelationType type, final Direction dir, final StandardTitanTx tx) {
        assert IDManager.isSystemRelationTypeId(type.getLongId()) && type.getLongId()>0;
        Preconditions.checkArgument(IDManager.VertexIDType.Schema.is(schemaId));
        Preconditions.checkArgument((Long.MAX_VALUE>>>(SCHEMAID_TOTALFORW_SHIFT-SCHEMAID_BACK_SHIFT))>= schemaId);

        int edgeDir = EdgeDirection.position(dir);
        assert edgeDir==0 || edgeDir==1;

        final long typePlusRelation = getIdentifier(schemaId,type,dir);
        ConcurrentMap<Long,EntryList> types = schemaRelations;
        EntryList entries;
        if (types==null) {
            entries = schemaRelationsBackup.getIfPresent(typePlusRelation);
            if (entries==null) {
                entries = retriever.retrieveSchemaRelations(schemaId, type, dir, tx);
                if (!entries.isEmpty()) { //only cache if type exists
                    schemaRelationsBackup.put(typePlusRelation, entries);
                }
            }
        } else {
            entries = types.get(typePlusRelation);
            if (entries==null) { //Retrieve it
                if (types.size()> maxCachedRelations) {
                    /* Safe guard against the concurrent hash map growing to large - this would be a VERY rare event
                    as it only happens for graph databases with thousands of types.
                     */
                    schemaRelations = null;
                    return getSchemaRelations(schemaId, type, dir, tx);
                } else {
                    //Expand map
                    entries = retriever.retrieveSchemaRelations(schemaId, type, dir, tx);
                    types.put(typePlusRelation,entries);
                }
            }
        }
        assert entries!=null;
        return entries;
    }

//    @Override
//    public void expireSchemaName(final String name) {
//        ConcurrentMap<String,Long> types = typeNames;
//        if (types!=null) types.remove(name);
//        typeNamesBackup.invalidate(name);
//    }

    /**
     * Evicts all cached state for the given schema vertex: every relation
     * entry whose packed key maps back to this id, and every name mapping
     * pointing at it — from both cache tiers.
     */
    @Override
    public void expireSchemaElement(final long schemaId) {
        //1) expire relations
        final long cuttypeid = (schemaId >>> SCHEMAID_BACK_SHIFT);
        ConcurrentMap<Long,EntryList> types = schemaRelations;
        if (types!=null) {
            Iterator<Long> keys = types.keySet().iterator();
            while (keys.hasNext()) {
                long key = keys.next();
                if ((key>>>SCHEMAID_TOTALFORW_SHIFT)==cuttypeid) keys.remove();
            }
        }
        Iterator<Long> keys = schemaRelationsBackup.asMap().keySet().iterator();
        while (keys.hasNext()) {
            long key = keys.next();
            if ((key>>>SCHEMAID_TOTALFORW_SHIFT)==cuttypeid) schemaRelationsBackup.invalidate(key);
        }
        //2) expire names
        ConcurrentMap<String,Long> names = typeNames;
        if (names!=null) {
            for (Iterator<Map.Entry<String, Long>> iter = names.entrySet().iterator(); iter.hasNext(); ) {
                Map.Entry<String, Long> next = iter.next();
                if (next.getValue().equals(schemaId)) iter.remove();
            }
        }
        for (Map.Entry<String,Long> entry : typeNamesBackup.asMap().entrySet()) {
            if (entry.getValue().equals(schemaId)) typeNamesBackup.invalidate(entry.getKey());
        }
    }
}
| 1no label
|
titan-core_src_main_java_com_thinkaurelius_titan_graphdb_database_cache_StandardSchemaCache.java
|
353 |
/**
 * Class transformer that rewrites the {@code @Inheritance} annotation of
 * configured entity classes to {@code SINGLE_TABLE}, optionally adding a
 * {@code @DiscriminatorColumn}. The target classes and discriminator settings
 * are read from JPA properties under {@link #SINGLE_TABLE_ENTITIES}.
 */
public class SingleTableInheritanceClassTransformer implements BroadleafClassTransformer {

    public static final String SINGLE_TABLE_ENTITIES = "broadleaf.ejb.entities.override_single_table";
    private static final Log LOG = LogFactory.getLog(SingleTableInheritanceClassTransformer.class);
    protected List<SingleTableInheritanceInfo> infos = new ArrayList<SingleTableInheritanceInfo>();

    /**
     * Parses the configured class list plus per-class discriminator
     * name/type/length properties into {@link SingleTableInheritanceInfo}
     * entries used later by {@link #transform}.
     */
    public void compileJPAProperties(Properties props, Object key) throws Exception {
        if (((String) key).equals(SINGLE_TABLE_ENTITIES)) {
            String[] classes = StringUtils.tokenizeToStringArray(props.getProperty((String) key), ConfigurableApplicationContext.CONFIG_LOCATION_DELIMITERS);
            for (String clazz : classes) {
                // Per-class property keys use the simple class name.
                String keyName;
                int pos = clazz.lastIndexOf(".");
                if (pos >= 0) {
                    keyName = clazz.substring(pos + 1, clazz.length());
                } else {
                    keyName = clazz;
                }
                SingleTableInheritanceInfo info = new SingleTableInheritanceInfo();
                info.setClassName(clazz);
                String discriminatorName = props.getProperty("broadleaf.ejb."+keyName+".discriminator.name");
                if (discriminatorName != null) {
                    info.setDiscriminatorName(discriminatorName);
                    String type = props.getProperty("broadleaf.ejb."+keyName+".discriminator.type");
                    if (type != null) {
                        info.setDiscriminatorType(DiscriminatorType.valueOf(type));
                    }
                    String length = props.getProperty("broadleaf.ejb."+keyName+".discriminator.length");
                    if (length != null) {
                        info.setDiscriminatorLength(Integer.parseInt(length));
                    }
                }
                // Remove-then-add so a re-parse replaces any stale entry for the same class.
                infos.remove(info);
                infos.add(info);
            }
        }
    }

    /**
     * Rewrites the bytecode of configured classes: strips any existing
     * {@code @Inheritance} annotation, re-adds it with SINGLE_TABLE strategy
     * and optionally a {@code @DiscriminatorColumn}. Returns {@code null}
     * (no change) for classes not configured.
     */
    public byte[] transform(ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws IllegalClassFormatException {
        if (infos.isEmpty()) {
            return null;
        }
        String convertedClassName = className.replace('/', '.');
        SingleTableInheritanceInfo key = new SingleTableInheritanceInfo();
        key.setClassName(convertedClassName);
        int pos = infos.indexOf(key);
        if (pos >= 0) {
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Converting " + convertedClassName + " to a SingleTable inheritance strategy.");
                }
                SingleTableInheritanceInfo myInfo = infos.get(pos);
                ClassFile classFile = new ClassFile(new DataInputStream(new ByteArrayInputStream(classfileBuffer)));
                ConstPool constantPool = classFile.getConstPool();
                AnnotationsAttribute annotationsAttribute = new AnnotationsAttribute(constantPool, AnnotationsAttribute.visibleTag);
                // Copy all existing class-level annotations except @Inheritance into a fresh attribute.
                List<?> attributes = classFile.getAttributes();
                Iterator<?> itr = attributes.iterator();
                while(itr.hasNext()) {
                    Object object = itr.next();
                    if (AnnotationsAttribute.class.isAssignableFrom(object.getClass())) {
                        AnnotationsAttribute attr = (AnnotationsAttribute) object;
                        Annotation[] items = attr.getAnnotations();
                        for (Annotation annotation : items) {
                            String typeName = annotation.getTypeName();
                            if (!typeName.equals(Inheritance.class.getName())) {
                                annotationsAttribute.addAnnotation(annotation);
                            }
                        }
                        itr.remove();
                    }
                }
                // Re-add @Inheritance(strategy = InheritanceType.SINGLE_TABLE).
                Annotation inheritance = new Annotation(Inheritance.class.getName(), constantPool);
                ClassPool pool = ClassPool.getDefault();
                pool.importPackage("javax.persistence");
                pool.importPackage("java.lang");
                EnumMemberValue strategy = (EnumMemberValue) Annotation.createMemberValue(constantPool, pool.makeClass("InheritanceType"));
                strategy.setType(InheritanceType.class.getName());
                strategy.setValue(InheritanceType.SINGLE_TABLE.name());
                inheritance.addMemberValue("strategy", strategy);
                annotationsAttribute.addAnnotation(inheritance);
                if (myInfo.getDiscriminatorName() != null) {
                    // Add @DiscriminatorColumn(name, discriminatorType, length) from the parsed config.
                    Annotation discriminator = new Annotation(DiscriminatorColumn.class.getName(), constantPool);
                    StringMemberValue name = new StringMemberValue(constantPool);
                    name.setValue(myInfo.getDiscriminatorName());
                    discriminator.addMemberValue("name", name);
                    EnumMemberValue discriminatorType = (EnumMemberValue) Annotation.createMemberValue(constantPool, pool.makeClass("DiscriminatorType"));
                    discriminatorType.setType(DiscriminatorType.class.getName());
                    discriminatorType.setValue(myInfo.getDiscriminatorType().name());
                    discriminator.addMemberValue("discriminatorType", discriminatorType);
                    IntegerMemberValue length = new IntegerMemberValue(constantPool);
                    length.setValue(myInfo.getDiscriminatorLength());
                    discriminator.addMemberValue("length", length);
                    annotationsAttribute.addAnnotation(discriminator);
                }
                classFile.addAttribute(annotationsAttribute);
                ByteArrayOutputStream bos = new ByteArrayOutputStream();
                DataOutputStream os = new DataOutputStream(bos);
                classFile.write(os);
                os.close();
                return bos.toByteArray();
            } catch(Exception ex) {
                // Log through the class logger with the full cause instead of printStackTrace();
                // IllegalClassFormatException offers no cause constructor, so the stack trace
                // is preserved in the log.
                LOG.error("Unable to convert " + convertedClassName + " to a SingleTable inheritance strategy", ex);
                throw new IllegalClassFormatException("Unable to convert " + convertedClassName + " to a SingleTable inheritance strategy: " + ex.getMessage());
            }
        } else {
            return null;
        }
    }
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_extensibility_jpa_convert_inheritance_SingleTableInheritanceClassTransformer.java
|
513 |
/**
 * Exception raised when the execution of a database command fails.
 */
public class OCommandExecutionException extends OException {

    private static final long serialVersionUID = -7430575036316163711L;

    /**
     * Builds the exception with an explanatory message only.
     *
     * @param message description of the failure
     */
    public OCommandExecutionException(String message) {
        super(message);
    }

    /**
     * Builds the exception with a message and the underlying cause.
     *
     * @param message description of the failure
     * @param cause   throwable that triggered this exception
     */
    public OCommandExecutionException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_exception_OCommandExecutionException.java
|
1,612 |
/**
 * Distributed task that deploys (backs up and ships) a database to a
 * requesting remote node. The sender's copy is serialized in-memory and
 * returned as a single {@link OBuffer}; an empty buffer signals "skipped"
 * (same-node request, or another node is already deploying).
 */
public class ODeployDatabaseTask extends OAbstractReplicatedTask {
    private static final long serialVersionUID = 1L;

    // private static final String BACKUP_DIRECTORY = "tempBackups";

    protected final static int CHUNK_MAX_SIZE = 1048576; // 1MB

    public ODeployDatabaseTask() {
    }

    @Override
    public Object execute(final OServer iServer, ODistributedServerManager iManager, final ODatabaseDocumentTx database)
            throws Exception {

        // Never deploy to ourselves.
        if (!getNodeSource().equals(iManager.getLocalNodeName())) {

            final String databaseName = database.getName();

            // Per-database lock: only one node deploys a given database at a time;
            // if busy, skip rather than block.
            final Lock lock = iManager.getLock(databaseName);
            if (lock.tryLock()) {
                try {

                    ODistributedServerLog.warn(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.OUT, "deploying database %s...",
                            databaseName);

                    // Full backup into memory; NOTE(review): the whole database is
                    // buffered on the heap, so very large databases may be costly.
                    final ByteArrayOutputStream out = new ByteArrayOutputStream();
                    database.backup(out, null, null);

                    final byte[] buffer = out.toByteArray();

                    ODistributedServerLog.warn(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.OUT,
                            "sending the compressed database %s over the network, total %s", databaseName,
                            OFileUtils.getSizeAsString(buffer.length));

                    return new OBuffer(buffer);

                    // final File f = new File(BACKUP_DIRECTORY + "/" + database.getName());
                    // database.backup(new FileOutputStream(f), null);
                    //
                    // final ByteArrayOutputStream out = new ByteArrayOutputStream(CHUNK_MAX_SIZE);
                    // final FileInputStream in = new FileInputStream(f);
                    // try {
                    // final long fileSize = f.length();
                    //
                    // ODistributedServerLog.warn(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.OUT,
                    // "copying %s to remote node...", OFileUtils.getSizeAsString(fileSize));
                    //
                    // for (int byteCopied = 0; byteCopied < fileSize;) {
                    // byteCopied += OIOUtils.copyStream(in, out, CHUNK_MAX_SIZE);
                    //
                    // if ((Boolean) iManager.sendRequest(database.getName(), null, new OCopyDatabaseChunkTask(out.toByteArray()),
                    // EXECUTION_MODE.RESPONSE)) {
                    // out.reset();
                    // }
                    // }
                    //
                    // return "deployed";
                    // } finally {
                    // OFileUtils.deleteRecursively(new File(BACKUP_DIRECTORY));
                    // }

                } finally {
                    lock.unlock();
                }
            } else
                ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.NONE,
                        "skip deploying database %s because another node is doing it", databaseName);
        } else
            ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.NONE,
                    "skip deploying database from the same node");

        // Empty buffer = nothing deployed.
        return new OBuffer(new byte[0]);
    }

    public RESULT_STRATEGY getResultStrategy() {
        return RESULT_STRATEGY.UNION;
    }

    public QUORUM_TYPE getQuorumType() {
        return QUORUM_TYPE.NONE;
    }

    /** Generous timeout (60s) because the full backup can take a while. */
    @Override
    public long getTimeout() {
        return 60000;
    }

    @Override
    public String getPayload() {
        return null;
    }

    @Override
    public String getName() {
        return "deploy_db";
    }

    // This task carries no state over the wire.
    @Override
    public void writeExternal(final ObjectOutput out) throws IOException {
    }

    @Override
    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
    }

    /** No fix-up is possible for a failed deploy. */
    @Override
    public OFixUpdateRecordTask getFixTask(ODistributedRequest iRequest, ODistributedResponse iBadResponse,
                                           ODistributedResponse iGoodResponse) {
        return null;
    }
}
| 1no label
|
server_src_main_java_com_orientechnologies_orient_server_distributed_task_ODeployDatabaseTask.java
|
477 |
/**
 * Client-side registry of {@link ClientProxy} instances. Holds one
 * {@link ClientProxyFactory} per service name and at most one lazily created
 * proxy per (service, object name) pair. Also bridges distributed-object
 * lifecycle events to registered {@link DistributedObjectListener}s.
 */
public final class ProxyManager {

    private static final ILogger LOGGER = Logger.getLogger(ProxyManager.class);
    // Every ClientProxy subclass is expected to expose a
    // (instanceName, serviceName, objectId) constructor; see instantiateClientProxy.
    private static final Class[] CONSTRUCTOR_ARGUMENT_TYPES = new Class[]{String.class, String.class, String.class};

    private final HazelcastClient client;
    // service name -> factory that builds proxies for that service
    private final ConcurrentMap<String, ClientProxyFactory> proxyFactories = new ConcurrentHashMap<String, ClientProxyFactory>();
    // (service, object name) -> future resolving to the singleton proxy instance
    private final ConcurrentMap<ObjectNamespace, ClientProxyFuture> proxies = new ConcurrentHashMap<ObjectNamespace, ClientProxyFuture>();

    public ProxyManager(HazelcastClient client) {
        this.client = client;
        // Hook up any DistributedObjectListener declared in the client configuration.
        final List<ListenerConfig> listenerConfigs = client.getClientConfig().getListenerConfigs();
        if (listenerConfigs != null && !listenerConfigs.isEmpty()) {
            for (ListenerConfig listenerConfig : listenerConfigs) {
                if (listenerConfig.getImplementation() instanceof DistributedObjectListener) {
                    addDistributedObjectListener((DistributedObjectListener) listenerConfig.getImplementation());
                }
            }
        }
    }

    /**
     * Registers the built-in proxy factories for all standard services, plus any
     * custom factories declared in the given configuration.
     */
    public void init(ClientConfig config) {
        // register defaults
        register(MapService.SERVICE_NAME, ClientMapProxy.class);
        register(QueueService.SERVICE_NAME, ClientQueueProxy.class);
        register(MultiMapService.SERVICE_NAME, ClientMultiMapProxy.class);
        register(ListService.SERVICE_NAME, ClientListProxy.class);
        register(SetService.SERVICE_NAME, ClientSetProxy.class);
        register(SemaphoreService.SERVICE_NAME, ClientSemaphoreProxy.class);
        register(TopicService.SERVICE_NAME, ClientTopicProxy.class);
        register(AtomicLongService.SERVICE_NAME, ClientAtomicLongProxy.class);
        register(AtomicReferenceService.SERVICE_NAME, ClientAtomicReferenceProxy.class);
        register(DistributedExecutorService.SERVICE_NAME, ClientExecutorServiceProxy.class);
        register(LockServiceImpl.SERVICE_NAME, ClientLockProxy.class);
        register(CountDownLatchService.SERVICE_NAME, ClientCountDownLatchProxy.class);
        register(MapReduceService.SERVICE_NAME, ClientMapReduceProxy.class);
        register(ReplicatedMapService.SERVICE_NAME, ClientReplicatedMapProxy.class);
        // The id generator needs a backing IAtomicLong, so it gets a custom factory.
        register(IdGeneratorService.SERVICE_NAME, new ClientProxyFactory() {
            public ClientProxy create(String id) {
                String instanceName = client.getName();
                IAtomicLong atomicLong = client.getAtomicLong(IdGeneratorService.ATOMIC_LONG_NAME + id);
                return new ClientIdGeneratorProxy(instanceName, IdGeneratorService.SERVICE_NAME, id, atomicLong);
            }
        });
        for (ProxyFactoryConfig proxyFactoryConfig : config.getProxyFactoryConfigs()) {
            try {
                ClassLoader classLoader = config.getClassLoader();
                String className = proxyFactoryConfig.getClassName();
                ClientProxyFactory clientProxyFactory = ClassLoaderUtil.newInstance(classLoader, className);
                register(proxyFactoryConfig.getService(), clientProxyFactory);
            } catch (Exception e) {
                // A broken custom factory must not prevent startup; log and continue.
                LOGGER.severe(e);
            }
        }
    }

    /**
     * Registers a factory for the given service.
     *
     * @throws IllegalArgumentException if a factory is already registered for the service
     */
    public void register(String serviceName, ClientProxyFactory factory) {
        if (proxyFactories.putIfAbsent(serviceName, factory) != null) {
            throw new IllegalArgumentException("Factory for service: " + serviceName + " is already registered!");
        }
    }

    /**
     * Registers a reflection-based factory that instantiates the given proxy class
     * through its (instanceName, serviceName, objectId) constructor.
     */
    public void register(final String serviceName, final Class<? extends ClientProxy> proxyType) {
        try {
            register(serviceName, new ClientProxyFactory() {
                @Override
                public ClientProxy create(String id) {
                    String instanceName = client.getName();
                    return instantiateClientProxy(proxyType, instanceName, serviceName, id);
                }
            });
        } catch (Exception e) {
            throw new HazelcastException("Could not initialize Proxy", e);
        }
    }

    /**
     * Returns the proxy for the given service/object pair, creating and
     * initializing it on first access. Concurrent callers for the same pair all
     * receive the same instance: the losing racer discards its freshly created
     * (never initialized) proxy and waits on the winner's future.
     */
    public ClientProxy getProxy(String service, String id) {
        final ObjectNamespace ns = new DefaultObjectNamespace(service, id);
        ClientProxyFuture proxyFuture = proxies.get(ns);
        if (proxyFuture != null) {
            return proxyFuture.get();
        }
        final ClientProxyFactory factory = proxyFactories.get(service);
        if (factory == null) {
            throw new IllegalArgumentException("No factory registered for service: " + service);
        }
        final ClientProxy clientProxy = factory.create(id);
        proxyFuture = new ClientProxyFuture();
        final ClientProxyFuture current = proxies.putIfAbsent(ns, proxyFuture);
        if (current != null) {
            return current.get();
        }
        try {
            initialize(clientProxy);
        } catch (Exception e) {
            // Initialization failed: unregister and propagate the failure to waiters.
            proxies.remove(ns);
            proxyFuture.set(e);
            throw ExceptionUtil.rethrow(e);
        }
        proxyFuture.set(clientProxy);
        return clientProxy;
    }

    /**
     * Unregisters and returns the proxy for the given pair.
     * NOTE(review): throws NullPointerException when no proxy is registered
     * under the namespace — confirm callers guarantee presence.
     */
    public ClientProxy removeProxy(String service, String id) {
        final ObjectNamespace ns = new DefaultObjectNamespace(service, id);
        return proxies.remove(ns).get();
    }

    // Announces the new proxy to the cluster and wires up its client context.
    private void initialize(ClientProxy clientProxy) throws Exception {
        ClientCreateRequest request = new ClientCreateRequest(clientProxy.getName(), clientProxy.getServiceName());
        client.getInvocationService().invokeOnRandomTarget(request).get();
        clientProxy.setContext(new ClientContext(client, this));
    }

    /** Returns all proxies created so far (blocks on any still-initializing proxy). */
    public Collection<? extends DistributedObject> getDistributedObjects() {
        Collection<DistributedObject> objects = new LinkedList<DistributedObject>();
        for (ClientProxyFuture future : proxies.values()) {
            objects.add(future.get());
        }
        return objects;
    }

    /** Notifies every proxy of shutdown and clears the registry. */
    public void destroy() {
        for (ClientProxyFuture future : proxies.values()) {
            future.get().onShutdown();
        }
        proxies.clear();
    }

    /**
     * Subscribes to distributed-object created/destroyed events and returns the
     * listener registration id.
     */
    public String addDistributedObjectListener(final DistributedObjectListener listener) {
        final DistributedObjectListenerRequest request = new DistributedObjectListenerRequest();
        final EventHandler<PortableDistributedObjectEvent> eventHandler = new EventHandler<PortableDistributedObjectEvent>() {
            public void handle(PortableDistributedObjectEvent e) {
                final ObjectNamespace ns = new DefaultObjectNamespace(e.getServiceName(), e.getName());
                ClientProxyFuture future = proxies.get(ns);
                ClientProxy proxy = future == null ? null : future.get();
                if (proxy == null) {
                    // Event for an object we have not touched yet: create its proxy lazily.
                    proxy = getProxy(e.getServiceName(), e.getName());
                }
                DistributedObjectEvent event = new DistributedObjectEvent(e.getEventType(), e.getServiceName(), proxy);
                if (DistributedObjectEvent.EventType.CREATED.equals(e.getEventType())) {
                    listener.distributedObjectCreated(event);
                } else if (DistributedObjectEvent.EventType.DESTROYED.equals(e.getEventType())) {
                    listener.distributedObjectDestroyed(event);
                }
            }
            @Override
            public void onListenerRegister() {
            }
        };
        final ClientContext clientContext = new ClientContext(client, this);
        return ListenerUtil.listen(clientContext, request, null, eventHandler);
    }

    /** Removes a previously registered distributed-object listener by id. */
    public boolean removeDistributedObjectListener(String id) {
        final RemoveDistributedObjectListenerRequest request = new RemoveDistributedObjectListenerRequest(id);
        final ClientContext clientContext = new ClientContext(client, this);
        return ListenerUtil.stopListening(clientContext, request, id);
    }

    /**
     * Minimal future used to publish a proxy (or its initialization failure) to
     * concurrent getProxy callers via wait/notifyAll.
     */
    private static class ClientProxyFuture {

        // Holds either the ClientProxy or the Throwable that broke initialization.
        volatile Object proxy;

        ClientProxy get() {
            if (proxy == null) {
                boolean interrupted = false;
                synchronized (this) {
                    while (proxy == null) {
                        try {
                            wait();
                        } catch (InterruptedException e) {
                            // Keep waiting, but remember to restore the interrupt flag below.
                            interrupted = true;
                        }
                    }
                }
                if (interrupted) {
                    Thread.currentThread().interrupt();
                }
            }
            if (proxy instanceof Throwable) {
                throw ExceptionUtil.rethrow((Throwable)proxy);
            }
            return (ClientProxy)proxy;
        }

        void set(Object o) {
            if (o == null) {
                throw new IllegalArgumentException();
            }
            synchronized (this) {
                proxy = o;
                notifyAll();
            }
        }
    }

    // Reflectively builds a proxy through the conventional three-String constructor.
    private <T> T instantiateClientProxy(Class<T> proxyType, String instanceName, String serviceName, String id) {
        try {
            final Constructor<T> constructor = proxyType.getConstructor(CONSTRUCTOR_ARGUMENT_TYPES);
            return constructor.newInstance(instanceName, serviceName, id);
        } catch (Exception e) {
            throw ExceptionUtil.rethrow(e);
        }
    }
}
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_ProxyManager.java
|
944 |
/**
 * Base request for actions that must run on the elected master node. Carries a
 * configurable timeout used while waiting for a master to be discovered (or
 * re-discovered after a disconnect).
 *
 * @param <T> concrete request type, returned by the fluent setters
 */
public abstract class MasterNodeOperationRequest<T extends MasterNodeOperationRequest> extends ActionRequest<T> {

    // NOTE(review): intentionally non-final so the global default can be overridden.
    public static TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30);

    protected TimeValue masterNodeTimeout = DEFAULT_MASTER_NODE_TIMEOUT;

    /**
     * A timeout value in case the master has not been discovered yet or disconnected.
     */
    @SuppressWarnings("unchecked")
    public final T masterNodeTimeout(TimeValue timeout) {
        this.masterNodeTimeout = timeout;
        return (T) this;
    }

    /**
     * A timeout value in case the master has not been discovered yet or disconnected.
     */
    public final T masterNodeTimeout(String timeout) {
        return masterNodeTimeout(TimeValue.parseTimeValue(timeout, null));
    }

    /** Returns the currently configured master-node timeout. */
    public final TimeValue masterNodeTimeout() {
        return this.masterNodeTimeout;
    }

    // Serialization: the field order here must mirror writeTo() exactly.
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        masterNodeTimeout = TimeValue.readTimeValue(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        masterNodeTimeout.writeTo(out);
    }
}
| 1no label
|
src_main_java_org_elasticsearch_action_support_master_MasterNodeOperationRequest.java
|
20 |
return Iterables.transform(Iterables.filter(set, new Predicate<ByteEntry>() {
@Override
public boolean apply(@Nullable ByteEntry entry) {
return !CHECK_VALUE || entry.value.getInt(0) == value;
}
}), new Function<ByteEntry, Vertex>() {
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java
|
1,607 |
/**
 * Map structure whose values are simple (non-entity) properties rather than full
 * persistent entities. Adds a value property name and display name on top of
 * {@link MapStructure}.
 */
public class SimpleValueMapStructure extends MapStructure {

    private static final long serialVersionUID = 1L;

    private String valuePropertyName;
    private String valuePropertyFriendlyName;

    public SimpleValueMapStructure() {
        super();
    }

    /**
     * @param keyClassName class name of the map key
     * @param keyPropertyName property holding the key
     * @param keyPropertyFriendlyName display name for the key property
     * @param valueClassName class name of the map value
     * @param valuePropertyName property holding the simple value
     * @param valuePropertyFriendlyName display name for the value property
     * @param mapProperty property on the owning entity that holds the map
     * @param mapKeyValueProperty property holding the key value
     */
    public SimpleValueMapStructure(String keyClassName, String keyPropertyName, String keyPropertyFriendlyName, String valueClassName, String valuePropertyName, String valuePropertyFriendlyName, String mapProperty, String mapKeyValueProperty) {
        super(keyClassName, keyPropertyName, keyPropertyFriendlyName, valueClassName, mapProperty, false, mapKeyValueProperty);
        this.valuePropertyFriendlyName = valuePropertyFriendlyName;
        this.valuePropertyName = valuePropertyName;
    }

    public String getValuePropertyName() {
        return valuePropertyName;
    }

    public void setValuePropertyName(String valuePropertyName) {
        this.valuePropertyName = valuePropertyName;
    }

    public String getValuePropertyFriendlyName() {
        return valuePropertyFriendlyName;
    }

    public void setValuePropertyFriendlyName(String valuePropertyFriendlyName) {
        this.valuePropertyFriendlyName = valuePropertyFriendlyName;
    }

    public void accept(PersistencePerspectiveItemVisitor visitor) {
        visitor.visit(this);
    }

    @Override
    public PersistencePerspectiveItem clonePersistencePerspectiveItem() {
        SimpleValueMapStructure mapStructure = new SimpleValueMapStructure();
        mapStructure.setKeyClassName(getKeyClassName());
        mapStructure.setKeyPropertyName(getKeyPropertyName());
        // Bug fix: the key's friendly name was previously copied into the VALUE
        // friendly name setter (and then overwritten below), so the clone never
        // received keyPropertyFriendlyName. Copy it to the matching key setter.
        mapStructure.setKeyPropertyFriendlyName(getKeyPropertyFriendlyName());
        mapStructure.setValueClassName(getValueClassName());
        mapStructure.setMapProperty(getMapProperty());
        mapStructure.setDeleteValueEntity(getDeleteValueEntity());
        mapStructure.valuePropertyName = valuePropertyName;
        mapStructure.valuePropertyFriendlyName = valuePropertyFriendlyName;
        return mapStructure;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof SimpleValueMapStructure)) return false;
        if (!super.equals(o)) return false;

        SimpleValueMapStructure that = (SimpleValueMapStructure) o;

        if (valuePropertyFriendlyName != null ? !valuePropertyFriendlyName.equals(that.valuePropertyFriendlyName) : that.valuePropertyFriendlyName != null)
            return false;
        if (valuePropertyName != null ? !valuePropertyName.equals(that.valuePropertyName) : that.valuePropertyName != null)
            return false;

        return true;
    }

    @Override
    public int hashCode() {
        // Combines the superclass hash with both value-property fields,
        // mirroring the fields compared in equals().
        int result = super.hashCode();
        result = 31 * result + (valuePropertyName != null ? valuePropertyName.hashCode() : 0);
        result = 31 * result + (valuePropertyFriendlyName != null ? valuePropertyFriendlyName.hashCode() : 0);
        return result;
    }
}
| 1no label
|
admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_dto_SimpleValueMapStructure.java
|
275 |
/**
 * Map interceptor used by client tests: decorates reads with a trailing colon,
 * upper-cases stored values and vetoes removal of the value "ISTANBUL".
 * The interceptor carries no state, so the Portable read/write hooks are empty.
 */
public class SimpleClientInterceptor implements MapInterceptor, Portable {

    public static final int ID = 345;

    @Override
    public Object interceptGet(Object value) {
        // Decorate non-null reads; null stays null so "missing" is preserved.
        return value == null ? null : value + ":";
    }

    @Override
    public void afterGet(Object value) {
        // no-op: interception happens in interceptGet only
    }

    @Override
    public Object interceptPut(Object oldValue, Object newValue) {
        // Store the upper-cased string form of the incoming value.
        return newValue.toString().toUpperCase();
    }

    @Override
    public void afterPut(Object value) {
        // no-op
    }

    @Override
    public Object interceptRemove(Object removedValue) {
        // Veto removal of this particular value.
        if (removedValue.equals("ISTANBUL")) {
            throw new RuntimeException("you can not remove this");
        }
        return removedValue;
    }

    @Override
    public void afterRemove(Object value) {
        // no-op
    }

    @Override
    public int getFactoryId() {
        return PortableHelpersFactory.ID;
    }

    @Override
    public int getClassId() {
        return ID;
    }

    @Override
    public void writePortable(PortableWriter writer) throws IOException {
        // stateless: nothing to serialize
    }

    @Override
    public void readPortable(PortableReader reader) throws IOException {
        // stateless: nothing to deserialize
    }
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_helpers_SimpleClientInterceptor.java
|
1,031 |
/**
 * Transport-layer entry point for this action: deserializes the incoming
 * request, executes it locally and streams the result (or the failure) back
 * over the originating channel.
 */
private class TransportHandler extends BaseTransportRequestHandler<Request> {

    @Override
    public Request newInstance() {
        return newRequest();
    }

    @Override
    public String executor() {
        // Handled on the calling thread; the real work is forked via
        // operationThreaded(true) below.
        return ThreadPool.Names.SAME;
    }

    @Override
    public void messageReceived(Request request, final TransportChannel channel) throws Exception {
        // no need to have a threaded listener since we just send back a response
        request.listenerThreaded(false);
        // if we have a local operation, execute it on a thread since we don't spawn
        request.operationThreaded(true);
        execute(request, new ActionListener<Response>() {
            @Override
            public void onResponse(Response result) {
                try {
                    channel.sendResponse(result);
                } catch (Throwable e) {
                    // Sending the response failed: report it through the failure path.
                    onFailure(e);
                }
            }

            @Override
            public void onFailure(Throwable e) {
                try {
                    channel.sendResponse(e);
                } catch (Exception e1) {
                    // Last resort: nothing more we can push to the caller, just log.
                    logger.warn("failed to send response for get", e1);
                }
            }
        });
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_support_single_shard_TransportShardSingleOperationAction.java
|
114 |
{
@Override
public Object doWork( Void state )
{
try
{
tm.rollback();
}
catch ( Exception e )
{
throw new RuntimeException( e );
}
return null;
}
};
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestJtaCompliance.java
|
999 |
/**
 * Immutable bundle produced by the primary shard operation: the response to
 * hand back to the client, the request to replay on the replicas, and an
 * opaque caller-supplied payload.
 */
public static class PrimaryResponse<Response, ReplicaRequest> {

    private final ReplicaRequest replicaRequest;
    private final Response response;
    private final Object payload;

    public PrimaryResponse(ReplicaRequest replicaRequest, Response response, Object payload) {
        this.replicaRequest = replicaRequest;
        this.response = response;
        this.payload = payload;
    }

    /** Response destined for the client. */
    public Response response() {
        return this.response;
    }

    /** Request that should be executed against the replica shards. */
    public ReplicaRequest replicaRequest() {
        return replicaRequest;
    }

    /** Arbitrary state the caller attached alongside the response. */
    public Object payload() {
        return this.payload;
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_support_replication_TransportShardReplicationOperationAction.java
|
32 |
@Service("blOrderItemFieldService")
public class OrderItemFieldServiceImpl extends AbstractRuleBuilderFieldService {

    //TODO: extensibility mechanism, support i18N

    /**
     * Registers every order-item attribute available to the rule builder: the
     * item's own fields plus properties reached through its category, product
     * and sku. The previous implementation repeated the same five-call builder
     * chain for each of the 27 fields; registration is now routed through
     * {@link #addField(String, String, String, String, SupportedFieldType)}.
     */
    @Override
    public void init() {
        addField("rule_orderItemName", "name", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemPrice", "price", "blcOperators_Numeric", "[]", SupportedFieldType.MONEY);
        addField("rule_orderItemQuantity", "quantity", "blcOperators_Numeric", "[]", SupportedFieldType.INTEGER);
        addField("rule_orderItemRetailPrice", "retailPrice", "blcOperators_Numeric", "[]", SupportedFieldType.MONEY);
        addField("rule_orderItemSalePrice", "salePrice", "blcOperators_Numeric", "[]", SupportedFieldType.MONEY);
        addField("rule_orderItemCategoryId", "category.id", "blcOperators_Text", "[]", SupportedFieldType.ID);
        addField("rule_orderItemCategoryName", "category.name", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemCategoryFulfillmentType", "category.fulfillmentType", "blcOperators_Enumeration",
                "blcOptions_FulfillmentType", SupportedFieldType.BROADLEAF_ENUMERATION);
        addField("rule_orderItemInventoryType", "category.inventoryType", "blcOperators_Enumeration",
                "blcOptions_InventoryType", SupportedFieldType.BROADLEAF_ENUMERATION);
        addField("rule_orderItemCategoryUrl", "category.url", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemCategoryUrlKey", "category.urlKey", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemCategoryDescription", "category.description", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemCategoryLongDescription", "category.longDescription", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemProductCanSellWithoutOptions", "product.canSellWithoutOptions", "blcOperators_Boolean", "[]", SupportedFieldType.BOOLEAN);
        addField("rule_orderItemProductUrl", "product.url", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemProductUrlKey", "product.urlKey", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemProductIsFeatured", "product.isFeaturedProduct", "blcOperators_Boolean", "[]", SupportedFieldType.BOOLEAN);
        addField("rule_orderItemProductManufacturer", "product.manufacturer", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemProductModel", "product.model", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemSkuFulfillmentType", "sku.fulfillmentType", "blcOperators_Enumeration",
                "blcOptions_FulfillmentType", SupportedFieldType.BROADLEAF_ENUMERATION);
        addField("rule_orderItemSkuInventoryType", "sku.inventoryType", "blcOperators_Enumeration",
                "blcOptions_InventoryType", SupportedFieldType.BROADLEAF_ENUMERATION);
        addField("rule_orderItemSkuDescription", "sku.description", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemSkuLongDescription", "sku.longDescription", "blcOperators_Text", "[]", SupportedFieldType.STRING);
        addField("rule_orderItemSkuTaxable", "sku.taxable", "blcOperators_Boolean", "[]", SupportedFieldType.BOOLEAN);
        addField("rule_orderItemSkuAvailable", "sku.available", "blcOperators_Boolean", "[]", SupportedFieldType.BOOLEAN);
        addField("rule_orderItemSkuStartDate", "sku.activeStartDate", "blcOperators_Date", "[]", SupportedFieldType.DATE);
        addField("rule_orderItemSkuEndDate", "sku.activeEndDate", "blcOperators_Date", "[]", SupportedFieldType.DATE);
    }

    /**
     * Builds a single rule-builder field entry and appends it to {@code fields}.
     *
     * @param label     label key shown in the rule builder UI
     * @param name      property path evaluated against the order item DTO
     * @param operators operator-set identifier (text, numeric, boolean, date, enumeration)
     * @param options   options-source identifier, or "[]" when the operator set needs none
     * @param type      field type driving widget selection and value conversion
     */
    private void addField(String label, String name, String operators, String options, SupportedFieldType type) {
        fields.add(new FieldData.Builder()
                .label(label)
                .name(name)
                .operators(operators)
                .options(options)
                .type(type)
                .build());
    }

    @Override
    public String getName() {
        return RuleIdentifier.ORDERITEM;
    }

    @Override
    public String getDtoClassName() {
        return "org.broadleafcommerce.core.order.domain.OrderItemImpl";
    }
}
| 0true
|
admin_broadleaf-admin-module_src_main_java_org_broadleafcommerce_admin_web_rulebuilder_service_OrderItemFieldServiceImpl.java
|
1,481 |
/**
 * Keeps the routing table up to date. While this node is the elected master it
 * runs a periodic updater (and reacts to relevant cluster events) that asks the
 * {@link AllocationService} to reroute shards and publishes the resulting
 * cluster state.
 */
public class RoutingService extends AbstractLifecycleComponent<RoutingService> implements ClusterStateListener {

    private static final String CLUSTER_UPDATE_TASK_SOURCE = "routing-table-updater";

    private final ThreadPool threadPool;
    private final ClusterService clusterService;
    private final AllocationService allocationService;
    // Interval between background reroute attempts (defaults to 10s).
    private final TimeValue schedule;

    // Set when a reroute is needed; cleared once the update task is submitted.
    private volatile boolean routingTableDirty = false;
    // Non-null only while this node is master.
    private volatile Future scheduledRoutingTableFuture;

    @Inject
    public RoutingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, AllocationService allocationService) {
        super(settings);
        this.threadPool = threadPool;
        this.clusterService = clusterService;
        this.allocationService = allocationService;
        this.schedule = componentSettings.getAsTime("schedule", timeValueSeconds(10));
        // Registered first so routing updates run before other listeners.
        clusterService.addFirst(this);
    }

    @Override
    protected void doStart() throws ElasticsearchException {
    }

    @Override
    protected void doStop() throws ElasticsearchException {
    }

    @Override
    protected void doClose() throws ElasticsearchException {
        // Stop the background updater and detach from cluster events.
        if (scheduledRoutingTableFuture != null) {
            scheduledRoutingTableFuture.cancel(true);
            scheduledRoutingTableFuture = null;
        }
        clusterService.remove(this);
    }

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        if (event.source().equals(CLUSTER_UPDATE_TASK_SOURCE)) {
            // that's us, ignore this event
            return;
        }
        if (event.state().nodes().localNodeMaster()) {
            // we are master, schedule the routing table updater
            if (scheduledRoutingTableFuture == null) {
                // a new master (us), make sure we reroute shards
                routingTableDirty = true;
                scheduledRoutingTableFuture = threadPool.scheduleWithFixedDelay(new RoutingTableUpdater(), schedule);
            }
            if (event.nodesRemoved()) {
                // if nodes were removed, we don't want to wait for the scheduled task
                // since we want to get primary election as fast as possible
                routingTableDirty = true;
                reroute();
                // Commented out since we make sure to reroute whenever shards changes state or metadata changes state
                // } else if (event.routingTableChanged()) {
                // routingTableDirty = true;
                // reroute();
            } else {
                if (event.nodesAdded()) {
                    // Only new data nodes can receive shards, so only they mark the table dirty.
                    for (DiscoveryNode node : event.nodesDelta().addedNodes()) {
                        if (node.dataNode()) {
                            routingTableDirty = true;
                            break;
                        }
                    }
                }
            }
        } else {
            // No longer master: stop the background updater.
            if (scheduledRoutingTableFuture != null) {
                scheduledRoutingTableFuture.cancel(true);
                scheduledRoutingTableFuture = null;
            }
        }
    }

    /**
     * Submits a cluster-state update that reroutes shards, if a reroute is
     * pending and the service is still running. Never throws: failures are logged.
     */
    private void reroute() {
        try {
            if (!routingTableDirty) {
                return;
            }
            if (lifecycle.stopped()) {
                return;
            }
            clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE, Priority.HIGH, new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    RoutingAllocation.Result routingResult = allocationService.reroute(currentState);
                    if (!routingResult.changed()) {
                        // no state changed
                        return currentState;
                    }
                    return ClusterState.builder(currentState).routingResult(routingResult).build();
                }

                @Override
                public void onFailure(String source, Throwable t) {
                    logger.error("unexpected failure during [{}]", t, source);
                }
            });
            // NOTE(review): the flag is cleared when the task is submitted, not
            // when it completes.
            routingTableDirty = false;
        } catch (Exception e) {
            logger.warn("Failed to reroute routing table", e);
        }
    }

    // Periodic trigger installed while this node is master.
    private class RoutingTableUpdater implements Runnable {

        @Override
        public void run() {
            reroute();
        }
    }
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_routing_RoutingService.java
|
1,094 |
/**
 * Base class for SQL functions that receive their parsed parameters once, at
 * configuration time, before being executed.
 */
public abstract class OSQLFunctionConfigurableAbstract extends OSQLFunctionAbstract {

  // Parameters captured by config(); available to subclasses during execution.
  protected Object[] configuredParameters;

  protected OSQLFunctionConfigurableAbstract(final String iName, final int iMinParams, final int iMaxParams) {
    super(iName, iMinParams, iMaxParams);
  }

  /** Stores the configured parameters for later use by the function body. */
  @Override
  public void config(final Object[] iConfiguredParameters) {
    configuredParameters = iConfiguredParameters;
  }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_functions_OSQLFunctionConfigurableAbstract.java
|
2,890 |
/**
 * Base class for predicates that evaluate a single named attribute of a map
 * entry. Resolves and caches the attribute's {@link AttributeType} the first
 * time a value is converted.
 */
public abstract static class AbstractPredicate implements IndexAwarePredicate, DataSerializable {

    protected String attribute;
    // Resolved lazily from the first entry inspected; volatile for cross-thread
    // visibility, transient because it is derived (not serialized) state.
    private transient volatile AttributeType attributeType;

    protected AbstractPredicate() {
    }

    protected AbstractPredicate(String attribute) {
        this.attribute = attribute;
    }

    /**
     * Converts the queried attribute value into the same type as the stored
     * entry value so the two can be compared.
     */
    protected Comparable convert(Map.Entry mapEntry, Comparable entryValue, Comparable attributeValue) {
        if (attributeValue == null) {
            return null;
        }
        if (attributeValue instanceof IndexImpl.NullObject) {
            return IndexImpl.NULL;
        }
        AttributeType type = attributeType;
        if (type == null) {
            // First use: resolve the attribute type from this entry and cache it.
            QueryableEntry queryableEntry = (QueryableEntry) mapEntry;
            type = queryableEntry.getAttributeType(attribute);
            attributeType = type;
        }
        if (type == AttributeType.ENUM) {
            // if attribute type is enum, convert given attribute to enum string
            return type.getConverter().convert(attributeValue);
        } else {
            // if given attribute value is already in expected type then there's no need for conversion.
            if (entryValue != null && entryValue.getClass().isAssignableFrom(attributeValue.getClass())) {
                return attributeValue;
            } else if (type != null) {
                return type.getConverter().convert(attributeValue);
            } else {
                throw new QueryException("Unknown attribute type: " + attributeValue.getClass());
            }
        }
    }

    @Override
    public boolean isIndexed(QueryContext queryContext) {
        return getIndex(queryContext) != null;
    }

    protected Index getIndex(QueryContext queryContext) {
        return queryContext.getIndex(attribute);
    }

    /**
     * Reads the attribute from the entry; enum values are normalized to their
     * string form so they compare like plain strings.
     */
    protected Comparable readAttribute(Map.Entry entry) {
        QueryableEntry queryableEntry = (QueryableEntry) entry;
        Comparable val = queryableEntry.getAttribute(attribute);
        if (val != null && val.getClass().isEnum()) {
            val = val.toString();
        }
        return val;
    }

    // Serialization: only the attribute name travels; attributeType is re-resolved lazily.
    @Override
    public void writeData(ObjectDataOutput out) throws IOException {
        out.writeUTF(attribute);
    }

    @Override
    public void readData(ObjectDataInput in) throws IOException {
        attribute = in.readUTF();
    }
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_query_Predicates.java
|
5,092 |
// Sends the QUERY_FETCH request to the target node; the deserialized result
// (or any transport failure) is relayed straight to the supplied listener.
transportService.sendRequest(node, SearchQueryFetchTransportHandler.ACTION, request, new BaseTransportResponseHandler<QueryFetchSearchResult>() {

            @Override
            public QueryFetchSearchResult newInstance() {
                return new QueryFetchSearchResult();
            }

            @Override
            public void handleResponse(QueryFetchSearchResult response) {
                listener.onResult(response);
            }

            @Override
            public void handleException(TransportException exp) {
                listener.onFailure(exp);
            }

            @Override
            public String executor() {
                // Handle the response on the network thread; no hand-off needed.
                return ThreadPool.Names.SAME;
            }
        });
| 1no label
|
src_main_java_org_elasticsearch_search_action_SearchServiceTransportAction.java
|
291 |
/**
 * Script formatter for the SQL language.
 */
public class OSQLScriptFormatter implements OScriptFormatter {

  // No definition wrapper is produced for SQL functions: always returns null.
  public String getFunctionDefinition(final OFunction f) {
    return null;
  }

  /** Returns the function's raw code as the invocation; arguments are not yet bound. */
  @Override
  public String getFunctionInvoke(final OFunction iFunction, final Object[] iArgs) {
    // TODO: BIND ARGS
    return iFunction.getCode();
  }
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_command_script_formatter_OSQLScriptFormatter.java
|
690 |
/**
 * Immutable description of a single failed bulk item: the target document
 * coordinates plus the failure message and HTTP status.
 */
public static class Failure {

    private final String index;
    private final String type;
    private final String id;
    private final String message;
    private final RestStatus status;

    /** Derives message and status from the given throwable. */
    public Failure(String index, String type, String id, Throwable t) {
        this(index, type, id, ExceptionsHelper.detailedMessage(t), ExceptionsHelper.status(t));
    }

    public Failure(String index, String type, String id, String message, RestStatus status) {
        this.index = index;
        this.type = type;
        this.id = id;
        this.message = message;
        this.status = status;
    }

    /**
     * The index name of the action.
     */
    public String getIndex() {
        return index;
    }

    /**
     * The type of the action.
     */
    public String getType() {
        return type;
    }

    /**
     * The id of the action.
     */
    public String getId() {
        return id;
    }

    /**
     * The failure message.
     */
    public String getMessage() {
        return message;
    }

    /**
     * The rest status.
     */
    public RestStatus getStatus() {
        return status;
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_bulk_BulkItemResponse.java
|
54 |
/**
 * Bridges cluster-level events to {@link ClusterMemberListener}s: replays the
 * already-elected roles when this instance joins, forwards coordinator
 * elections, and announces unavailability for members that left.
 */
private class ClusterListenerImpl extends ClusterListener.Adapter
{
    @Override
    public void enteredCluster( ClusterConfiguration clusterConfiguration )
    {
        // Catch up with elections
        for ( Map.Entry<String, InstanceId> memberRoles : clusterConfiguration.getRoles().entrySet() )
        {
            elected( memberRoles.getKey(), memberRoles.getValue(),
                    clusterConfiguration.getUriForId( memberRoles.getValue() ) );
        }
    }

    @Override
    public void elected( String role, final InstanceId instanceId, final URI electedMember )
    {
        // Only coordinator elections are relevant here; other roles are ignored.
        if ( role.equals( ClusterConfiguration.COORDINATOR ) )
        {
            // Use the cluster coordinator as master for HA
            Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
            {
                @Override
                public void notify( ClusterMemberListener listener )
                {
                    listener.coordinatorIsElected( instanceId );
                }
            } );
        }
    }

    @Override
    public void leftCluster( final InstanceId member )
    {
        // Notify unavailability of members
        Listeners.notifyListeners( listeners, new Listeners.Notification<ClusterMemberListener>()
        {
            @Override
            public void notify( ClusterMemberListener listener )
            {
                // Report every role the member was known to provide as unavailable.
                for ( MemberIsAvailable memberIsAvailable : clusterMembersSnapshot.getCurrentAvailable( member ) )
                {
                    listener.memberIsUnavailable( memberIsAvailable.getRole(), member );
                }
            }
        } );
        // Remove the member from the snapshot after listeners have been told.
        clusterMembersSnapshot.unavailableMember( member );
    }
}
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_member_paxos_PaxosClusterMemberEvents.java
|
1,537 |
public static class Balancer {
        private final ESLogger logger;
        // node id -> balancing model for that node
        private final Map<String, ModelNode> nodes = new HashMap<String, ModelNode>();
        private final HashSet<String> indices = new HashSet<String>();
        private final RoutingAllocation allocation;
        private final RoutingNodes routingNodes;
        private final WeightFunction weight;
        // Balancing threshold supplied by the allocator; semantics defined by the callers.
        private final float threshold;
        private final MetaData metaData;
        // Selects shards that are already assigned to a node.
        private final Predicate<MutableShardRouting> assignedFilter = new Predicate<MutableShardRouting>() {
            @Override
            public boolean apply(MutableShardRouting input) {
                return input.assignedToNode();
            }
        };
public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) {
this.logger = logger;
this.allocation = allocation;
this.weight = weight;
this.threshold = threshold;
this.routingNodes = allocation.routingNodes();
for (RoutingNode node : routingNodes) {
nodes.put(node.nodeId(), new ModelNode(node.nodeId()));
}
metaData = routingNodes.metaData();
}
/**
* Returns an array view on the nodes in the balancer. Nodes should not be removed from this list.
*/
private ModelNode[] nodesArray() {
return nodes.values().toArray(new ModelNode[nodes.size()]);
}
/**
* Returns the average of shards per node for the given index
*/
public float avgShardsPerNode(String index) {
return ((float) metaData.index(index).totalNumberOfShards()) / nodes.size();
}
/**
* Returns the global average of shards per node
*/
public float avgShardsPerNode() {
return ((float) metaData.totalNumberOfShards()) / nodes.size();
}
/**
* Returns the global average of primaries per node
*/
public float avgPrimariesPerNode() {
return ((float) metaData.numberOfShards()) / nodes.size();
}
/**
* Returns the average of primaries per node for the given index
*/
public float avgPrimariesPerNode(String index) {
return ((float) metaData.index(index).numberOfShards()) / nodes.size();
}
/**
* Returns a new {@link NodeSorter} that sorts the nodes based on their
* current weight with respect to the index passed to the sorter. The
* returned sorter is not sorted. Use {@link NodeSorter#reset(org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Operation, String)}
* to sort based on an index.
*/
private NodeSorter newNodeSorter() {
return new NodeSorter(nodesArray(), weight, this);
}
private boolean initialize(RoutingNodes routing, RoutingNodes.UnassignedShards unassigned) {
if (logger.isTraceEnabled()) {
logger.trace("Start distributing Shards");
}
indices.addAll(allocation.routingTable().indicesRouting().keySet());
buildModelFromAssigned(routing.shards(assignedFilter));
return allocateUnassigned(unassigned, routing.ignoredUnassigned());
}
private static boolean lessThan(float delta, float threshold) {
/* deltas close to the threshold are "rounded" to the threshold manually
to prevent floating point problems if the delta is very close to the
threshold ie. 1.000000002 which can trigger unnecessary balance actions*/
return delta <= threshold + 0.001f;
}
/**
* Balances the nodes on the cluster model according to the weight
* function. The configured threshold is the minimum delta between the
* weight of the maximum node and the minimum node according to the
* {@link WeightFunction}. This weight is calculated per index to
* distribute shards evenly per index. The balancer tries to relocate
* shards only if the delta exceeds the threshold. If the default case
* the threshold is set to <tt>1.0</tt> to enforce gaining relocation
* only, or in other words relocations that move the weight delta closer
* to <tt>0.0</tt>
*
* @return <code>true</code> if the current configuration has been
* changed, otherwise <code>false</code>
*/
public boolean balance() {
if (this.nodes.isEmpty()) {
/* with no nodes this is pointless */
return false;
}
if (logger.isTraceEnabled()) {
logger.trace("Start balancing cluster");
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
boolean changed = initialize(routingNodes, unassigned);
if (!changed) {
NodeSorter sorter = newNodeSorter();
if (nodes.size() > 1) { /* skip if we only have one node */
for (String index : buildWeightOrderedIndidces(Operation.BALANCE, sorter)) {
sorter.reset(Operation.BALANCE, index);
final float[] weights = sorter.weights;
final ModelNode[] modelNodes = sorter.modelNodes;
int lowIdx = 0;
int highIdx = weights.length - 1;
while (true) {
final ModelNode minNode = modelNodes[lowIdx];
final ModelNode maxNode = modelNodes[highIdx];
advance_range:
if (maxNode.numShards(index) > 0) {
float delta = weights[highIdx] - weights[lowIdx];
delta = lessThan(delta, threshold) ? delta : sorter.weight(Operation.THRESHOLD_CHECK, maxNode) - sorter.weight(Operation.THRESHOLD_CHECK, minNode);
if (lessThan(delta, threshold)) {
if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta?
&& (weights[highIdx-1] - weights[0] > threshold) // check if we need to break at all
) {
/* This is a special case if allocations from the "heaviest" to the "lighter" nodes is not possible
* due to some allocation decider restrictions like zone awareness. if one zone has for instance
* less nodes than another zone. so one zone is horribly overloaded from a balanced perspective but we
* can't move to the "lighter" shards since otherwise the zone would go over capacity.
*
* This break jumps straight to the condition below were we start moving from the high index towards
* the low index to shrink the window we are considering for balance from the other direction.
* (check shrinking the window from MAX to MIN)
* See #3580
*/
break advance_range;
}
if (logger.isTraceEnabled()) {
logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]",
index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
}
break;
}
if (logger.isTraceEnabled()) {
logger.trace("Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]",
maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);
}
/* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes.
* a relocation must bring us closer to the balance if we only achieve the same delta the relocation is useless */
if (tryRelocateShard(Operation.BALANCE, minNode, maxNode, index, delta)) {
/*
* TODO we could be a bit smarter here, we don't need to fully sort necessarily
* we could just find the place to insert linearly but the win might be minor
* compared to the added complexity
*/
weights[lowIdx] = sorter.weight(Operation.BALANCE, modelNodes[lowIdx]);
weights[highIdx] = sorter.weight(Operation.BALANCE, modelNodes[highIdx]);
sorter.sort(0, weights.length);
lowIdx = 0;
highIdx = weights.length - 1;
changed = true;
continue;
}
}
if (lowIdx < highIdx - 1) {
/* Shrinking the window from MIN to MAX
* we can't move from any shard from the min node lets move on to the next node
* and see if the threshold still holds. We either don't have any shard of this
* index on this node of allocation deciders prevent any relocation.*/
lowIdx++;
} else if (lowIdx > 0) {
/* Shrinking the window from MAX to MIN
* now we go max to min since obviously we can't move anything to the max node
* lets pick the next highest */
lowIdx = 0;
highIdx--;
} else {
/* we are done here, we either can't relocate anymore or we are balanced */
break;
}
}
}
}
}
routingNodes.unassigned().transactionEnd(unassigned);
return changed;
}
/**
* This builds a initial index ordering where the indices are returned
* in most unbalanced first. We need this in order to prevent over
* allocations on added nodes from one index when the weight parameters
* for global balance overrule the index balance at an intermediate
* state. For example this can happen if we have 3 nodes and 3 indices
* with 3 shards and 1 shard. At the first stage all three nodes hold
* 2 shard for each index. now we add another node and the first index
* is balanced moving 3 two of the nodes over to the new node since it
* has no shards yet and global balance for the node is way below
* average. To re-balance we need to move shards back eventually likely
* to the nodes we relocated them from.
*/
private String[] buildWeightOrderedIndidces(Operation operation, NodeSorter sorter) {
final String[] indices = this.indices.toArray(new String[this.indices.size()]);
final float[] deltas = new float[indices.length];
for (int i = 0; i < deltas.length; i++) {
sorter.reset(operation, indices[i]);
deltas[i] = sorter.delta();
}
new IntroSorter() {
float pivotWeight;
@Override
protected void swap(int i, int j) {
final String tmpIdx = indices[i];
indices[i] = indices[j];
indices[j] = tmpIdx;
final float tmpDelta = deltas[i];
deltas[i] = deltas[j];
deltas[j] = tmpDelta;
}
@Override
protected int compare(int i, int j) {
return Float.compare(deltas[j], deltas[i]);
}
@Override
protected void setPivot(int i) {
pivotWeight = deltas[i];
}
@Override
protected int comparePivot(int j) {
return Float.compare(deltas[j], pivotWeight);
}
}.sort(0, deltas.length);
return indices;
}
/**
* This function executes a move operation moving the given shard from
* the given node to the minimal eligible node with respect to the
* weight function. Iff the shard is moved the shard will be set to
* {@link ShardRoutingState#RELOCATING} and a shadow instance of this
* shard is created with an incremented version in the state
* {@link ShardRoutingState#INITIALIZING}.
*
* @return <code>true</code> iff the shard has successfully been moved.
*/
public boolean move(MutableShardRouting shard, RoutingNode node ) {
if (nodes.isEmpty() || !shard.started()) {
/* with no nodes or a not started shard this is pointless */
return false;
}
if (logger.isTraceEnabled()) {
logger.trace("Try moving shard [{}] from [{}]", shard, node);
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
boolean changed = initialize(routingNodes, unassigned);
if (!changed) {
final ModelNode sourceNode = nodes.get(node.nodeId());
assert sourceNode != null;
final NodeSorter sorter = newNodeSorter();
sorter.reset(Operation.MOVE, shard.getIndex());
final ModelNode[] nodes = sorter.modelNodes;
assert sourceNode.containsShard(shard);
/*
* the sorter holds the minimum weight node first for the shards index.
* We now walk through the nodes until we find a node to allocate the shard.
* This is not guaranteed to be balanced after this operation we still try best effort to
* allocate on the minimal eligible node.
*/
for (ModelNode currentNode : nodes) {
if (currentNode.getNodeId().equals(node.nodeId())) {
continue;
}
RoutingNode target = routingNodes.node(currentNode.getNodeId());
Decision decision = allocation.deciders().canAllocate(shard, target, allocation);
if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
sourceNode.removeShard(shard);
final MutableShardRouting initializingShard = new MutableShardRouting(shard.index(), shard.id(), currentNode.getNodeId(),
shard.currentNodeId(), shard.restoreSource(), shard.primary(), INITIALIZING, shard.version() + 1);
currentNode.addShard(initializingShard, decision);
routingNodes.assign(initializingShard, target.nodeId());
routingNodes.relocate(shard, target.nodeId()); // set the node to relocate after we added the initializing shard
if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shard, currentNode.getNodeId());
}
changed = true;
break;
}
}
}
routingNodes.unassigned().transactionEnd(unassigned);
return changed;
}
/**
* Builds the internal model from all shards in the given
* {@link Iterable}. All shards in the {@link Iterable} must be assigned
* to a node. This method will skip shards in the state
* {@link ShardRoutingState#RELOCATING} since each relocating shard has
* a shadow shard in the state {@link ShardRoutingState#INITIALIZING}
* on the target node which we respect during the allocation / balancing
* process. In short, this method recreates the status-quo in the cluster.
*/
private void buildModelFromAssigned(Iterable<MutableShardRouting> shards) {
for (MutableShardRouting shard : shards) {
assert shard.assignedToNode();
/* we skip relocating shards here since we expect an initializing shard with the same id coming in */
if (shard.state() == RELOCATING) {
continue;
}
ModelNode node = nodes.get(shard.currentNodeId());
assert node != null;
node.addShard(shard, Decision.single(Type.YES, "Already allocated on node", node.getNodeId()));
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId());
}
}
}
/**
* Allocates all given shards on the minimal eligable node for the shards index
* with respect to the weight function. All given shards must be unassigned.
*/
private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned, List<MutableShardRouting> ignoredUnassigned) {
assert !nodes.isEmpty();
if (logger.isTraceEnabled()) {
logger.trace("Start allocating unassigned shards");
}
if (unassigned.isEmpty()) {
return false;
}
boolean changed = false;
/*
* TODO: We could be smarter here and group the shards by index and then
* use the sorter to save some iterations.
*/
final AllocationDeciders deciders = allocation.deciders();
final Comparator<MutableShardRouting> comparator = new Comparator<MutableShardRouting>() {
@Override
public int compare(MutableShardRouting o1,
MutableShardRouting o2) {
if (o1.primary() ^ o2.primary()) {
return o1.primary() ? -1 : o2.primary() ? 1 : 0;
}
final int indexCmp;
if ((indexCmp = o1.index().compareTo(o2.index())) == 0) {
return o1.getId() - o2.getId();
}
return indexCmp;
}
};
/*
* we use 2 arrays and move replicas to the second array once we allocated an identical
* replica in the current iteration to make sure all indices get allocated in the same manner.
* The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with 2 replica and 1 shard would look like:
* [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)]
* if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
* the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ingoreUnassigned.
*/
MutableShardRouting[] primary = unassigned.drain();
MutableShardRouting[] secondary = new MutableShardRouting[primary.length];
int secondaryLength = 0;
int primaryLength = primary.length;
ArrayUtil.timSort(primary, comparator);
final Set<ModelNode> throttledNodes = new IdentityHashSet<ModelNode>();
do {
for (int i = 0; i < primaryLength; i++) {
MutableShardRouting shard = primary[i];
if (!shard.primary()) {
boolean drop = deciders.canAllocate(shard, allocation).type() == Type.NO;
if (drop) {
ignoredUnassigned.add(shard);
while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
ignoredUnassigned.add(primary[++i]);
}
continue;
} else {
while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
secondary[secondaryLength++] = primary[++i];
}
}
}
assert !shard.assignedToNode() : shard;
/* find an node with minimal weight we can allocate on*/
float minWeight = Float.POSITIVE_INFINITY;
ModelNode minNode = null;
Decision decision = null;
if (throttledNodes.size() < nodes.size()) {
/* Don't iterate over an identity hashset here the
* iteration order is different for each run and makes testing hard */
for (ModelNode node : nodes.values()) {
if (throttledNodes.contains(node)) {
continue;
}
/*
* The shard we add is removed below to simulate the
* addition for weight calculation we use Decision.ALWAYS to
* not violate the not null condition.
*/
if (!node.containsShard(shard)) {
node.addShard(shard, Decision.ALWAYS);
float currentWeight = weight.weight(Operation.ALLOCATE, this, node, shard.index());
/*
* Remove the shard from the node again this is only a
* simulation
*/
Decision removed = node.removeShard(shard);
assert removed != null;
/*
* Unless the operation is not providing any gains we
* don't check deciders
*/
if (currentWeight <= minWeight) {
Decision currentDecision = deciders.canAllocate(shard, routingNodes.node(node.getNodeId()), allocation);
NOUPDATE:
if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) {
if (currentWeight == minWeight) {
/* we have an equal weight tie breaking:
* 1. if one decision is YES prefer it
* 2. prefer the node that holds the primary for this index with the next id in the ring ie.
* for the 3 shards 2 replica case we try to build up:
* 1 2 0
* 2 0 1
* 0 1 2
* such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater
* than the id of the shard we need to assign. This works find when new indices are created since
* primaries are added first and we only add one shard set a time in this algorithm.
*/
if (currentDecision.type() == decision.type()) {
final int repId = shard.id();
final int nodeHigh = node.highestPrimary(shard.index());
final int minNodeHigh = minNode.highestPrimary(shard.index());
if ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) && (nodeHigh < minNodeHigh))
|| (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)) {
minNode = node;
minWeight = currentWeight;
decision = currentDecision;
} else {
break NOUPDATE;
}
} else if (currentDecision.type() != Type.YES) {
break NOUPDATE;
}
}
minNode = node;
minWeight = currentWeight;
decision = currentDecision;
}
}
}
}
}
assert decision != null && minNode != null || decision == null && minNode == null;
if (minNode != null) {
minNode.addShard(shard, decision);
if (decision.type() == Type.YES) {
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
}
routingNodes.assign(shard, routingNodes.node(minNode.getNodeId()).nodeId());
changed = true;
continue; // don't add to ignoreUnassigned
} else {
final RoutingNode node = routingNodes.node(minNode.getNodeId());
if (deciders.canAllocate(node, allocation).type() != Type.YES) {
if (logger.isTraceEnabled()) {
logger.trace("Can not allocate on node [{}] remove from round decisin [{}]", node, decision.type());
}
throttledNodes.add(minNode);
}
}
if (logger.isTraceEnabled()) {
logger.trace("No eligable node found to assign shard [{}] decision [{}]", shard, decision.type());
}
} else if (logger.isTraceEnabled()) {
logger.trace("No Node found to assign shard [{}]", shard);
}
ignoredUnassigned.add(shard);
if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas
while(secondaryLength > 0 && comparator.compare(shard, secondary[secondaryLength-1]) == 0) {
ignoredUnassigned.add(secondary[--secondaryLength]);
}
}
}
primaryLength = secondaryLength;
MutableShardRouting[] tmp = primary;
primary = secondary;
secondary = tmp;
secondaryLength = 0;
} while (primaryLength > 0);
// clear everything we have either added it or moved to ingoreUnassigned
return changed;
}
/**
* Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the
* balance model. Iff this method returns a <code>true</code> the relocation has already been executed on the
* simulation model as well as on the cluster.
*/
private boolean tryRelocateShard(Operation operation, ModelNode minNode, ModelNode maxNode, String idx, float minCost) {
final ModelIndex index = maxNode.getIndex(idx);
Decision decision = null;
if (index != null) {
if (logger.isTraceEnabled()) {
logger.trace("Try relocating shard for index index [{}] from node [{}] to node [{}]", idx, maxNode.getNodeId(),
minNode.getNodeId());
}
final RoutingNode node = routingNodes.node(minNode.getNodeId());
MutableShardRouting candidate = null;
final AllocationDeciders deciders = allocation.deciders();
/* make a copy since we modify this list in the loop */
final ArrayList<MutableShardRouting> shards = new ArrayList<MutableShardRouting>(index.getAllShards());
for (MutableShardRouting shard : shards) {
if (shard.started()) {
// skip initializing, unassigned and relocating shards we can't relocate them anyway
Decision allocationDecision = deciders.canAllocate(shard, node, allocation);
Decision rebalanceDecision = deciders.canRebalance(shard, allocation);
if (((allocationDecision.type() == Type.YES) || (allocationDecision.type() == Type.THROTTLE))
&& ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) {
Decision srcDecision;
if ((srcDecision = maxNode.removeShard(shard)) != null) {
minNode.addShard(shard, srcDecision);
final float delta = weight.weight(operation, this, minNode, idx) - weight.weight(operation, this, maxNode, idx);
if (delta < minCost ||
(candidate != null && delta == minCost && candidate.id() > shard.id())) {
/* this last line is a tie-breaker to make the shard allocation alg deterministic
* otherwise we rely on the iteration order of the index.getAllShards() which is a set.*/
minCost = delta;
candidate = shard;
decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
}
minNode.removeShard(shard);
maxNode.addShard(shard, srcDecision);
}
}
}
}
if (candidate != null) {
/* allocate on the model even if not throttled */
maxNode.removeShard(candidate);
minNode.addShard(candidate, decision);
if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */
if (logger.isTraceEnabled()) {
logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(),
minNode.getNodeId());
}
/* now allocate on the cluster - if we are started we need to relocate the shard */
if (candidate.started()) {
RoutingNode lowRoutingNode = routingNodes.node(minNode.getNodeId());
routingNodes.assign(new MutableShardRouting(candidate.index(), candidate.id(), lowRoutingNode.nodeId(), candidate
.currentNodeId(), candidate.restoreSource(), candidate.primary(), INITIALIZING, candidate.version() + 1), lowRoutingNode.nodeId());
routingNodes.relocate(candidate, lowRoutingNode.nodeId());
} else {
assert candidate.unassigned();
routingNodes.assign(candidate, routingNodes.node(minNode.getNodeId()).nodeId());
}
return true;
}
}
}
if (logger.isTraceEnabled()) {
logger.trace("Couldn't find shard to relocate from node [{}] to node [{}] allocation decision [{}]", maxNode.getNodeId(),
minNode.getNodeId(), decision == null ? "NO" : decision.type().name());
}
return false;
}
}
| 0true
|
src_main_java_org_elasticsearch_cluster_routing_allocation_allocator_BalancedShardsAllocator.java
|
535 |
@RunWith(HazelcastSerialClassRunner.class)
@Category(SlowTest.class)
public class ClientXaTest {
static final Random random = new Random(System.currentTimeMillis());
UserTransactionManager tm = null;
public void cleanAtomikosLogs() {
try {
File currentDir = new File(".");
final File[] tmLogs = currentDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
if (name.endsWith(".epoch") || name.startsWith("tmlog")) {
return true;
}
return false;
}
});
for (File tmLog : tmLogs) {
tmLog.delete();
}
} catch (Exception e) {
e.printStackTrace();
}
}
@Before
public void init() throws SystemException {
cleanAtomikosLogs();
tm = new UserTransactionManager();
tm.setTransactionTimeout(60);
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@After
public void cleanup() {
tm.close();
cleanAtomikosLogs();
HazelcastClient.shutdownAll();
Hazelcast.shutdownAll();
}
@Test
public void testRollbackAfterNodeShutdown() throws Exception {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance();
final HazelcastInstance client = HazelcastClient.newHazelcastClient();
tm.begin();
final TransactionContext context = client.newTransactionContext();
final XAResource xaResource = context.getXaResource();
final Transaction transaction = tm.getTransaction();
transaction.enlistResource(xaResource);
boolean error = false;
try {
final TransactionalMap m = context.getMap("m");
m.put("key", "value");
throw new RuntimeException("Exception for rolling back");
} catch (Exception e) {
error = true;
} finally {
close(error, xaResource);
}
assertNull(client.getMap("m").get("key"));
}
@Test
public void testRecovery() throws Exception {
final HazelcastInstance instance1 = Hazelcast.newHazelcastInstance();
final HazelcastInstance instance2 = Hazelcast.newHazelcastInstance();
final HazelcastInstance instance3 = Hazelcast.newHazelcastInstance();
final HazelcastInstance client1 = HazelcastClient.newHazelcastClient();
final TransactionContext context1 = client1.newTransactionContext(TransactionOptions.getDefault().setDurability(2));
final XAResource xaResource1 = context1.getXaResource();
final MyXid myXid = new MyXid();
xaResource1.start(myXid, 0);
final TransactionalMap<Object, Object> map = context1.getMap("map");
map.put("key", "value");
xaResource1.prepare(myXid);
client1.shutdown();
assertNull(instance1.getMap("map").get("key"));
final HazelcastInstance client2 = HazelcastClient.newHazelcastClient();
final TransactionContext context2 = client2.newTransactionContext();
final XAResource xaResource2 = context2.getXaResource();
final Xid[] recover = xaResource2.recover(0);
for (Xid xid : recover) {
xaResource2.commit(xid, false);
}
assertEquals("value", instance1.getMap("map").get("key"));
try {
context1.rollbackTransaction(); //for setting ThreadLocal of unfinished transaction
} catch (Throwable ignored) {
}
}
public static class MyXid implements Xid {
public int getFormatId() {
return 42;
}
@Override
public byte[] getGlobalTransactionId() {
return "GlobalTransactionId".getBytes();
}
@Override
public byte[] getBranchQualifier() {
return "BranchQualifier".getBytes();
}
}
@Test
public void testParallel() throws Exception {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance();
final HazelcastInstance client = HazelcastClient.newHazelcastClient();
final int size = 20;
ExecutorService executorService = Executors.newFixedThreadPool(5);
final CountDownLatch latch = new CountDownLatch(size);
for (int i = 0; i < size; i++) {
executorService.execute(new Runnable() {
public void run() {
try {
txn(client);
} catch (Exception e) {
e.printStackTrace();
} finally {
latch.countDown();
}
}
});
}
assertOpenEventually(latch, 20);
final IMap m = client.getMap("m");
for (int i = 0; i < 10; i++) {
assertFalse(m.isLocked(i));
}
}
@Test
public void testSequential() throws Exception {
final HazelcastInstance instance = Hazelcast.newHazelcastInstance();
final HazelcastInstance client = HazelcastClient.newHazelcastClient();
txn(client);
txn(client);
txn(client);
}
private void txn(HazelcastInstance instance) throws Exception {
tm.begin();
final TransactionContext context = instance.newTransactionContext();
final XAResource xaResource = context.getXaResource();
final Transaction transaction = tm.getTransaction();
transaction.enlistResource(xaResource);
boolean error = false;
try {
final TransactionalMap m = context.getMap("m");
m.put(random.nextInt(10), "value");
} catch (Exception e) {
e.printStackTrace();
error = true;
} finally {
close(error, xaResource);
}
}
private void close(boolean error, XAResource... xaResource) throws Exception {
int flag = XAResource.TMSUCCESS;
// get the current tx
Transaction tx = tm.getTransaction();
// closeConnection
if (error)
flag = XAResource.TMFAIL;
for (XAResource resource : xaResource) {
tx.delistResource(resource, flag);
}
if (error)
tm.rollback();
else
tm.commit();
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_txn_ClientXaTest.java
|
136 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_SC_FLD_TMPLT")
@Cache(usage= CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blCMSElements")
@AdminPresentationClass(populateToOneFields = PopulateToOneFieldsEnum.TRUE, friendlyName = "StructuredContentFieldTemplateImpl_baseStructuredContentFieldTemplate")
public class StructuredContentFieldTemplateImpl implements StructuredContentFieldTemplate {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "StructuredContentFieldTemplateId")
@GenericGenerator(
name="StructuredContentFieldTemplateId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="StructuredContentFieldTemplateImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.structure.domain.StructuredContentFieldTemplateImpl")
}
)
@Column(name = "SC_FLD_TMPLT_ID")
protected Long id;
@Column (name = "NAME")
@AdminPresentation(friendlyName = "StructuredContentFieldTemplateImpl_Field_Template_Name", order = 1, gridOrder = 2, group = "StructuredContentFieldTemplateImpl_Details", prominent = true)
protected String name;
@ManyToMany(targetEntity = FieldGroupImpl.class, cascade = {CascadeType.ALL})
@JoinTable(name = "BLC_SC_FLDGRP_XREF", joinColumns = @JoinColumn(name = "SC_FLD_TMPLT_ID", referencedColumnName = "SC_FLD_TMPLT_ID"), inverseJoinColumns = @JoinColumn(name = "FLD_GROUP_ID", referencedColumnName = "FLD_GROUP_ID"))
@Cascade(value={org.hibernate.annotations.CascadeType.ALL, org.hibernate.annotations.CascadeType.DELETE_ORPHAN})
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region="blCMSElements")
@OrderColumn(name = "GROUP_ORDER")
@BatchSize(size = 20)
protected List<FieldGroup> fieldGroups;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public String getName() {
return name;
}
@Override
public void setName(String name) {
this.name = name;
}
@Override
public List<FieldGroup> getFieldGroups() {
return fieldGroups;
}
@Override
public void setFieldGroups(List<FieldGroup> fieldGroups) {
this.fieldGroups = fieldGroups;
}
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentFieldTemplateImpl.java
|
3,463 |
public class ShardGetService extends AbstractIndexShardComponent {
private final ScriptService scriptService;
private final MapperService mapperService;
private final IndexFieldDataService fieldDataService;
private IndexShard indexShard;
private final MeanMetric existsMetric = new MeanMetric();
private final MeanMetric missingMetric = new MeanMetric();
private final CounterMetric currentMetric = new CounterMetric();
@Inject
public ShardGetService(ShardId shardId, @IndexSettings Settings indexSettings, ScriptService scriptService,
MapperService mapperService, IndexFieldDataService fieldDataService) {
super(shardId, indexSettings);
this.scriptService = scriptService;
this.mapperService = mapperService;
this.fieldDataService = fieldDataService;
}
public GetStats stats() {
return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
}
// sadly, to overcome cyclic dep, we need to do this and inject it ourselves...
public ShardGetService setIndexShard(IndexShard indexShard) {
this.indexShard = indexShard;
return this;
}
public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext)
throws ElasticsearchException {
currentMetric.inc();
try {
long now = System.nanoTime();
GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext);
if (getResult.isExists()) {
existsMetric.inc(System.nanoTime() - now);
} else {
missingMetric.inc(System.nanoTime() - now);
}
return getResult;
} finally {
currentMetric.dec();
}
}
/**
 * Returns {@link GetResult} based on the specified {@link Engine.GetResult} argument.
 * This method basically loads specified fields for the associated document in the engineGetResult.
 * This method load the fields from the Lucene index and not from transaction log and therefore isn't realtime.
 * <p/>
 * Note: Call <b>must</b> release engine searcher associated with engineGetResult!
 *
 * @param engineGetResult engine-level result; if it does not exist a "missing" GetResult is returned
 * @param fields          stored/source fields to return, may be null
 */
public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) {
    if (!engineGetResult.exists()) {
        return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
    }

    currentMetric.inc();
    try {
        long now = System.nanoTime();
        DocumentMapper docMapper = mapperService.documentMapper(type);
        if (docMapper == null) {
            // Unknown type on this shard: report as missing.
            missingMetric.inc(System.nanoTime() - now);
            return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
        }
        fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, fields);
        GetResult getResult = innerGetLoadFromStoredFields(type, id, fields, fetchSourceContext, engineGetResult, docMapper);
        if (getResult.isExists()) {
            existsMetric.inc(System.nanoTime() - now);
        } else {
            missingMetric.inc(System.nanoTime() - now); // This shouldn't happen...
        }
        return getResult;
    } finally {
        currentMetric.dec();
    }
}
/**
 * Decides what needs to be done based on the request input and always returns
 * a valid, non-null {@link FetchSourceContext}: an explicit context wins; with
 * no field selection the source is fetched by default; otherwise the source is
 * fetched only if it was requested as a field.
 */
protected FetchSourceContext normalizeFetchSourceContent(@Nullable FetchSourceContext context, @Nullable String[] gFields) {
    // An explicit context always wins.
    if (context != null) {
        return context;
    }
    // No field selection at all -> default to fetching the source.
    if (gFields == null) {
        return FetchSourceContext.FETCH_SOURCE;
    }
    // Fetch the source only when it was explicitly requested as a field.
    boolean wantsSource = false;
    for (int i = 0; i < gFields.length && !wantsSource; i++) {
        wantsSource = SourceFieldMapper.NAME.equals(gFields[i]);
    }
    return wantsSource ? FetchSourceContext.FETCH_SOURCE : FetchSourceContext.DO_NOT_FETCH_SOURCE;
}
/**
 * Performs the actual get: resolves the type (probing every mapped type when
 * type is null or "_all"), acquires an {@link Engine.GetResult} from the shard,
 * and materializes a {@link GetResult} either from stored fields (Lucene hit)
 * or from the translog source, honoring requested fields and source filtering.
 * The engine result is always released before returning.
 */
public GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) throws ElasticsearchException {
    fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields);
    // Ask the engine to load _source eagerly if any field or the source itself was requested.
    boolean loadSource = (gFields != null && gFields.length > 0) || fetchSourceContext.fetchSource();
    Engine.GetResult get = null;
    if (type == null || type.equals("_all")) {
        // Type unknown: probe each mapped type until the uid matches; release non-matches.
        for (String typeX : mapperService.types()) {
            get = indexShard.get(new Engine.Get(realtime, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(typeX, id)))
                    .loadSource(loadSource).version(version).versionType(versionType));
            if (get.exists()) {
                type = typeX;
                break;
            } else {
                get.release();
            }
        }
        if (get == null) {
            return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
        }
        if (!get.exists()) {
            // no need to release here as well..., we release in the for loop for non exists
            return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
        }
    } else {
        get = indexShard.get(new Engine.Get(realtime, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(type, id)))
                .loadSource(loadSource).version(version).versionType(versionType));
        if (!get.exists()) {
            get.release();
            return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
        }
    }

    DocumentMapper docMapper = mapperService.documentMapper(type);
    if (docMapper == null) {
        get.release();
        return new GetResult(shardId.index().name(), type, id, -1, false, null, null);
    }

    try {
        // break between having loaded it from translog (so we only have _source), and having a document to load
        if (get.docIdAndVersion() != null) {
            return innerGetLoadFromStoredFields(type, id, gFields, fetchSourceContext, get, docMapper);
        } else {
            // Translog hit: only _source is available; every requested field must come
            // from document metadata or be extracted from the source map.
            Translog.Source source = get.source();

            Map<String, GetField> fields = null;
            SearchLookup searchLookup = null;

            // we can only load scripts that can run against the source
            if (gFields != null && gFields.length > 0) {
                // NOTE(review): sourceAsMap is never used in this method — candidate for removal.
                Map<String, Object> sourceAsMap = null;
                for (String field : gFields) {
                    if (SourceFieldMapper.NAME.equals(field)) {
                        // dealt with when normalizing fetchSourceContext.
                        continue;
                    }
                    Object value = null;
                    // Metadata fields are served straight from the translog entry when stored.
                    if (field.equals(RoutingFieldMapper.NAME) && docMapper.routingFieldMapper().fieldType().stored()) {
                        value = source.routing;
                    } else if (field.equals(ParentFieldMapper.NAME) && docMapper.parentFieldMapper().active() && docMapper.parentFieldMapper().fieldType().stored()) {
                        value = source.parent;
                    } else if (field.equals(TimestampFieldMapper.NAME) && docMapper.timestampFieldMapper().fieldType().stored()) {
                        value = source.timestamp;
                    } else if (field.equals(TTLFieldMapper.NAME) && docMapper.TTLFieldMapper().fieldType().stored()) {
                        // Call value for search with timestamp + ttl here to display the live remaining ttl value and be consistent with the search result display
                        if (source.ttl > 0) {
                            value = docMapper.TTLFieldMapper().valueForSearch(source.timestamp + source.ttl);
                        }
                    } else if (field.equals(SizeFieldMapper.NAME) && docMapper.rootMapper(SizeFieldMapper.class).fieldType().stored()) {
                        value = source.source.length();
                    } else {
                        // Regular field: extract raw values from the source via a lazily-built lookup.
                        if (searchLookup == null) {
                            searchLookup = new SearchLookup(mapperService, fieldDataService, new String[]{type});
                            searchLookup.source().setNextSource(source.source);
                        }

                        FieldMapper<?> x = docMapper.mappers().smartNameFieldMapper(field);
                        if (x == null) {
                            if (docMapper.objectMappers().get(field) != null) {
                                // Only fail if we know it is a object field, missing paths / fields shouldn't fail.
                                throw new ElasticsearchIllegalArgumentException("field [" + field + "] isn't a leaf field");
                            }
                        } else if (docMapper.sourceMapper().enabled() || x.fieldType().stored()) {
                            List<Object> values = searchLookup.source().extractRawValues(field);
                            if (!values.isEmpty()) {
                                for (int i = 0; i < values.size(); i++) {
                                    values.set(i, x.valueForSearch(values.get(i)));
                                }
                                value = values;
                            }
                        }
                    }
                    if (value != null) {
                        if (fields == null) {
                            fields = newHashMapWithExpectedSize(2);
                        }
                        if (value instanceof List) {
                            fields.put(field, new GetField(field, (List) value));
                        } else {
                            fields.put(field, new GetField(field, ImmutableList.of(value)));
                        }
                    }
                }
            }

            // deal with source, but only if it's enabled (we always have it from the translog)
            BytesReference sourceToBeReturned = null;
            SourceFieldMapper sourceFieldMapper = docMapper.sourceMapper();
            if (fetchSourceContext.fetchSource() && sourceFieldMapper.enabled()) {

                sourceToBeReturned = source.source;

                // Cater for source excludes/includes at the cost of performance
                // We must first apply the field mapper filtering to make sure we get correct results
                // in the case that the fetchSourceContext white lists something that's not included by the field mapper

                Map<String, Object> filteredSource = null;
                XContentType sourceContentType = null;
                if (sourceFieldMapper.includes().length > 0 || sourceFieldMapper.excludes().length > 0) {
                    // TODO: The source might parsed and available in the sourceLookup but that one uses unordered maps so different. Do we care?
                    Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source.source, true);
                    sourceContentType = typeMapTuple.v1();
                    filteredSource = XContentMapValues.filter(typeMapTuple.v2(), sourceFieldMapper.includes(), sourceFieldMapper.excludes());
                }
                if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
                    if (filteredSource == null) {
                        Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source.source, true);
                        sourceContentType = typeMapTuple.v1();
                        filteredSource = typeMapTuple.v2();
                    }
                    filteredSource = XContentMapValues.filter(filteredSource, fetchSourceContext.includes(), fetchSourceContext.excludes());
                }
                if (filteredSource != null) {
                    try {
                        sourceToBeReturned = XContentFactory.contentBuilder(sourceContentType).map(filteredSource).bytes();
                    } catch (IOException e) {
                        throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
                    }
                }
            }

            return new GetResult(shardId.index().name(), type, id, get.version(), get.exists(), sourceToBeReturned, fields);
        }
    } finally {
        get.release();
    }
}
/**
 * Builds a {@link GetResult} from a Lucene document hit: loads stored fields via
 * a {@link FieldsVisitor}, extracts non-stored fields from _source through a
 * {@link SearchLookup}, and applies fetch-source include/exclude filtering.
 */
private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, DocumentMapper docMapper) {
    Map<String, GetField> fields = null;
    BytesReference source = null;
    Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
    FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext);
    if (fieldVisitor != null) {
        try {
            docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor);
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e);
        }
        source = fieldVisitor.source();

        if (!fieldVisitor.fields().isEmpty()) {
            fieldVisitor.postProcess(docMapper);
            fields = new HashMap<String, GetField>(fieldVisitor.fields().size());
            for (Map.Entry<String, List<Object>> entry : fieldVisitor.fields().entrySet()) {
                fields.put(entry.getKey(), new GetField(entry.getKey(), entry.getValue()));
            }
        }
    }

    // now, go and do the script thingy if needed
    // Non-stored fields are extracted from _source via a lazily-built SearchLookup.
    if (gFields != null && gFields.length > 0) {
        SearchLookup searchLookup = null;
        for (String field : gFields) {
            Object value = null;
            FieldMappers x = docMapper.mappers().smartName(field);
            if (x == null) {
                if (docMapper.objectMappers().get(field) != null) {
                    // Only fail if we know it is a object field, missing paths / fields shouldn't fail.
                    throw new ElasticsearchIllegalArgumentException("field [" + field + "] isn't a leaf field");
                }
            } else if (!x.mapper().fieldType().stored()) {
                if (searchLookup == null) {
                    searchLookup = new SearchLookup(mapperService, fieldDataService, new String[]{type});
                    searchLookup.setNextReader(docIdAndVersion.context);
                    searchLookup.source().setNextSource(source);
                    searchLookup.setNextDocId(docIdAndVersion.docId);
                }

                List<Object> values = searchLookup.source().extractRawValues(field);
                if (!values.isEmpty()) {
                    for (int i = 0; i < values.size(); i++) {
                        values.set(i, x.mapper().valueForSearch(values.get(i)));
                    }
                    value = values;
                }
            }
            if (value != null) {
                if (fields == null) {
                    fields = newHashMapWithExpectedSize(2);
                }
                if (value instanceof List) {
                    fields.put(field, new GetField(field, (List) value));
                } else {
                    fields.put(field, new GetField(field, ImmutableList.of(value)));
                }
            }
        }
    }

    if (!fetchSourceContext.fetchSource()) {
        // Source was only loaded to serve fields; drop it from the result.
        source = null;
    } else if (fetchSourceContext.includes().length > 0 || fetchSourceContext.excludes().length > 0) {
        Map<String, Object> filteredSource;
        XContentType sourceContentType = null;
        // TODO: The source might parsed and available in the sourceLookup but that one uses unordered maps so different. Do we care?
        Tuple<XContentType, Map<String, Object>> typeMapTuple = XContentHelper.convertToMap(source, true);
        sourceContentType = typeMapTuple.v1();
        filteredSource = XContentMapValues.filter(typeMapTuple.v2(), fetchSourceContext.includes(), fetchSourceContext.excludes());
        try {
            source = XContentFactory.contentBuilder(sourceContentType).map(filteredSource).bytes();
        } catch (IOException e) {
            throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "] with includes/excludes set", e);
        }
    }

    return new GetResult(shardId.index().name(), type, id, get.version(), get.exists(), source, fields);
}
/**
 * Chooses the stored-fields visitor for the request: with no explicit fields,
 * either just the source is loaded or nothing at all; otherwise a custom
 * visitor collects the named fields (optionally plus the source).
 */
private static FieldsVisitor buildFieldsVisitors(String[] fields, FetchSourceContext fetchSourceContext) {
    if (fields == null || fields.length == 0) {
        if (fetchSourceContext.fetchSource()) {
            return new JustSourceFieldsVisitor();
        }
        return null;
    }
    return new CustomFieldsVisitor(Sets.newHashSet(fields), fetchSourceContext.fetchSource());
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_get_ShardGetService.java
|
5 |
/**
 * Receives cluster messages over TCP using Netty 3. On start it binds a server
 * socket to the first free port in the configured range, decodes incoming
 * {@link Message}s and fans them out to registered {@link MessageProcessor}s.
 * Bind and channel open/close events are published to
 * {@link NetworkChannelsListener}s.
 */
public class NetworkReceiver
        implements MessageSource, Lifecycle
{
    /** External configuration: where to listen and an optional instance name. */
    public interface Configuration
    {
        HostnamePort clusterServer();

        int defaultPort();

        String name(); // Name of this cluster instance. Null in most cases, but tools may use e.g. "Backup"
    }

    /** Callbacks for bind and per-channel lifecycle events. */
    public interface NetworkChannelsListener
    {
        void listeningAt( URI me );

        void channelOpened( URI to );

        void channelClosed( URI to );
    }

    public static final String CLUSTER_SCHEME = "cluster";
    public static final String INADDR_ANY = "0.0.0.0";

    // All open channels (listen channel + accepted connections); closed as a group on stop().
    private ChannelGroup channels;

    // Receiving
    private NioServerSocketChannelFactory nioChannelFactory;
    private ServerBootstrap serverBootstrap;
    private Iterable<MessageProcessor> processors = Listeners.newListeners();

    private Configuration config;
    private StringLogger msgLog;

    // Remote URI -> channel for currently open inbound connections.
    private Map<URI, Channel> connections = new ConcurrentHashMap<URI, Channel>();
    private Iterable<NetworkChannelsListener> listeners = Listeners.newListeners();

    // Flipped once the actual bound address is observed on the first received message.
    volatile boolean bindingDetected = false;

    public NetworkReceiver( Configuration config, Logging logging )
    {
        this.config = config;
        this.msgLog = logging.getMessagesLog( getClass() );
    }

    @Override
    public void init()
            throws Throwable
    {
        // Keep Netty from renaming our threads.
        ThreadRenamingRunnable.setThreadNameDeterminer( ThreadNameDeterminer.CURRENT );
    }

    @Override
    public void start()
            throws Throwable
    {
        channels = new DefaultChannelGroup();

        // Listen for incoming connections
        nioChannelFactory = new NioServerSocketChannelFactory(
                Executors.newCachedThreadPool( new NamedThreadFactory( "Cluster boss" ) ),
                Executors.newFixedThreadPool( 2, new NamedThreadFactory( "Cluster worker" ) ), 2 );
        serverBootstrap = new ServerBootstrap( nioChannelFactory );
        serverBootstrap.setOption("child.tcpNoDelay", true);
        serverBootstrap.setPipelineFactory( new NetworkNodePipelineFactory() );

        int[] ports = config.clusterServer().getPorts();

        int minPort = ports[0];
        int maxPort = ports.length == 2 ? ports[1] : minPort;

        // Try all ports in the given range
        listen( minPort, maxPort );
    }

    @Override
    public void stop()
            throws Throwable
    {
        msgLog.debug( "Shutting down NetworkReceiver" );
        channels.close().awaitUninterruptibly();
        serverBootstrap.releaseExternalResources();
        msgLog.debug( "Shutting down NetworkReceiver complete" );
    }

    @Override
    public void shutdown()
            throws Throwable
    {
    }

    /**
     * Tries to bind on each port of [minPort, maxPort] in order; throws the last
     * bind failure if no port in the range is free.
     */
    private void listen( int minPort, int maxPort )
            throws URISyntaxException, ChannelException, UnknownHostException
    {
        ChannelException ex = null;
        for ( int checkPort = minPort; checkPort <= maxPort; checkPort++ )
        {
            try
            {
                InetAddress host;
                String address = config.clusterServer().getHost();
                InetSocketAddress localAddress;
                if ( address == null || address.equals( INADDR_ANY ))
                {
                    localAddress = new InetSocketAddress( checkPort );
                }
                else
                {
                    host = InetAddress.getByName( address );
                    localAddress = new InetSocketAddress( host, checkPort );
                }

                Channel listenChannel = serverBootstrap.bind( localAddress );
                listeningAt( getURI( localAddress ) );

                channels.add( listenChannel );
                return;
            }
            catch ( ChannelException e )
            {
                ex = e;
            }
        }

        // Whole range exhausted: clean up and surface the last failure.
        nioChannelFactory.releaseExternalResources();
        throw ex;
    }

    // MessageSource implementation
    public void addMessageProcessor( MessageProcessor processor )
    {
        processors = Listeners.addListener( processor, processors );
    }

    /**
     * Hands a message to each processor in order, stopping at the first one that
     * returns false. Processor exceptions are deliberately swallowed so one
     * faulty processor cannot block delivery to the rest.
     */
    public void receive( Message message )
    {
        for ( MessageProcessor processor : processors )
        {
            try
            {
                if ( !processor.process( message ) )
                {
                    break;
                }
            }
            catch ( Exception e )
            {
                // Ignore
            }
        }
    }

    /** Builds a cluster:// URI for the given socket address, appending ?name= if configured. */
    private URI getURI( InetSocketAddress address ) throws URISyntaxException
    {
        String uri;

        if (address.getAddress().getHostAddress().startsWith( "0" ))
            uri = CLUSTER_SCHEME + "://0.0.0.0:"+address.getPort(); // Socket.toString() already prepends a /
        else
            uri = CLUSTER_SCHEME + "://" + address.getAddress().getHostAddress()+":"+address.getPort(); // Socket.toString() already prepends a /

        // Add name if given
        if (config.name() != null)
            uri += "/?name="+config.name();

        return URI.create( uri );
    }

    public void listeningAt( final URI me )
    {
        Listeners.notifyListeners( listeners, new Listeners.Notification<NetworkChannelsListener>()
        {
            @Override
            public void notify( NetworkChannelsListener listener )
            {
                listener.listeningAt( me );
            }
        } );
    }

    protected void openedChannel( final URI uri, Channel ctxChannel )
    {
        connections.put( uri, ctxChannel );

        Listeners.notifyListeners( listeners, new Listeners.Notification<NetworkChannelsListener>()
        {
            @Override
            public void notify( NetworkChannelsListener listener )
            {
                listener.channelOpened( uri );
            }
        } );
    }

    protected void closedChannel( final URI uri )
    {
        Channel channel = connections.remove( uri );
        if ( channel != null )
        {
            channel.close();
        }

        Listeners.notifyListeners( listeners, new Listeners.Notification<NetworkChannelsListener>()
        {
            @Override
            public void notify( NetworkChannelsListener listener )
            {
                listener.channelClosed( uri );
            }
        } );
    }

    public void addNetworkChannelsListener( NetworkChannelsListener listener )
    {
        listeners = Listeners.addListener( listener, listeners );
    }

    /** Pipeline: object decoder (1MB frame limit) followed by the message handler. */
    private class NetworkNodePipelineFactory
            implements ChannelPipelineFactory
    {
        @Override
        public ChannelPipeline getPipeline() throws Exception
        {
            ChannelPipeline pipeline = Channels.pipeline();
            pipeline.addLast( "frameDecoder",new ObjectDecoder( 1024 * 1000, NetworkNodePipelineFactory.this.getClass().getClassLoader() ) );
            pipeline.addLast( "serverHandler", new MessageReceiver() );
            return pipeline;
        }
    }

    /** Inbound handler: tracks channel lifecycle and dispatches decoded messages. */
    private class MessageReceiver
            extends SimpleChannelHandler
    {
        @Override
        public void channelOpen( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
        {
            Channel ctxChannel = ctx.getChannel();
            openedChannel( getURI( (InetSocketAddress) ctxChannel.getRemoteAddress() ), ctxChannel );
            channels.add( ctxChannel );
        }

        @Override
        public void messageReceived( ChannelHandlerContext ctx, MessageEvent event ) throws Exception
        {
            if (!bindingDetected)
            {
                // First message reveals which local address we actually bound to.
                InetSocketAddress local = ((InetSocketAddress)event.getChannel().getLocalAddress());
                bindingDetected = true;
                listeningAt( getURI( local ) );
            }

            final Message message = (Message) event.getMessage();

            // Fix FROM header since sender cannot know it's correct IP/hostname
            InetSocketAddress remote = (InetSocketAddress) ctx.getChannel().getRemoteAddress();
            String remoteAddress = remote.getAddress().getHostAddress();
            URI fromHeader = URI.create( message.getHeader( Message.FROM ) );
            fromHeader = URI.create(fromHeader.getScheme()+"://"+remoteAddress + ":" + fromHeader.getPort());
            message.setHeader( Message.FROM, fromHeader.toASCIIString() );

            msgLog.debug( "Received:" + message );
            receive( message );
        }

        @Override
        public void channelDisconnected( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
        {
            closedChannel( getURI( (InetSocketAddress) ctx.getChannel().getRemoteAddress() ) );
        }

        @Override
        public void channelClosed( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
        {
            closedChannel( getURI( (InetSocketAddress) ctx.getChannel().getRemoteAddress() ) );
            channels.remove( ctx.getChannel() );
        }

        @Override
        public void exceptionCaught( ChannelHandlerContext ctx, ExceptionEvent e ) throws Exception
        {
            // Connection refusals are routine during cluster formation; only log other errors.
            if ( !(e.getCause() instanceof ConnectException) )
            {
                msgLog.error( "Receive exception:", e.getCause() );
            }
        }
    }
}
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_com_NetworkReceiver.java
|
200 |
/**
 * Remote-side stand-in for a cluster. Only the identity attributes (id, name,
 * data segment id, type) are tracked locally; all storage operations are either
 * no-ops or throw {@link UnsupportedOperationException}, since the real work
 * happens on the server.
 *
 * Fix: absoluteIterator() previously threw with the copy-pasted message
 * "getRecordsSize()"; it now reports its own name.
 */
public class OClusterRemote implements OCluster {
  private String name;
  private int    id;
  private int    dataSegmentId;
  private String type;

  /*
   * (non-Javadoc)
   *
   * @see com.orientechnologies.orient.core.storage.OCluster#configure(com.orientechnologies.orient.core.storage.OStorage, int,
   * java.lang.String, java.lang.String, int, java.lang.Object[])
   */
  public void configure(OStorage iStorage, int iId, String iClusterName, String iLocation, int iDataSegmentId,
      Object... iParameters) throws IOException {
    id = iId;
    name = iClusterName;
    dataSegmentId = iDataSegmentId;
  }

  /*
   * (non-Javadoc)
   *
   * @see com.orientechnologies.orient.core.storage.OCluster#configure(com.orientechnologies.orient.core.storage.OStorage,
   * com.orientechnologies.orient.core.config.OStorageClusterConfiguration)
   */
  public void configure(OStorage iStorage, OStorageClusterConfiguration iConfig) throws IOException {
    id = iConfig.getId();
    name = iConfig.getName();
    dataSegmentId = iConfig.getDataSegmentId();
  }

  /*
   * (non-Javadoc)
   *
   * @see com.orientechnologies.orient.core.storage.OCluster#create(int)
   */
  public void create(int iStartSize) throws IOException {
    // no-op: creation happens server-side
  }

  /*
   * (non-Javadoc)
   *
   * @see com.orientechnologies.orient.core.storage.OCluster#open()
   */
  public void open() throws IOException {
    // no-op: nothing to open locally
  }

  public void close() throws IOException {
    // no-op: nothing to close locally
  }

  @Override
  public OModificationLock getExternalModificationLock() {
    throw new UnsupportedOperationException("getExternalModificationLock");
  }

  @Override
  public void close(boolean flush) throws IOException {
    // no-op: nothing to close locally
  }

  @Override
  public OPhysicalPosition createRecord(byte[] content, ORecordVersion recordVersion, byte recordType) throws IOException {
    throw new UnsupportedOperationException("createRecord");
  }

  @Override
  public boolean deleteRecord(OClusterPosition clusterPosition) throws IOException {
    throw new UnsupportedOperationException("deleteRecord");
  }

  @Override
  public void updateRecord(OClusterPosition clusterPosition, byte[] content, ORecordVersion recordVersion, byte recordType)
      throws IOException {
    throw new UnsupportedOperationException("updateRecord");
  }

  @Override
  public ORawBuffer readRecord(OClusterPosition clusterPosition) throws IOException {
    throw new UnsupportedOperationException("readRecord");
  }

  @Override
  public boolean exists() {
    throw new UnsupportedOperationException("exists");
  }

  public void delete() throws IOException {
    // no-op: deletion happens server-side
  }

  public void set(ATTRIBUTES iAttribute, Object iValue) throws IOException {
    // no-op: attributes are managed server-side
  }

  public void truncate() throws IOException {
    // no-op: truncation happens server-side
  }

  public String getType() {
    return type;
  }

  public int getDataSegmentId() {
    return dataSegmentId;
  }

  public boolean addPhysicalPosition(OPhysicalPosition iPPosition) throws IOException {
    return false;
  }

  public OPhysicalPosition getPhysicalPosition(OPhysicalPosition iPPosition) throws IOException {
    return null;
  }

  public void updateDataSegmentPosition(OClusterPosition iPosition, int iDataSegmentId, long iDataPosition) throws IOException {
    // no-op
  }

  public void removePhysicalPosition(OClusterPosition iPosition) throws IOException {
    // no-op
  }

  public void updateRecordType(OClusterPosition iPosition, byte iRecordType) throws IOException {
    // no-op
  }

  public void updateVersion(OClusterPosition iPosition, ORecordVersion iVersion) throws IOException {
    // no-op
  }

  public long getEntries() {
    return 0;
  }

  @Override
  public long getTombstonesCount() {
    throw new UnsupportedOperationException("getTombstonesCount()");
  }

  @Override
  public void convertToTombstone(OClusterPosition iPosition) throws IOException {
    throw new UnsupportedOperationException("convertToTombstone()");
  }

  @Override
  public boolean hasTombstonesSupport() {
    throw new UnsupportedOperationException("hasTombstonesSupport()");
  }

  public OClusterPosition getFirstPosition() {
    return OClusterPositionFactory.INSTANCE.valueOf(0);
  }

  public OClusterPosition getLastPosition() {
    return OClusterPositionFactory.INSTANCE.valueOf(0);
  }

  public void lock() {
    // no-op: no local locking
  }

  public void unlock() {
    // no-op: no local locking
  }

  public int getId() {
    return id;
  }

  public void synch() throws IOException {
    // no-op: nothing to sync locally
  }

  public void setSoftlyClosed(boolean softlyClosed) throws IOException {
    // no-op
  }

  @Override
  public boolean wasSoftlyClosed() throws IOException {
    return true;
  }

  public String getName() {
    return name;
  }

  public long getRecordsSize() {
    throw new UnsupportedOperationException("getRecordsSize()");
  }

  public boolean isHashBased() {
    return false;
  }

  public OClusterEntryIterator absoluteIterator() {
    // Fixed message: previously reported "getRecordsSize()" by copy-paste.
    throw new UnsupportedOperationException("absoluteIterator()");
  }

  public void setType(String type) {
    this.type = type;
  }

  @Override
  public OPhysicalPosition[] higherPositions(OPhysicalPosition position) {
    throw new UnsupportedOperationException("higherPositions()");
  }

  @Override
  public OPhysicalPosition[] lowerPositions(OPhysicalPosition position) {
    throw new UnsupportedOperationException("lowerPositions()");
  }

  @Override
  public OPhysicalPosition[] ceilingPositions(OPhysicalPosition position) throws IOException {
    throw new UnsupportedOperationException("ceilingPositions()");
  }

  @Override
  public OPhysicalPosition[] floorPositions(OPhysicalPosition position) throws IOException {
    throw new UnsupportedOperationException("floorPositions()");
  }

  @Override
  public boolean useWal() {
    throw new UnsupportedOperationException("useWal()");
  }

  @Override
  public float recordGrowFactor() {
    throw new UnsupportedOperationException("recordGrowFactor()");
  }

  @Override
  public float recordOverflowGrowFactor() {
    throw new UnsupportedOperationException("recordOverflowGrowFactor()");
  }

  @Override
  public String compression() {
    throw new UnsupportedOperationException("compression()");
  }
}
| 0true
|
client_src_main_java_com_orientechnologies_orient_client_remote_OClusterRemote.java
|
784 |
// Execute the search and stream the outcome back over the transport channel.
execute(request, new ActionListener<SearchResponse>() {
    @Override
    public void onResponse(SearchResponse result) {
        try {
            channel.sendResponse(result);
        } catch (Throwable e) {
            // Sending the success response failed; route through the error path
            // so the caller still hears something.
            onFailure(e);
        }
    }

    @Override
    public void onFailure(Throwable e) {
        try {
            channel.sendResponse(e);
        } catch (Exception e1) {
            // Last resort: the channel itself is broken, all we can do is log.
            logger.warn("Failed to send response for get", e1);
        }
    }
});
| 0true
|
src_main_java_org_elasticsearch_action_mlt_TransportMoreLikeThisAction.java
|
149 |
/**
 * A named structured-content type with an optional description and an
 * associated {@link StructuredContentFieldTemplate}. Implementations are
 * {@link Serializable}.
 */
public interface StructuredContentType extends Serializable {

    /**
     * Gets the primary key.
     *
     * @return the primary key
     */
    @Nullable
    public Long getId();

    /**
     * Sets the primary key.
     *
     * @param id the new primary key
     */
    public void setId(@Nullable Long id);

    /**
     * Gets the name.
     *
     * @return the name
     */
    @Nonnull
    String getName();

    /**
     * Sets the name.
     *
     * @param name the new name (must not be null)
     */
    void setName(@Nonnull String name);

    /**
     * Gets the description.
     *
     * @return the description, or null if none was set
     */
    @Nullable
    String getDescription();

    /**
     * Sets the description.
     *
     * @param description the new description, may be null
     */
    void setDescription(@Nullable String description);

    /**
     * Returns the template associated with this content type.
     *
     * @return the associated field template
     */
    @Nonnull
    StructuredContentFieldTemplate getStructuredContentFieldTemplate();

    /**
     * Sets the template associated with this content type.
     *
     * @param scft the field template to associate (must not be null)
     */
    void setStructuredContentFieldTemplate(@Nonnull StructuredContentFieldTemplate scft);
}
| 0true
|
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_domain_StructuredContentType.java
|
1,165 |
/**
 * JPA entity for a gift-card payment. The PAN and PIN are stored encrypted via
 * the (transient, externally injected) {@link EncryptionModule}; accessors
 * transparently encrypt on write and decrypt on read.
 */
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_GIFT_CARD_PAYMENT")
public class GiftCardPaymentInfoImpl implements GiftCardPaymentInfo {

    private static final long serialVersionUID = 1L;

    protected GiftCardPaymentInfoImpl() {
        // do not allow direct instantiation -- must at least be package private
        // for bytecode instrumentation
        // this complies with JPA specification requirements for entity
        // construction
    }

    // Not persisted; must be injected before the pan/pin accessors are used.
    @Transient
    protected EncryptionModule encryptionModule;

    @Id
    @GeneratedValue(generator = "GiftCardPaymentId")
    @GenericGenerator(
        name="GiftCardPaymentId",
        strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
        parameters = {
            @Parameter(name="segment_value", value="GiftCardPaymentInfoImpl"),
            @Parameter(name="entity_name", value="org.broadleafcommerce.core.payment.domain.GiftCardPaymentInfoImpl")
        }
    )
    @Column(name = "PAYMENT_ID")
    protected Long id;

    @Column(name = "REFERENCE_NUMBER", nullable = false)
    @Index(name="GIFTCARD_INDEX", columnNames={"REFERENCE_NUMBER"})
    protected String referenceNumber;

    // Stored encrypted; see getPan()/setPan().
    @Column(name = "PAN", nullable = false)
    protected String pan;

    // Stored encrypted; see getPin()/setPin().
    @Column(name = "PIN")
    protected String pin;

    @Override
    public Long getId() {
        return id;
    }

    @Override
    public String getPan() {
        // Decrypt on read; stored value is ciphertext.
        return encryptionModule.decrypt(pan);
    }

    @Override
    public String getPin() {
        // Decrypt on read; stored value is ciphertext.
        return encryptionModule.decrypt(pin);
    }

    @Override
    public void setId(Long id) {
        this.id = id;
    }

    @Override
    public void setPan(String pan) {
        // Encrypt on write.
        this.pan = encryptionModule.encrypt(pan);
    }

    @Override
    public void setPin(String pin) {
        // Encrypt on write.
        this.pin = encryptionModule.encrypt(pin);
    }

    @Override
    public String getReferenceNumber() {
        return referenceNumber;
    }

    @Override
    public void setReferenceNumber(String referenceNumber) {
        this.referenceNumber = referenceNumber;
    }

    @Override
    public EncryptionModule getEncryptionModule() {
        return encryptionModule;
    }

    @Override
    public void setEncryptionModule(EncryptionModule encryptionModule) {
        this.encryptionModule = encryptionModule;
    }

    // NOTE(review): hashCode is based on pan/pin/referenceNumber only, while equals
    // short-circuits on id when both ids are set — common for JPA entities but means
    // two "id-equal" instances can hash differently; confirm this is intended before
    // using instances as hash keys.
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((pan == null) ? 0 : pan.hashCode());
        result = prime * result + ((pin == null) ? 0 : pin.hashCode());
        result = prime * result + ((referenceNumber == null) ? 0 : referenceNumber.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        GiftCardPaymentInfoImpl other = (GiftCardPaymentInfoImpl) obj;

        // Persisted entities compare by id alone.
        if (id != null && other.id != null) {
            return id.equals(other.id);
        }

        if (pan == null) {
            if (other.pan != null)
                return false;
        } else if (!pan.equals(other.pan))
            return false;
        if (pin == null) {
            if (other.pin != null)
                return false;
        } else if (!pin.equals(other.pin))
            return false;
        if (referenceNumber == null) {
            if (other.referenceNumber != null)
                return false;
        } else if (!referenceNumber.equals(other.referenceNumber))
            return false;
        return true;
    }
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_domain_GiftCardPaymentInfoImpl.java
|
478 |
/**
 * Spring {@link BeanPostProcessor} that swaps the channel processors of a
 * {@link ChannelDecisionManagerImpl} for the configured overrides (processors
 * that inspect the {@code X-Forwarded-Proto} request header), so Spring
 * Security can sit behind a load balancer performing SSL termination.
 *
 * Improvement: the unavoidable unchecked cast of the reflectively-read
 * processor list is now acknowledged with a method-scoped
 * {@code @SuppressWarnings("unchecked")}.
 */
public class ProtoChannelBeanPostProcessor implements BeanPostProcessor, Ordered {

    Log LOG = LogFactory.getLog(ProtoChannelBeanPostProcessor.class);

    protected List<ChannelProcessor> channelProcessorOverrides;

    @Override
    public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
        // No post-initialization work required.
        return bean;
    }

    @Override
    @SuppressWarnings("unchecked")
    public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
        if (bean instanceof ChannelDecisionManagerImpl) {
            try {
                ChannelDecisionManagerImpl manager = (ChannelDecisionManagerImpl) bean;
                // The processor list is private on ChannelDecisionManagerImpl, so it is
                // cleared reflectively before installing the overrides via the public setter.
                Field channelProcessors = manager.getClass().getDeclaredField("channelProcessors");
                channelProcessors.setAccessible(true);
                List<ChannelProcessor> list = (List<ChannelProcessor>) channelProcessors.get(manager);
                list.clear();
                manager.setChannelProcessors(channelProcessorOverrides);
                LOG.info("Replacing the standard Spring Security channel processors with custom processors that look for a " +
                        "'X-Forwarded-Proto' request header. This allows Spring Security to sit behind a load balancer with SSL termination.");
            } catch (Exception e) {
                // Fail fast: security channel configuration must not silently degrade.
                throw new RuntimeException(e);
            }
        }
        return bean;
    }

    @Override
    public int getOrder() {
        // Run late so other post-processors have configured the manager first.
        return 9999;
    }

    /**
     * @return the channelProcessors
     */
    public List<ChannelProcessor> getChannelProcessorOverrides() {
        return channelProcessorOverrides;
    }

    /**
     * @param channelProcessorOverrides the channelProcessors to set
     */
    public void setChannelProcessorOverrides(List<ChannelProcessor> channelProcessorOverrides) {
        this.channelProcessorOverrides = channelProcessorOverrides;
    }
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_security_channel_ProtoChannelBeanPostProcessor.java
|
1,218 |
public class TitanSchemaVertex extends CacheVertex implements SchemaSource {
/**
 * Creates a schema vertex handle for the given transaction, vertex id and
 * lifecycle byte; delegates entirely to the {@code CacheVertex} superclass.
 */
public TitanSchemaVertex(StandardTitanTx tx, long id, byte lifecycle) {
    super(tx, id, lifecycle);
}

// Lazily-resolved raw schema name; populated on first getName() call, cleared by resetCache().
private String name = null;
/**
 * Returns this schema element's name, resolved lazily from its
 * {@code BaseKey.SchemaName} property. Loaded vertices read the relation
 * through the graph-wide schema cache; otherwise a regular property query is
 * issued. The raw name is cached and translated through
 * {@code TitanSchemaCategory.getName} on every call.
 */
@Override
public String getName() {
    if (name == null) {
        TitanProperty p;
        if (isLoaded()) {
            // Fast path: read via the schema cache instead of a full query.
            StandardTitanTx tx = tx();
            p = (TitanProperty) Iterables.getOnlyElement(RelationConstructor.readRelation(this,
                    tx.getGraph().getSchemaCache().getSchemaRelations(getLongId(), BaseKey.SchemaName, Direction.OUT, tx()),
                    tx), null);
        } else {
            p = Iterables.getOnlyElement(query().type(BaseKey.SchemaName).properties(), null);
        }
        Preconditions.checkState(p!=null,"Could not find type for id: %s", getLongId());
        name = p.getValue();
    }
    assert name != null;
    return TitanSchemaCategory.getName(name);
}
@Override
protected Vertex getVertexLabelInternal() {
    // Schema vertices carry no vertex label.
    return null;
}

// Lazily-built definition map; populated on first getDefinition() call, cleared by resetCache().
private TypeDefinitionMap definition = null;
/**
 * Returns the type definition map for this schema vertex, built lazily from
 * its {@code BaseKey.SchemaDefinitionProperty} properties (via the schema
 * cache when the vertex is loaded, otherwise via a regular query) and cached.
 */
@Override
public TypeDefinitionMap getDefinition() {
    TypeDefinitionMap def = definition;
    if (def == null) {
        def = new TypeDefinitionMap();
        Iterable<TitanProperty> ps;
        if (isLoaded()) {
            // Fast path: read via the schema cache instead of a full query.
            StandardTitanTx tx = tx();
            ps = (Iterable)RelationConstructor.readRelation(this,
                    tx.getGraph().getSchemaCache().getSchemaRelations(getLongId(), BaseKey.SchemaDefinitionProperty, Direction.OUT, tx()),
                    tx);
        } else {
            ps = query().type(BaseKey.SchemaDefinitionProperty).properties();
        }
        for (TitanProperty property : ps) {
            // Each property carries a description that names its definition category.
            TypeDefinitionDescription desc = property.getProperty(BaseKey.SchemaDefinitionDesc);
            Preconditions.checkArgument(desc!=null && desc.getCategory().isProperty());
            def.setValue(desc.getCategory(), property.getValue());
        }
        assert def.size()>0;
        definition = def;
    }
    assert def!=null;
    return def;
}
// Lazily-built caches of definition-edge neighbors, keyed by category, one per direction.
private ListMultimap<TypeDefinitionCategory,Entry> outRelations = null;
private ListMultimap<TypeDefinitionCategory,Entry> inRelations = null;

/**
 * Returns the schema vertices related to this one through
 * {@code BaseLabel.SchemaDefinitionEdge} edges in the given direction,
 * filtered to the requested category. Results for each direction are built
 * once (from the schema cache when loaded, otherwise via a query) and cached.
 */
@Override
public Iterable<Entry> getRelated(TypeDefinitionCategory def, Direction dir) {
    assert dir==Direction.OUT || dir==Direction.IN;
    ListMultimap<TypeDefinitionCategory,Entry> rels = dir==Direction.OUT?outRelations:inRelations;
    if (rels==null) {
        ImmutableListMultimap.Builder<TypeDefinitionCategory,Entry> b = ImmutableListMultimap.builder();
        Iterable<TitanEdge> edges;
        if (isLoaded()) {
            // Fast path: read via the schema cache instead of a full query.
            StandardTitanTx tx = tx();
            edges = (Iterable)RelationConstructor.readRelation(this,
                    tx.getGraph().getSchemaCache().getSchemaRelations(getLongId(), BaseLabel.SchemaDefinitionEdge, dir, tx()),
                    tx);
        } else {
            edges = query().type(BaseLabel.SchemaDefinitionEdge).direction(dir).titanEdges();
        }
        for (TitanEdge edge: edges) {
            TitanVertex oth = edge.getVertex(dir.opposite());
            assert oth instanceof TitanSchemaVertex;
            TypeDefinitionDescription desc = edge.getProperty(BaseKey.SchemaDefinitionDesc);
            Object modifier = null;
            if (desc.getCategory().hasDataType()) {
                // Categories with a data type carry a typed modifier on the edge.
                assert desc.getModifier()!=null && desc.getModifier().getClass().equals(desc.getCategory().getDataType());
                modifier = desc.getModifier();
            }
            b.put(desc.getCategory(), new Entry((TitanSchemaVertex) oth, modifier));
        }
        rels = b.build();
        // Publish the immutable cache for this direction.
        if (dir==Direction.OUT) outRelations=rels;
        else inRelations=rels;
    }
    assert rels!=null;
    return rels.get(def);
}
/**
* Resets the internal caches used to speed up lookups on this index type.
* This is needed when the type gets modified in the {@link com.thinkaurelius.titan.graphdb.database.management.ManagementSystem}.
*/
public void resetCache() {
name = null;
definition=null;
outRelations=null;
inRelations=null;
}
public Iterable<TitanEdge> getEdges(final TypeDefinitionCategory def, final Direction dir) {
return getEdges(def,dir,null);
}
public Iterable<TitanEdge> getEdges(final TypeDefinitionCategory def, final Direction dir, TitanSchemaVertex other) {
TitanVertexQuery query = query().type(BaseLabel.SchemaDefinitionEdge).direction(dir);
if (other!=null) query.adjacent(other);
return Iterables.filter(query.titanEdges(),new Predicate<TitanEdge>() {
@Override
public boolean apply(@Nullable TitanEdge edge) {
TypeDefinitionDescription desc = edge.getProperty(BaseKey.SchemaDefinitionDesc);
return desc.getCategory()==def;
}
});
}
@Override
public String toString() {
return getName();
}
@Override
public SchemaStatus getStatus() {
return getDefinition().getValue(TypeDefinitionCategory.STATUS,SchemaStatus.class);
}
@Override
public IndexType asIndexType() {
Preconditions.checkArgument(getDefinition().containsKey(TypeDefinitionCategory.INTERNAL_INDEX),"Schema vertex is not a type vertex: [%s,%s]", getLongId(),getName());
if (getDefinition().getValue(TypeDefinitionCategory.INTERNAL_INDEX)) {
return new CompositeIndexTypeWrapper(this);
} else {
return new MixedIndexTypeWrapper(this);
}
}
}
| 1no label
|
titan-core_src_main_java_com_thinkaurelius_titan_graphdb_types_vertices_TitanSchemaVertex.java
|
146 |
/**
 * Test double for {@link StoreFileChannel} emulating a "choppy" disk that
 * accepts only a limited number of bytes per write call. Written data is
 * captured in a fixed 1024-byte in-memory buffer instead of a real file.
 */
class FileChannelWithChoppyDisk extends StoreFileChannel
{
    ByteBuffer buff = ByteBuffer.allocate(1024);
    private int maxBytesPerWrite;

    public FileChannelWithChoppyDisk(int writeThisMuchAtATime)
    {
        super( (FileChannel) null );
        this.maxBytesPerWrite = writeThisMuchAtATime;
    }

    @Override
    public int write( ByteBuffer byteBuffer, long l ) throws IOException
    {
        // Transfer at most one chunk, even if the source has more remaining.
        int remaining = byteBuffer.limit() - byteBuffer.position();
        int bytesToWrite = Math.min( maxBytesPerWrite, remaining );

        buff.position( (int) l );

        // Temporarily cap the source buffer's limit so only one chunk is copied,
        // then restore it so the caller sees an ordinary partial write.
        int savedLimit = byteBuffer.limit();
        byteBuffer.limit( byteBuffer.position() + bytesToWrite );
        buff.put( byteBuffer );
        byteBuffer.limit( savedLimit );

        return bytesToWrite;
    }

    @Override
    public long position() throws IOException
    {
        return buff.position();
    }

    @Override
    public StoreFileChannel position( long l ) throws IOException
    {
        buff.position( (int) l );
        return this;
    }

    @Override
    public long size() throws IOException
    {
        // Reported size is the backing buffer's full capacity.
        return buff.capacity();
    }

    @Override
    public StoreFileChannel truncate( long l ) throws IOException
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public void force( boolean b ) throws IOException { }
}
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_TestDirectMappedLogBuffer.java
|
393 |
/**
 * Plain DTO implementation of {@link Media}, carrying a media asset's id, URL,
 * title, alternate text and tags. All string properties default to the empty
 * string; the id defaults to 0 until assigned.
 */
public class MediaDto implements Media {

    private static final long serialVersionUID = 1L;

    protected long id;
    protected String url = "";
    protected String title = "";
    protected String altText = "";
    protected String tags = "";

    /** @return the media id (boxed; 0 if never assigned) */
    @Override
    public Long getId() {
        return id;
    }

    /**
     * Sets the media id.
     *
     * @param id new id; a null id is treated as "unset" (0) instead of
     *           triggering an auto-unboxing NullPointerException against the
     *           primitive backing field
     */
    @Override
    public void setId(Long id) {
        // Guard: the backing field is a primitive long, so assigning a null
        // Long directly would throw an NPE during unboxing.
        this.id = id == null ? 0L : id;
    }

    @Override
    public String getUrl() {
        return url;
    }

    @Override
    public void setUrl(String url) {
        this.url = url;
    }

    @Override
    public String getTitle() {
        return title;
    }

    @Override
    public void setTitle(String title) {
        this.title = title;
    }

    @Override
    public String getAltText() {
        return altText;
    }

    @Override
    public void setAltText(String altText) {
        this.altText = altText;
    }

    @Override
    public String getTags() {
        return tags;
    }

    @Override
    public void setTags(String tags) {
        this.tags = tags;
    }
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_media_domain_MediaDto.java
|
85 |
/**
 * Predicate testing whether an element is (or is not) a member of a collection
 * of values. The condition must be a non-null, non-empty {@link Collection}.
 */
public enum Contain implements TitanPredicate {

    /**
     * Whether an element is in a collection
     */
    IN {
        @Override
        public boolean evaluate(Object value, Object condition) {
            Preconditions.checkArgument(isValidCondition(condition), "Invalid condition provided: %s", condition);
            return ((Collection) condition).contains(value);
        }

        @Override
        public TitanPredicate negate() {
            return NOT_IN;
        }
    },

    /**
     * Whether an element is not in a collection
     */
    NOT_IN {
        @Override
        public boolean evaluate(Object value, Object condition) {
            // Exact complement of IN, including its condition validation.
            return !IN.evaluate(value, condition);
        }

        @Override
        public TitanPredicate negate() {
            return IN;
        }
    };

    private static final Logger log = LoggerFactory.getLogger(Contain.class);

    @Override
    public boolean isValidValueType(Class<?> clazz) {
        // Membership can be tested against any value type.
        return true;
    }

    @Override
    public boolean isValidCondition(Object condition) {
        // instanceof is null-safe, so a null condition fails this check too.
        if (!(condition instanceof Collection)) {
            return false;
        }
        return !((Collection) condition).isEmpty();
    }

    @Override
    public boolean hasNegation() {
        return true;
    }

    @Override
    public boolean isQNF() {
        // Contain predicates are not in query normal form.
        return false;
    }
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Contain.java
|
661 |
/**
 * Integration tests for {@link ProductDao}: basic reads by id/name, the
 * featured-product flag, and up-sale/cross-sale related-product persistence.
 * Test groups are chained via dependsOnGroups, and creation groups use
 * Rollback(false) so the persisted products remain available to later groups
 * through {@code savedProducts}.
 */
public class ProductDaoTest extends BaseTest {

    @Resource
    private ProductDao productDao;

    @Resource
    private CatalogService catalogService;

    // Products persisted by createProducts(); later groups reference them by index.
    private List<Product> savedProducts = new ArrayList<Product>();

    // Builds an up-sale relation from prod to prodToRelate and appends it to upSales.
    private static RelatedProduct getRelatedUpSaleProduct(Product prod, Product prodToRelate, List<RelatedProduct> upSales){
        RelatedProduct rp1 = new UpSaleProductImpl();
        rp1.setProduct(prod);
        rp1.setPromotionMessage("brand new coffee");
        rp1.setRelatedProduct(prodToRelate);
        upSales.add(rp1);
        return rp1;
    }

    // Builds a cross-sale relation from prod to prodToRelate and appends it to upSales.
    private static RelatedProduct getRelatedCrossProduct(Product prod, Product prodToRelate, List<RelatedProduct> upSales){
        RelatedProduct rp1 = new CrossSaleProductImpl();
        rp1.setProduct(prod);
        rp1.setPromotionMessage("brand new coffee");
        rp1.setRelatedProduct(prodToRelate);
        upSales.add(rp1);
        return rp1;
    }

    // Persists each data-provider product; not rolled back so dependent groups can use them.
    @Test(groups="createProducts", dataProvider="setupProducts", dataProviderClass=ProductDataProvider.class)
    @Rollback(false)
    @Transactional
    public void createProducts(Product product) {
        product = catalogService.saveProduct(product);
        assert(product.getId() != null);
        savedProducts.add(product);
    }

    // Attaches up-sale relations to the first two saved products.
    @Test(groups="createUpSaleValues", dependsOnGroups="createProducts")
    @Rollback(false)
    @Transactional
    public void createUpSaleValues(){
        Product prod1 = savedProducts.get(0);
        List<RelatedProduct> upSales = new ArrayList<RelatedProduct>();
        getRelatedUpSaleProduct(prod1, savedProducts.get(2), upSales);
        getRelatedUpSaleProduct(prod1, savedProducts.get(3), upSales);
        getRelatedUpSaleProduct(prod1, savedProducts.get(4), upSales);
        prod1.setUpSaleProducts(upSales);
        prod1 = catalogService.saveProduct(prod1);
        assert(prod1.getId() != null);
        Product prod2 = savedProducts.get(1);
        List<RelatedProduct> upSales2 = new ArrayList<RelatedProduct>();
        getRelatedUpSaleProduct(prod2, savedProducts.get(5), upSales2);
        getRelatedUpSaleProduct(prod2, savedProducts.get(6), upSales2);
        prod2.setUpSaleProducts(upSales2);
        prod2 = catalogService.saveProduct(prod2);
        assert(prod2.getId() != null);
    }

    // Verifies the up-sale relations created above can be read back.
    @Test(groups="testReadProductsWithUpSaleValues", dependsOnGroups="createUpSaleValues")
    @Transactional
    public void testReadProductsWithUpSaleValues() {
        Product result = productDao.readProductById(savedProducts.get(0).getId());
        List<RelatedProduct> related = result.getUpSaleProducts();
        assert(related != null);
        assert(!related.isEmpty());
        // NOTE(review): accepts 2 or 3 relations — presumably tolerating overlap
        // with data created by other test runs/groups; confirm intent.
        assert(related.size() == 2 || related.size() == 3);
        for(RelatedProduct rp : related){
            assert(rp instanceof UpSaleProductImpl);
        }
    }

    // Attaches cross-sale relations to the first two saved products.
    @Test(groups="createCrossSaleValues", dependsOnGroups="testReadProductsWithUpSaleValues")
    @Rollback(false)
    @Transactional
    public void createCrossSaleValues(){
        Product prod1 = savedProducts.get(0);
        List<RelatedProduct> crossSale = new ArrayList<RelatedProduct>();
        getRelatedCrossProduct(prod1, savedProducts.get(2), crossSale);
        getRelatedCrossProduct(prod1, savedProducts.get(3), crossSale);
        getRelatedCrossProduct(prod1, savedProducts.get(4), crossSale);
        prod1.setCrossSaleProducts(crossSale);
        prod1 = catalogService.saveProduct(prod1);
        assert(prod1.getId() != null);
        Product prod2 = savedProducts.get(1);
        List<RelatedProduct> crossSale2 = new ArrayList<RelatedProduct>();
        getRelatedCrossProduct(prod2, savedProducts.get(5), crossSale2);
        getRelatedCrossProduct(prod2, savedProducts.get(6), crossSale2);
        prod2.setCrossSaleProducts(crossSale2);
        prod2 = catalogService.saveProduct(prod2);
        assert(prod2.getId() != null);
    }

    // Verifies the cross-sale relations created above can be read back.
    @Test(groups="testReadProductsWithCrossSaleValues", dependsOnGroups="createCrossSaleValues")
    @Transactional
    public void testReadProductsWithCrossSaleValues() {
        Product result = productDao.readProductById(savedProducts.get(1).getId());
        List<RelatedProduct> related = result.getCrossSaleProducts();
        assert(related != null);
        assert(!related.isEmpty());
        assert(related.size() == 2 || related.size() == 3);
        for(RelatedProduct rp : related){
            assert(rp instanceof CrossSaleProductImpl);
        }
    }

    // Round-trip: a saved product can be read back by its id.
    @Test(dataProvider="basicProduct", dataProviderClass=ProductDataProvider.class)
    @Transactional
    public void testReadProductsById(Product product) {
        product = catalogService.saveProduct(product);
        Product result = productDao.readProductById(product.getId());
        assert product.equals(result);
    }

    // Round-trip: a saved product appears in name-based lookup results.
    @Test(dataProvider="basicProduct", dataProviderClass=ProductDataProvider.class)
    @Transactional
    public void testReadProductsByName(Product product) {
        String name = product.getName();
        product = catalogService.saveProduct(product);
        List<Product> result = productDao.readProductsByName(name);
        assert result.contains(product);
    }

    // The featuredProduct flag survives a save/read round-trip.
    @Test(dataProvider="basicProduct", dataProviderClass=ProductDataProvider.class)
    @Transactional
    public void testFeaturedProduct(Product product) {
        product = catalogService.saveProduct(product);
        Long productId = product.getId();
        product.setFeaturedProduct(true);
        catalogService.saveProduct(product);
        Product testProduct = productDao.readProductById(productId);
        assert (testProduct.isFeaturedProduct() == true);
    }
}
| 0true
|
integration_src_test_java_org_broadleafcommerce_core_catalog_dao_ProductDaoTest.java
|
446 |
/**
 * Aggregated process-level statistics (CPU and open file descriptors) across
 * the nodes of a cluster. Populated incrementally via {@link #addNodeStats}
 * and serialized both over the wire (Streamable) and as JSON (ToXContent).
 */
public static class ProcessStats implements ToXContent, Streamable {

    // Number of nodes that contributed process stats.
    int count;
    // Sum of per-node CPU percentages (100 == one fully-used core).
    int cpuPercent;
    // Sum of open file descriptor counts over nodes that reported a value > 0.
    long totalOpenFileDescriptors;
    // Extremes start at the opposite bound so the first sample always wins.
    long minOpenFileDescriptors = Long.MAX_VALUE;
    long maxOpenFileDescriptors = Long.MIN_VALUE;

    /**
     * Folds one node's process stats into the aggregate. Nodes without
     * process stats are skipped entirely (they do not increment count).
     */
    public void addNodeStats(NodeStats nodeStats) {
        if (nodeStats.getProcess() == null) {
            return;
        }
        count++;
        if (nodeStats.getProcess().cpu() != null) {
            // with no sigar, this may not be available
            cpuPercent += nodeStats.getProcess().cpu().getPercent();
        }
        long fd = nodeStats.getProcess().openFileDescriptors();
        if (fd > 0) {
            // fd can be -1 if not supported on platform
            totalOpenFileDescriptors += fd;
        }
        // we still do min max calc on -1, so we'll have an indication of it not being supported on one of the nodes.
        minOpenFileDescriptors = Math.min(minOpenFileDescriptors, fd);
        maxOpenFileDescriptors = Math.max(maxOpenFileDescriptors, fd);
    }

    /**
     * Cpu usage in percentages - 100 is 1 core.
     */
    public int getCpuPercent() {
        return cpuPercent;
    }

    /** Average open file descriptors per contributing node, or -1 if no nodes reported. */
    public long getAvgOpenFileDescriptors() {
        if (count == 0) {
            return -1;
        }
        return totalOpenFileDescriptors / count;
    }

    /** Maximum open file descriptors observed, or -1 if no nodes reported. */
    public long getMaxOpenFileDescriptors() {
        if (count == 0) {
            return -1;
        }
        return maxOpenFileDescriptors;
    }

    /** Minimum open file descriptors observed, or -1 if no nodes reported. */
    public long getMinOpenFileDescriptors() {
        if (count == 0) {
            return -1;
        }
        return minOpenFileDescriptors;
    }

    // Wire deserialization; field order must mirror writeTo exactly.
    @Override
    public void readFrom(StreamInput in) throws IOException {
        count = in.readVInt();
        cpuPercent = in.readVInt();
        totalOpenFileDescriptors = in.readVLong();
        minOpenFileDescriptors = in.readLong();
        maxOpenFileDescriptors = in.readLong();
    }

    // Wire serialization; min/max use plain longs since they may be negative.
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(count);
        out.writeVInt(cpuPercent);
        out.writeVLong(totalOpenFileDescriptors);
        out.writeLong(minOpenFileDescriptors);
        out.writeLong(maxOpenFileDescriptors);
    }

    /** Convenience factory: deserializes a ProcessStats from the stream. */
    public static ProcessStats readStats(StreamInput in) throws IOException {
        ProcessStats cpu = new ProcessStats();
        cpu.readFrom(in);
        return cpu;
    }

    // JSON field name constants.
    static final class Fields {
        static final XContentBuilderString CPU = new XContentBuilderString("cpu");
        static final XContentBuilderString PERCENT = new XContentBuilderString("percent");
        static final XContentBuilderString OPEN_FILE_DESCRIPTORS = new XContentBuilderString("open_file_descriptors");
        static final XContentBuilderString MIN = new XContentBuilderString("min");
        static final XContentBuilderString MAX = new XContentBuilderString("max");
        static final XContentBuilderString AVG = new XContentBuilderString("avg");
    }

    /**
     * Renders the aggregate as JSON. The file-descriptor section is omitted
     * when no node contributed stats (count == 0).
     */
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(Fields.CPU).field(Fields.PERCENT, cpuPercent).endObject();
        if (count > 0) {
            builder.startObject(Fields.OPEN_FILE_DESCRIPTORS);
            builder.field(Fields.MIN, getMinOpenFileDescriptors());
            builder.field(Fields.MAX, getMaxOpenFileDescriptors());
            builder.field(Fields.AVG, getAvgOpenFileDescriptors());
            builder.endObject();
        }
        return builder;
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsNodes.java
|
1,155 |
// Resolve context variables among the method arguments: any String argument
// starting with '$' is replaced by the corresponding value from the command
// context; all other arguments pass through unchanged.
iMethodParams = OMultiValue.array(iMethodParams, Object.class, new OCallable<Object, Object>() {
    @Override
    public Object call(final Object iArgument) {
        if (iArgument instanceof String && ((String) iArgument).startsWith("$"))
            return iContext.getVariable((String) iArgument);
        return iArgument;
    }
});
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodRemoveAll.java
|
226 |
/**
 * Tells OrientDB to call the annotated method AFTER the record has been read
 * and unmarshalled from the database. Applies only to entity objects reachable
 * by the OrientDB engine after they have been registered.
 */
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface OAfterDeserialization {
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_annotation_OAfterDeserialization.java
|
89 |
/**
 * Transaction-state test double that forces a deterministic interleaving:
 * commitCows() blocks on the supplied latch before delegating to the real
 * implementation. NOTE(review): presumably used to open a window for a
 * concurrent cache update during commit and provoke the deadlock under test —
 * confirm against the enclosing test class.
 */
private static class DeadlockProneTransactionState extends WritableTransactionState
{
    private final DoubleLatch latch;

    public DeadlockProneTransactionState( LockManager lockManager, NodeManager nodeManager,
            Logging logging, javax.transaction.Transaction tx, RemoteTxHook txHook, TxIdGenerator txIdGenerator, DoubleLatch latch )
    {
        super( lockManager, nodeManager, logging, tx, txHook, txIdGenerator );
        this.latch = latch;
    }

    @Override
    public void commitCows()
    {
        // Signal the coordinating thread and wait for it to finish before
        // actually committing the copy-on-write state.
        latch.startAndAwaitFinish();
        super.commitCows();
    }
}
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestCacheUpdateDeadlock.java
|
421 |
// Adapt the client-side EntryListener to portable entry events from the
// cluster: deserialize the key (and values only when includeValue is set),
// resolve the originating member by uuid, and dispatch to the callback that
// matches the event type.
return new EventHandler<PortableEntryEvent>() {
    public void handle(PortableEntryEvent event) {
        V value = null;
        V oldValue = null;
        if (includeValue) {
            // Values are deserialized lazily, only when the listener asked for them.
            value = toObject(event.getValue());
            oldValue = toObject(event.getOldValue());
        }
        K key = toObject(event.getKey());
        Member member = getContext().getClusterService().getMember(event.getUuid());
        EntryEvent<K, V> entryEvent = new EntryEvent<K, V>(name, member,
                event.getEventType().getType(), key, oldValue, value);
        switch (event.getEventType()) {
            case ADDED:
                listener.entryAdded(entryEvent);
                break;
            case REMOVED:
                listener.entryRemoved(entryEvent);
                break;
            case UPDATED:
                listener.entryUpdated(entryEvent);
                break;
            case EVICTED:
                listener.entryEvicted(entryEvent);
                break;
            default:
                // Fail loudly on event types this adapter does not handle.
                throw new IllegalArgumentException("Not a known event type " + event.getEventType());
        }
    }

    @Override
    public void onListenerRegister() {
        // No-op: nothing to re-establish on (re)registration.
    }
};
| 1no label
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientMapProxy.java
|
1,230 |
public abstract class OAbstractFile implements OFile {
private FileLock fileLock;
protected File osFile;
protected RandomAccessFile accessFile;
protected FileChannel channel;
protected volatile boolean dirty = false;
protected volatile boolean headerDirty = false;
protected int version;
protected int incrementSize = DEFAULT_INCREMENT_SIZE;
protected long maxSize;
protected byte[] securityCode = new byte[32]; // PART OF HEADER (32 bytes)
protected String mode;
protected boolean failCheck = true;
protected volatile long size; // PART OF HEADER (4 bytes)
public static final int HEADER_SIZE = 1024;
protected static final int HEADER_DATA_OFFSET = 128;
protected static final int DEFAULT_SIZE = 1024000;
protected static final int DEFAULT_INCREMENT_SIZE = -50; // NEGATIVE NUMBER MEANS AS PERCENT OF
// CURRENT
// SIZE
private static final int OPEN_RETRY_MAX = 10;
private static final int OPEN_DELAY_RETRY = 100;
private static final long LOCK_WAIT_TIME = 300;
private static final int LOCK_MAX_RETRIES = 10;
protected static final int SIZE_OFFSET_V_0 = 0;
protected static final int FILLEDUPTO_OFFSET_V_0 = 4;
protected static final int SOFTLY_CLOSED_OFFSET_V_0 = 8;
protected static final int SIZE_OFFSET = 0;
protected static final int FILLEDUPTO_OFFSET = 8;
protected static final int SOFTLY_CLOSED_OFFSET = 16;
protected static final int VERSION_OFFSET = 48;
protected static final int CURRENT_VERSION = 1;
private final ReadWriteLock lock = new ReentrantReadWriteLock();
private boolean wasSoftlyClosed = true;
public abstract long getFileSize();
public abstract long getFilledUpTo();
public abstract void setSize(long iSize) throws IOException;
public abstract void writeHeaderLong(int iPosition, long iValue) throws IOException;
public abstract long readHeaderLong(int iPosition) throws IOException;
public abstract boolean synch() throws IOException;
public abstract void read(long iOffset, byte[] iDestBuffer, int iLenght) throws IOException;
public abstract short readShort(long iLogicalPosition) throws IOException;
public abstract int readInt(long iLogicalPosition) throws IOException;
public abstract long readLong(long iOffset) throws IOException;
public abstract byte readByte(long iOffset) throws IOException;
public abstract void writeInt(long iOffset, int iValue) throws IOException;
public abstract void writeLong(long iOffset, long iValue) throws IOException;
public abstract void writeShort(long iOffset, short iValue) throws IOException;
public abstract void writeByte(long iOffset, byte iValue) throws IOException;
public abstract void write(long iOffset, byte[] iSourceBuffer) throws IOException;
protected abstract void init() throws IOException;
protected abstract void setFilledUpTo(long iHow) throws IOException;
protected abstract void flushHeader() throws IOException;
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#open()
*/
public boolean open() throws IOException {
acquireWriteLock();
try {
if (!osFile.exists())
throw new FileNotFoundException("File: " + osFile.getAbsolutePath());
openChannel(osFile.length());
OLogManager.instance().debug(this, "Checking file integrity of " + osFile.getName() + "...");
init();
long filledUpTo = getFilledUpTo();
long fileSize = getFileSize();
if (fileSize == 0) {
// CORRUPTED? GET THE OS FILE SIZE
final long newFileSize = osFile.length() - HEADER_SIZE;
if (newFileSize != fileSize) {
OLogManager
.instance()
.error(
this,
"Invalid fileSize=%d for file %s. Resetting it to the os file size: %d. Probably the file was not closed correctly last time. The number of records has been set to the maximum value. It's strongly suggested to export and reimport the database before using it",
fileSize, getOsFile().getAbsolutePath(), newFileSize);
setFilledUpTo(newFileSize, true);
setSize(newFileSize, true);
fileSize = newFileSize;
}
}
if (filledUpTo > 0 && filledUpTo > fileSize) {
OLogManager
.instance()
.error(
this,
"Invalid filledUp=%d for file %s. Resetting it to the os file size: %d. Probably the file was not closed correctly last time. The number of records has been set to the maximum value. It's strongly suggested to export and reimport the database before using it",
filledUpTo, getOsFile().getAbsolutePath(), fileSize);
setSize(fileSize);
setFilledUpTo(fileSize);
filledUpTo = getFilledUpTo();
}
if (filledUpTo > fileSize || filledUpTo < 0)
OLogManager.instance().error(this, "Invalid filledUp size (=" + filledUpTo + "). The file could be corrupted", null,
OStorageException.class);
if (failCheck) {
wasSoftlyClosed = isSoftlyClosed();
if (wasSoftlyClosed)
setSoftlyClosed(false);
}
if (version < CURRENT_VERSION) {
setSize(fileSize, true);
setFilledUpTo(filledUpTo, true);
setVersion(CURRENT_VERSION);
version = CURRENT_VERSION;
setSoftlyClosed(!failCheck);
}
if (failCheck)
return wasSoftlyClosed;
return true;
} finally {
releaseWriteLock();
}
}
public boolean wasSoftlyClosed() {
acquireReadLock();
try {
return wasSoftlyClosed;
} finally {
releaseReadLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#create(int)
*/
public void create(int iStartSize) throws IOException {
acquireWriteLock();
try {
if (iStartSize == -1)
iStartSize = DEFAULT_SIZE;
openChannel(iStartSize);
setFilledUpTo(0, true);
setSize(maxSize > 0 && iStartSize > maxSize ? maxSize : iStartSize, true);
setVersion(CURRENT_VERSION);
version = CURRENT_VERSION;
setSoftlyClosed(!failCheck);
} finally {
releaseWriteLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#close()
*/
public void close() throws IOException {
acquireWriteLock();
try {
try {
setSoftlyClosed(true);
if (OGlobalConfiguration.FILE_LOCK.getValueAsBoolean())
unlock();
if (channel != null && channel.isOpen()) {
channel.close();
channel = null;
}
if (accessFile != null) {
accessFile.close();
accessFile = null;
}
} catch (Exception e) {
OLogManager.instance().error(this, "Error on closing file " + osFile.getAbsolutePath(), e, OIOException.class);
}
} finally {
releaseWriteLock();
}
}
public void close(boolean softlyClosed) throws IOException {
acquireWriteLock();
try {
try {
setSoftlyClosed(softlyClosed);
if (OGlobalConfiguration.FILE_LOCK.getValueAsBoolean())
unlock();
if (channel != null && channel.isOpen()) {
channel.close();
channel = null;
}
if (accessFile != null) {
accessFile.close();
accessFile = null;
}
} catch (Exception e) {
OLogManager.instance().error(this, "Error on closing file " + osFile.getAbsolutePath(), e, OIOException.class);
}
} finally {
releaseWriteLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#delete()
*/
public void delete() throws IOException {
acquireWriteLock();
try {
close();
if (osFile != null) {
boolean deleted = osFile.delete();
while (!deleted) {
OMemoryWatchDog.freeMemoryForResourceCleanup(100);
deleted = !osFile.exists() || osFile.delete();
}
}
} finally {
releaseWriteLock();
}
}
/*
* Locks a portion of file.
*/
public FileLock lock(final long iRangeFrom, final long iRangeSize, final boolean iShared) throws IOException {
acquireWriteLock();
try {
return channel.lock(iRangeFrom, iRangeSize, iShared);
} finally {
releaseWriteLock();
}
}
/*
* Unlocks a portion of file.
*/
public OFile unlock(final FileLock iLock) throws IOException {
acquireWriteLock();
try {
if (iLock != null) {
try {
iLock.release();
} catch (ClosedChannelException e) {
}
}
return this;
} finally {
releaseWriteLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#lock()
*/
public void lock() throws IOException {
if (channel == null)
return;
acquireWriteLock();
try {
for (int i = 0; i < LOCK_MAX_RETRIES; ++i) {
try {
fileLock = channel.tryLock(0, 1, true);
if (fileLock != null)
break;
} catch (OverlappingFileLockException e) {
OLogManager.instance().debug(this,
"Cannot open file '" + osFile.getAbsolutePath() + "' because it is locked. Waiting %d ms and retrying %d/%d...",
LOCK_WAIT_TIME, i, LOCK_MAX_RETRIES);
// FORCE FINALIZATION TO COLLECT ALL THE PENDING BUFFERS
OMemoryWatchDog.freeMemoryForResourceCleanup(LOCK_WAIT_TIME);
}
if (fileLock == null)
throw new OLockException(
"File '"
+ osFile.getPath()
+ "' is locked by another process, maybe the database is in use by another process. Use the remote mode with a OrientDB server to allow multiple access to the same database.");
}
} finally {
releaseWriteLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#unlock()
*/
public void unlock() throws IOException {
acquireWriteLock();
try {
if (fileLock != null) {
try {
fileLock.release();
} catch (ClosedChannelException e) {
}
fileLock = null;
}
} finally {
releaseWriteLock();
}
}
protected void checkSize(final long iSize) throws IOException {
acquireReadLock();
try {
if (OLogManager.instance().isDebugEnabled())
OLogManager.instance().debug(this, "Changing file size to " + iSize + " bytes. " + toString());
final long filledUpTo = getFilledUpTo();
if (iSize < filledUpTo)
OLogManager.instance().error(
this,
"You cannot resize down the file to " + iSize + " bytes, since it is less than current space used: " + filledUpTo
+ " bytes", OIOException.class);
} finally {
releaseReadLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#removeTail(int)
*/
public void removeTail(long iSizeToShrink) throws IOException {
acquireWriteLock();
try {
final long filledUpTo = getFilledUpTo();
if (filledUpTo < iSizeToShrink)
iSizeToShrink = 0;
setFilledUpTo(filledUpTo - iSizeToShrink);
} finally {
releaseWriteLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#shrink(int)
*/
public void shrink(final long iSize) throws IOException {
acquireWriteLock();
try {
final long filledUpTo = getFilledUpTo();
if (iSize >= filledUpTo)
return;
OLogManager.instance().debug(this, "Shrinking filled file from " + filledUpTo + " to " + iSize + " bytes. " + toString());
setFilledUpTo(iSize);
} finally {
releaseWriteLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#allocateSpace(int)
*/
public long allocateSpace(final long iSize) throws IOException {
acquireWriteLock();
try {
final long offset = getFilledUpTo();
final long size = getFileSize();
if (getFreeSpace() < iSize) {
if (maxSize > 0 && maxSize - size < iSize)
throw new IllegalArgumentException("Cannot enlarge file since the configured max size ("
+ OFileUtils.getSizeAsString(maxSize) + ") was reached! " + toString());
// MAKE ROOM
long newFileSize = size;
if (newFileSize == 0)
// PROBABLY HAS BEEN LOST WITH HARD KILLS
newFileSize = DEFAULT_SIZE;
// GET THE STEP SIZE IN BYTES
long stepSizeInBytes = incrementSize > 0 ? incrementSize : -1 * size / 100 * incrementSize;
// FIND THE BEST SIZE TO ALLOCATE (BASED ON INCREMENT-SIZE)
while (newFileSize - offset <= iSize) {
newFileSize += stepSizeInBytes;
if (newFileSize == 0)
// EMPTY FILE: ALLOCATE REQUESTED SIZE ONLY
newFileSize = iSize;
if (newFileSize > maxSize && maxSize > 0)
// TOO BIG: ROUND TO THE MAXIMUM FILE SIZE
newFileSize = maxSize;
}
setSize(newFileSize);
}
// THERE IS SPACE IN FILE: RETURN THE UPPER BOUND OFFSET AND UPDATE THE FILLED THRESHOLD
setFilledUpTo(offset + iSize);
return offset;
} finally {
releaseWriteLock();
}
}
protected long checkRegions(final long iOffset, final long iLength) {
acquireReadLock();
try {
if (iOffset < 0 || iOffset + iLength > getFilledUpTo())
throw new OIOException("You cannot access outside the file size (" + getFilledUpTo()
+ " bytes). You have requested portion " + iOffset + "-" + (iOffset + iLength) + " bytes. File: " + toString());
return iOffset;
} finally {
releaseReadLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#getFreeSpace()
*/
public long getFreeSpace() {
acquireReadLock();
try {
return getFileSize() - getFilledUpTo();
} finally {
releaseReadLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#canOversize(int)
*/
public boolean canOversize(final int iRecordSize) {
acquireReadLock();
try {
return maxSize - getFileSize() > iRecordSize;
} finally {
releaseReadLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#toString()
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("File: ");
builder.append(osFile.getName());
if (accessFile != null) {
builder.append(" os-size=");
try {
builder.append(accessFile.length());
} catch (IOException e) {
builder.append("?");
}
}
builder.append(", stored=");
builder.append(getFileSize());
builder.append(", filled=");
builder.append(getFilledUpTo());
builder.append(", max=");
builder.append(maxSize);
builder.append("");
return builder.toString();
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#getOsFile()
*/
public File getOsFile() {
acquireReadLock();
try {
return osFile;
} finally {
releaseReadLock();
}
}
public OAbstractFile init(final String iFileName, final String iMode) {
acquireWriteLock();
try {
mode = iMode;
osFile = new File(iFileName);
return this;
} finally {
releaseWriteLock();
}
}
protected void openChannel(final long newSize) throws IOException {
acquireWriteLock();
try {
OLogManager.instance().debug(this, "[OFile.openChannel] opening channel for file '%s' of size: %d", osFile, osFile.length());
for (int i = 0; i < OPEN_RETRY_MAX; ++i)
try {
accessFile = new RandomAccessFile(osFile, mode);
break;
} catch (FileNotFoundException e) {
if (i == OPEN_DELAY_RETRY)
throw e;
// TRY TO RE-CREATE THE DIRECTORY (THIS HAPPENS ON WINDOWS AFTER A DELETE IS PENDING, USUALLY WHEN REOPEN THE DB VERY
// FREQUENTLY)
osFile.getParentFile().mkdirs();
try {
Thread.sleep(OPEN_DELAY_RETRY);
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
}
}
if (accessFile == null)
throw new FileNotFoundException(osFile.getAbsolutePath());
if (accessFile.length() != newSize)
accessFile.setLength(newSize);
accessFile.seek(VERSION_OFFSET);
version = accessFile.read();
accessFile.seek(0);
channel = accessFile.getChannel();
if (OGlobalConfiguration.FILE_LOCK.getValueAsBoolean())
lock();
} finally {
releaseWriteLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#getMaxSize()
*/
public long getMaxSize() {
acquireReadLock();
try {
return maxSize;
} finally {
releaseReadLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#setMaxSize(int)
*/
public void setMaxSize(int maxSize) {
acquireWriteLock();
try {
this.maxSize = maxSize;
} finally {
releaseWriteLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#getIncrementSize()
*/
public int getIncrementSize() {
acquireReadLock();
try {
return incrementSize;
} finally {
releaseReadLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#setIncrementSize(int)
*/
public void setIncrementSize(int incrementSize) {
acquireWriteLock();
try {
this.incrementSize = incrementSize;
} finally {
releaseWriteLock();
}
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#isOpen()
*/
/** @return true when the underlying RandomAccessFile has been opened and not yet closed. */
public boolean isOpen() {
  acquireReadLock();
  try {
    if (accessFile == null)
      return false;
    return true;
  } finally {
    releaseReadLock();
  }
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#exists()
*/
/** @return true when a backing OS file is set and currently exists on disk. */
public boolean exists() {
  acquireReadLock();
  try {
    if (osFile == null)
      return false;
    return osFile.exists();
  } finally {
    releaseReadLock();
  }
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#isFailCheck()
*/
/** @return whether fail-check mode is enabled for this file. */
public boolean isFailCheck() {
  acquireReadLock();
  try {
    final boolean result = failCheck;
    return result;
  } finally {
    releaseReadLock();
  }
}
/*
* (non-Javadoc)
*
* @see com.orientechnologies.orient.core.storage.fs.OFileAAA#setFailCheck(boolean)
*/
/** Enables or disables fail-check mode for this file, under the exclusive lock. */
public void setFailCheck(boolean failCheck) {
  acquireWriteLock();
  try {
    this.failCheck = failCheck;
  } finally {
    releaseWriteLock();
  }
}
/** Marks the file content as modified so it is flushed on the next sync. */
protected void setDirty() {
  acquireWriteLock();
  try {
    // Unconditional write: the previous "if (!dirty)" guard saved nothing,
    // since this method only ever flips the flag to true.
    dirty = true;
  } finally {
    releaseWriteLock();
  }
}
/** Marks the file header as modified so it is flushed on the next sync. */
protected void setHeaderDirty() {
  acquireWriteLock();
  try {
    // Unconditional write: the previous "if (!headerDirty)" guard saved
    // nothing, since this method only ever flips the flag to true.
    headerDirty = true;
  } finally {
    releaseWriteLock();
  }
}
/** @return the simple file name of the backing OS file. */
public String getName() {
  acquireReadLock();
  try {
    final String result = osFile.getName();
    return result;
  } finally {
    releaseReadLock();
  }
}
/** @return the (possibly relative) path of the backing OS file. */
public String getPath() {
  acquireReadLock();
  try {
    final String result = osFile.getPath();
    return result;
  } finally {
    releaseReadLock();
  }
}
/** @return the absolute path of the backing OS file. */
public String getAbsolutePath() {
  acquireReadLock();
  try {
    final String result = osFile.getAbsolutePath();
    return result;
  } finally {
    releaseReadLock();
  }
}
/**
 * Renames the backing OS file to {@code newFile} under the exclusive lock.
 *
 * @return true when the underlying {@link File#renameTo(File)} succeeded
 */
public boolean renameTo(final File newFile) {
  acquireWriteLock();
  try {
    final boolean renamed = osFile.renameTo(newFile);
    return renamed;
  } finally {
    releaseWriteLock();
  }
}
/** Acquires the exclusive (write) half of this file's read-write lock. */
protected void acquireWriteLock() {
  lock.writeLock().lock();
}
/** Releases the exclusive (write) half of this file's read-write lock. */
protected void releaseWriteLock() {
  lock.writeLock().unlock();
}
/** Acquires the shared (read) half of this file's read-write lock. */
protected void acquireReadLock() {
  lock.readLock().lock();
}
/** Releases the shared (read) half of this file's read-write lock. */
protected void releaseReadLock() {
  lock.readLock().unlock();
}
protected abstract void setVersion(int version) throws IOException;
protected abstract void setFilledUpTo(final long iHow, boolean force);
protected abstract void setSize(final long size, final boolean force) throws IOException;
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_storage_fs_OAbstractFile.java
|
189 |
/**
 * Main configuration object for a Hazelcast client. Aggregates group, security,
 * network, listener, proxy-factory, near-cache and serialization settings.
 * Several network-related accessors are retained as deprecated delegates to
 * {@link ClientNetworkConfig} for backward compatibility.
 */
public class ClientConfig {

    /**
     * To pass properties
     */
    private Properties properties = new Properties();

    /**
     * The Group Configuration properties like:
     * Name and Password that is used to connect to the cluster.
     */
    private GroupConfig groupConfig = new GroupConfig();

    /**
     * The Security Configuration for custom Credentials:
     * Name and Password that is used to connect to the cluster.
     */
    private ClientSecurityConfig securityConfig = new ClientSecurityConfig();

    /**
     * The Network Configuration properties like:
     * addresses to connect, smart-routing, socket-options...
     */
    private ClientNetworkConfig networkConfig = new ClientNetworkConfig();

    /**
     * Used to distribute the operations to multiple Endpoints.
     */
    private LoadBalancer loadBalancer;

    /**
     * List of listeners that Hazelcast will automatically add as a part of initialization process.
     * Currently only supports {@link com.hazelcast.core.LifecycleListener}.
     */
    private List<ListenerConfig> listenerConfigs = new LinkedList<ListenerConfig>();

    /**
     * pool-size for internal ExecutorService which handles responses etc.
     */
    private int executorPoolSize = -1;

    private SerializationConfig serializationConfig = new SerializationConfig();

    private List<ProxyFactoryConfig> proxyFactoryConfigs = new LinkedList<ProxyFactoryConfig>();

    private ManagedContext managedContext;

    private ClassLoader classLoader;

    /**
     * Near-cache configurations keyed by map name or wildcard pattern.
     * Moved up next to the other fields; it previously sat between unrelated
     * methods further down the class.
     */
    private Map<String, NearCacheConfig> nearCacheConfigMap = new HashMap<String, NearCacheConfig>();

    public String getProperty(String name) {
        String value = properties.getProperty(name);
        // Fall back to the JVM system property of the same name.
        return value != null ? value : System.getProperty(name);
    }

    public ClientConfig setProperty(String name, String value) {
        properties.put(name, value);
        return this;
    }

    public Properties getProperties() {
        return properties;
    }

    public ClientConfig setProperties(final Properties properties) {
        this.properties = properties;
        return this;
    }

    public ClientSecurityConfig getSecurityConfig() {
        return securityConfig;
    }

    public void setSecurityConfig(ClientSecurityConfig securityConfig) {
        this.securityConfig = securityConfig;
    }

    public ClientNetworkConfig getNetworkConfig() {
        return networkConfig;
    }

    public void setNetworkConfig(ClientNetworkConfig networkConfig) {
        this.networkConfig = networkConfig;
    }

    /**
     * please use {@link ClientConfig#addNearCacheConfig(NearCacheConfig)}
     *
     * @param mapName
     * @param nearCacheConfig
     * @return
     */
    @Deprecated
    public ClientConfig addNearCacheConfig(String mapName, NearCacheConfig nearCacheConfig) {
        nearCacheConfig.setName(mapName);
        return addNearCacheConfig(nearCacheConfig);
    }

    public ClientConfig addNearCacheConfig(NearCacheConfig nearCacheConfig) {
        nearCacheConfigMap.put(nearCacheConfig.getName(), nearCacheConfig);
        return this;
    }

    public ClientConfig addListenerConfig(ListenerConfig listenerConfig) {
        getListenerConfigs().add(listenerConfig);
        return this;
    }

    public ClientConfig addProxyFactoryConfig(ProxyFactoryConfig proxyFactoryConfig) {
        this.proxyFactoryConfigs.add(proxyFactoryConfig);
        return this;
    }

    public NearCacheConfig getNearCacheConfig(String mapName) {
        return lookupByPattern(nearCacheConfigMap, mapName);
    }

    public Map<String, NearCacheConfig> getNearCacheConfigMap() {
        return nearCacheConfigMap;
    }

    public ClientConfig setNearCacheConfigMap(Map<String, NearCacheConfig> nearCacheConfigMap) {
        this.nearCacheConfigMap = nearCacheConfigMap;
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#isSmartRouting} instead
     */
    @Deprecated
    public boolean isSmartRouting() {
        return networkConfig.isSmartRouting();
    }

    /**
     * Use {@link ClientNetworkConfig#setSmartRouting} instead
     */
    @Deprecated
    public ClientConfig setSmartRouting(boolean smartRouting) {
        networkConfig.setSmartRouting(smartRouting);
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#getSocketInterceptorConfig} instead
     */
    @Deprecated
    public SocketInterceptorConfig getSocketInterceptorConfig() {
        return networkConfig.getSocketInterceptorConfig();
    }

    /**
     * Use {@link ClientNetworkConfig#setSocketInterceptorConfig} instead
     */
    @Deprecated
    public ClientConfig setSocketInterceptorConfig(SocketInterceptorConfig socketInterceptorConfig) {
        networkConfig.setSocketInterceptorConfig(socketInterceptorConfig);
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#getConnectionAttemptPeriod} instead
     */
    @Deprecated
    public int getConnectionAttemptPeriod() {
        return networkConfig.getConnectionAttemptPeriod();
    }

    /**
     * Use {@link ClientNetworkConfig#setConnectionAttemptPeriod} instead
     */
    @Deprecated
    public ClientConfig setConnectionAttemptPeriod(int connectionAttemptPeriod) {
        networkConfig.setConnectionAttemptPeriod(connectionAttemptPeriod);
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#getConnectionAttemptLimit} instead
     */
    @Deprecated
    public int getConnectionAttemptLimit() {
        return networkConfig.getConnectionAttemptLimit();
    }

    /**
     * Use {@link ClientNetworkConfig#setConnectionAttemptLimit} instead
     */
    @Deprecated
    public ClientConfig setConnectionAttemptLimit(int connectionAttemptLimit) {
        networkConfig.setConnectionAttemptLimit(connectionAttemptLimit);
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#getConnectionTimeout} instead
     */
    @Deprecated
    public int getConnectionTimeout() {
        return networkConfig.getConnectionTimeout();
    }

    /**
     * Use {@link ClientNetworkConfig#setConnectionTimeout} instead
     */
    @Deprecated
    public ClientConfig setConnectionTimeout(int connectionTimeout) {
        networkConfig.setConnectionTimeout(connectionTimeout);
        return this;
    }

    public Credentials getCredentials() {
        return securityConfig.getCredentials();
    }

    public ClientConfig setCredentials(Credentials credentials) {
        securityConfig.setCredentials(credentials);
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#addAddress} instead
     */
    @Deprecated
    public ClientConfig addAddress(String... addresses) {
        networkConfig.addAddress(addresses);
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#setAddresses} instead
     */
    @Deprecated
    public ClientConfig setAddresses(List<String> addresses) {
        networkConfig.setAddresses(addresses);
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#getAddresses} instead
     */
    @Deprecated
    public List<String> getAddresses() {
        return networkConfig.getAddresses();
    }

    public GroupConfig getGroupConfig() {
        return groupConfig;
    }

    public ClientConfig setGroupConfig(GroupConfig groupConfig) {
        this.groupConfig = groupConfig;
        return this;
    }

    public List<ListenerConfig> getListenerConfigs() {
        return listenerConfigs;
    }

    public ClientConfig setListenerConfigs(List<ListenerConfig> listenerConfigs) {
        this.listenerConfigs = listenerConfigs;
        return this;
    }

    public LoadBalancer getLoadBalancer() {
        return loadBalancer;
    }

    public ClientConfig setLoadBalancer(LoadBalancer loadBalancer) {
        this.loadBalancer = loadBalancer;
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#isRedoOperation} instead
     */
    @Deprecated
    public boolean isRedoOperation() {
        return networkConfig.isRedoOperation();
    }

    /**
     * Use {@link ClientNetworkConfig#setRedoOperation} instead
     */
    @Deprecated
    public ClientConfig setRedoOperation(boolean redoOperation) {
        networkConfig.setRedoOperation(redoOperation);
        return this;
    }

    /**
     * Use {@link ClientNetworkConfig#getSocketOptions} instead
     */
    @Deprecated
    public SocketOptions getSocketOptions() {
        return networkConfig.getSocketOptions();
    }

    /**
     * Use {@link ClientNetworkConfig#setSocketOptions} instead
     */
    @Deprecated
    public ClientConfig setSocketOptions(SocketOptions socketOptions) {
        networkConfig.setSocketOptions(socketOptions);
        return this;
    }

    public ClassLoader getClassLoader() {
        return classLoader;
    }

    public ClientConfig setClassLoader(ClassLoader classLoader) {
        this.classLoader = classLoader;
        return this;
    }

    public ManagedContext getManagedContext() {
        return managedContext;
    }

    public ClientConfig setManagedContext(ManagedContext managedContext) {
        this.managedContext = managedContext;
        return this;
    }

    public int getExecutorPoolSize() {
        return executorPoolSize;
    }

    public ClientConfig setExecutorPoolSize(int executorPoolSize) {
        this.executorPoolSize = executorPoolSize;
        return this;
    }

    public List<ProxyFactoryConfig> getProxyFactoryConfigs() {
        return proxyFactoryConfigs;
    }

    public ClientConfig setProxyFactoryConfigs(List<ProxyFactoryConfig> proxyFactoryConfigs) {
        this.proxyFactoryConfigs = proxyFactoryConfigs;
        return this;
    }

    public SerializationConfig getSerializationConfig() {
        return serializationConfig;
    }

    public ClientConfig setSerializationConfig(SerializationConfig serializationConfig) {
        this.serializationConfig = serializationConfig;
        return this;
    }

    /**
     * Resolves a config by exact name first, then by the most specific
     * matching wildcard pattern among the map keys.
     */
    private static <T> T lookupByPattern(Map<String, T> map, String name) {
        T t = map.get(name);
        if (t == null) {
            int lastMatchingPoint = -1;
            for (Map.Entry<String, T> entry : map.entrySet()) {
                String pattern = entry.getKey();
                T value = entry.getValue();
                final int matchingPoint = getMatchingPoint(name, pattern);
                if (matchingPoint > lastMatchingPoint) {
                    lastMatchingPoint = matchingPoint;
                    t = value;
                }
            }
        }
        return t;
    }

    /**
     * higher values means more specific matching
     *
     * @param name
     * @param pattern
     * @return -1 if name does not match at all, zero or positive otherwise
     */
    private static int getMatchingPoint(final String name, final String pattern) {
        final int index = pattern.indexOf('*');
        if (index == -1) {
            return -1;
        }
        final String firstPart = pattern.substring(0, index);
        final int indexFirstPart = name.indexOf(firstPart, 0);
        if (indexFirstPart == -1) {
            return -1;
        }
        final String secondPart = pattern.substring(index + 1);
        // Search for the suffix only AFTER the matched prefix. The previous code
        // searched from (index + 1) - an offset into the PATTERN, not the name -
        // which could let the suffix match characters already consumed by the
        // prefix (e.g. pattern "abc*c" against name "zzabcc").
        final int indexSecondPart = name.indexOf(secondPart, indexFirstPart + firstPart.length());
        if (indexSecondPart == -1) {
            return -1;
        }
        return firstPart.length() + secondPart.length();
    }
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_config_ClientConfig.java
|
859 |
/**
 * Applies a user-supplied {@link IFunction} to an IAtomicReference value.
 * A backup is scheduled only when the function actually changed the value.
 */
public class AlterOperation extends AbstractAlterOperation {

    public AlterOperation() {
    }

    public AlterOperation(String name, Data function) {
        super(name, function);
    }

    @Override
    public void run() throws Exception {
        final NodeEngine engine = getNodeEngine();
        final IFunction fn = engine.toObject(function);
        final ReferenceWrapper reference = getReference();

        final Object currentValue = engine.toObject(reference.get());
        //noinspection unchecked
        final Object newValue = fn.apply(currentValue);

        // Only replicate when the value changed; identical output means no-op.
        shouldBackup = !isEquals(currentValue, newValue);
        if (shouldBackup) {
            backup = engine.toData(newValue);
            reference.set(backup);
        }
    }

    @Override
    public int getId() {
        return AtomicReferenceDataSerializerHook.ALTER;
    }
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_concurrent_atomicreference_operations_AlterOperation.java
|
1,018 |
// Dispatch the shard-level operation on the configured thread pool. On failure,
// either retry (re-registering with the cluster listener) or propagate the
// error to the caller's listener.
threadPool.executor(executor).execute(new Runnable() {
    @Override
    public void run() {
        try {
            shardOperation(request, listener);
        } catch (Throwable e) {
            if (retryOnFailure(e)) {
                // NOTE(review): operationStarted presumably guards against double
                // execution - confirm against the enclosing class.
                operationStarted.set(false);
                // we already marked it as started when we executed it (removed the listener) so pass false
                // to re-add to the cluster listener
                retry(false, null);
            } else {
                listener.onFailure(e);
            }
        }
    }
});
| 0true
|
src_main_java_org_elasticsearch_action_support_single_instance_TransportInstanceSingleOperationAction.java
|
683 |
/**
 * Response for a put-warmer request; carries only the acknowledged flag
 * inherited from {@link AcknowledgedResponse}. The read/write order below is
 * the wire format - keep it symmetric.
 */
public class PutWarmerResponse extends AcknowledgedResponse {

    PutWarmerResponse() {
        super();
    }

    PutWarmerResponse(boolean acknowledged) {
        super(acknowledged);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        readAcknowledged(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        writeAcknowledged(out);
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_warmer_put_PutWarmerResponse.java
|
465 |
/**
 * Stress test: many client threads compete for an ISemaphore and mutate a pair
 * of counters under it. If the semaphore correctly serializes access, the two
 * counters stay exact mirrors of each other and their sum ends at zero.
 */
@RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ClientSemaphoreThreadedTest {

    protected static HazelcastInstance client;
    protected static HazelcastInstance server;

    @BeforeClass
    public static void init(){
        // One embedded member plus one client; shared by all tests in the class.
        server = Hazelcast.newHazelcastInstance();
        client = HazelcastClient.newHazelcastClient();
    }

    @AfterClass
    public static void destroy() {
        HazelcastClient.shutdownAll();
        Hazelcast.shutdownAll();
    }

    @Test
    public void concurrent_trySemaphoreTest() {
        concurrent_trySemaphoreTest(false);
    }

    @Test
    public void concurrent_trySemaphoreWithTimeOutTest() {
        concurrent_trySemaphoreTest(true);
    }

    // Core scenario shared by both tests; tryWithTimeOut selects the acquire variant.
    public void concurrent_trySemaphoreTest(final boolean tryWithTimeOut) {
        final ISemaphore semaphore = client.getSemaphore(randomString());
        semaphore.init(1);
        final AtomicInteger upTotal = new AtomicInteger(0);
        final AtomicInteger downTotal = new AtomicInteger(0);

        final SemaphoreTestThread threads[] = new SemaphoreTestThread[8];
        for(int i=0; i<threads.length; i++){
            SemaphoreTestThread t;
            if(tryWithTimeOut){
                t = new TrySemaphoreTimeOutThread(semaphore, upTotal, downTotal);
            }else{
                t = new TrySemaphoreThread(semaphore, upTotal, downTotal);
            }
            t.start();
            threads[i] = t;
        }
        // Wait for all workers, then fail if any captured an exception.
        HazelcastTestSupport.assertJoinable(threads);

        for(SemaphoreTestThread t : threads){
            assertNull("thread "+ t +" has error "+t.error, t.error);
        }

        // upTotal and downTotal receive +delta / -delta under the semaphore,
        // so their sum must be exactly zero if mutual exclusion held.
        assertEquals("concurrent access to locked code caused wrong total", 0, upTotal.get() + downTotal.get());
    }

    // Worker using the non-blocking tryAcquire().
    static class TrySemaphoreThread extends SemaphoreTestThread{
        public TrySemaphoreThread(ISemaphore semaphore, AtomicInteger upTotal, AtomicInteger downTotal){
            super(semaphore, upTotal, downTotal);
        }

        public void iterativelyRun() throws Exception{
            if(semaphore.tryAcquire()){
                work();
                semaphore.release();
            }
        }
    }

    // Worker using tryAcquire with a short timeout.
    static class TrySemaphoreTimeOutThread extends SemaphoreTestThread{
        public TrySemaphoreTimeOutThread(ISemaphore semaphore, AtomicInteger upTotal, AtomicInteger downTotal){
            super(semaphore, upTotal, downTotal);
        }

        public void iterativelyRun() throws Exception{
            if(semaphore.tryAcquire(1, TimeUnit.MILLISECONDS )){
                work();
                semaphore.release();
            }
        }
    }

    // Base worker: runs iterativelyRun() in a tight loop, recording the first
    // Throwable instead of letting it kill the thread silently.
    static abstract class SemaphoreTestThread extends Thread{
        static private final int MAX_ITTERATIONS = 1000*10;
        private final Random random = new Random();
        protected final ISemaphore semaphore ;
        protected final AtomicInteger upTotal;
        protected final AtomicInteger downTotal;
        public volatile Throwable error;

        public SemaphoreTestThread(ISemaphore semaphore, AtomicInteger upTotal, AtomicInteger downTotal){
            this.semaphore = semaphore;
            this.upTotal = upTotal;
            this.downTotal = downTotal;
        }

        final public void run(){
            try{
                for ( int i=0; i<MAX_ITTERATIONS; i++ ) {
                    iterativelyRun();
                }
            }catch (Throwable e){
                error = e;
            }
        }

        abstract void iterativelyRun() throws Exception;

        // Critical section: mirrored +delta / -delta updates. Only safe if the
        // enclosing semaphore guarantees mutual exclusion.
        protected void work(){
            final int delta = random.nextInt(1000);
            upTotal.addAndGet(delta);
            downTotal.addAndGet(-delta);
        }
    }
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_semaphore_ClientSemaphoreThreadedTest.java
|
166 |
// Obtain the sun.misc.Unsafe singleton reflectively: scan its static fields
// for an instance of the class itself, rather than relying on a specific
// field name ("theUnsafe"), which can differ across JDK implementations.
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
    public sun.misc.Unsafe run() throws Exception {
        Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
        for (java.lang.reflect.Field f : k.getDeclaredFields()) {
            f.setAccessible(true);
            Object x = f.get(null);
            if (k.isInstance(x))
                return k.cast(x);
        }
        throw new NoSuchFieldError("the Unsafe");
    }});
| 0true
|
src_main_java_jsr166y_ForkJoinPool.java
|
510 |
/**
 * Thread-safe registry that maps simple (package-less) class names to Java
 * classes and can instantiate registered classes through their default
 * constructor. All access is serialized via synchronized methods.
 */
public class OEntityManagerClassHandler {
  private Map<String, Class<?>> entityClasses = new HashMap<String, Class<?>>();

  /**
   * Returns the Java class by its name
   *
   * @param iClassName
   *          Simple class name without the package
   * @return Returns the Java class by its name
   */
  public synchronized Class<?> getEntityClass(final String iClassName) {
    return entityClasses.get(iClassName);
  }

  /** Registers the class under its simple name. */
  public synchronized void registerEntityClass(final Class<?> iClass) {
    registerEntityClass(iClass.getSimpleName(), iClass);
  }

  /** Registers the class under an explicit name. */
  public synchronized void registerEntityClass(final String iClassName, final Class<?> iClass) {
    entityClasses.put(iClassName, iClass);
  }

  /** Removes the registration stored under the given name, if any. */
  public synchronized void deregisterEntityClass(final String iClassName) {
    entityClasses.remove(iClassName);
  }

  /** Removes the registration stored under the class's simple name, if any. */
  public synchronized void deregisterEntityClass(final Class<?> iClass) {
    deregisterEntityClass(iClass.getSimpleName());
  }

  /** @return a live view of all (name, class) registrations. */
  public synchronized Set<Entry<String, Class<?>>> getClassesEntrySet() {
    return entityClasses.entrySet();
  }

  /** @return true when a class is registered under the given name. */
  public synchronized boolean containsEntityClass(final String iClassName) {
    return entityClasses.containsKey(iClassName);
  }

  /** @return true when a class is registered under this class's simple name. */
  public synchronized boolean containsEntityClass(final Class<?> iClass) {
    return containsEntityClass(iClass.getSimpleName());
  }

  /**
   * Instantiates the class via its public zero-argument constructor.
   *
   * @throws IllegalArgumentException when no public default constructor exists
   */
  public synchronized Object createInstance(final Class<?> iClass) throws InstantiationException, IllegalAccessException,
      InvocationTargetException {
    Constructor<?> noArgCtor = null;
    for (Constructor<?> candidate : iClass.getConstructors()) {
      if (candidate.getParameterTypes().length == 0) {
        noArgCtor = candidate;
        break;
      }
    }

    if (noArgCtor == null)
      throw new IllegalArgumentException("Cannot create an object of class '" + iClass.getName()
          + "' because it has no default constructor. Please define the method: " + iClass.getSimpleName() + "()");

    // A public constructor of a non-public class still needs setAccessible.
    if (!noArgCtor.isAccessible())
      noArgCtor.setAccessible(true);

    return noArgCtor.newInstance();
  }

  /** @return a live view of all registered classes. */
  public synchronized Collection<Class<?>> getRegisteredEntities() {
    return entityClasses.values();
  }
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_entity_OEntityManagerClassHandler.java
|
562 |
/**
 * Trivial {@link LoadBalancer} that always routes to one fixed member,
 * regardless of cluster state.
 */
public class StaticLB implements LoadBalancer {

    // The single member every call to next() returns.
    private final Member member;

    public StaticLB(Member member) {
        this.member = member;
    }

    @Override
    public void init(Cluster cluster, ClientConfig config) {
        // Intentionally empty: the target member is fixed at construction time.
    }

    @Override
    public Member next() {
        return member;
    }
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_util_StaticLB.java
|
1,992 |
/**
 * JPA entity backing the BLC_ID_GENERATION table: tracks the id range and
 * batch allocation state for one id type. Equality and hash are value-based
 * over all persistent fields (including the optimistic-lock version).
 */
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_ID_GENERATION")
public class IdGenerationImpl implements IdGeneration {

    private static final long serialVersionUID = 1L;

    @Id
    @Column(name = "ID_TYPE", nullable=false)
    protected String type;

    @Column(name = "ID_MIN", nullable = true)
    protected Long begin;

    @Column(name = "ID_MAX", nullable = true)
    protected Long end;

    @Column(name = "BATCH_START", nullable = false)
    protected Long batchStart;

    @Column(name = "BATCH_SIZE", nullable=false)
    protected Long batchSize;

    @Version
    protected Integer version;

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public Long getBegin() {
        return begin;
    }

    public void setBegin(Long begin) {
        this.begin = begin;
    }

    public Long getEnd() {
        return end;
    }

    public void setEnd(Long end) {
        this.end = end;
    }

    public Long getBatchStart() {
        return batchStart;
    }

    public void setBatchStart(Long batchStart) {
        this.batchStart = batchStart;
    }

    public Long getBatchSize() {
        return batchSize;
    }

    public void setBatchSize(Long batchSize) {
        this.batchSize = batchSize;
    }

    public Integer getVersion() {
        return version;
    }

    // Null-safe hash of a single field; 0 for null, matching the previous
    // hand-expanded ternaries.
    private static int hashOf(Object o) {
        return o == null ? 0 : o.hashCode();
    }

    // Null-safe equality of a single field pair, matching the previous
    // hand-expanded null checks.
    private static boolean same(Object a, Object b) {
        return a == null ? b == null : a.equals(b);
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + hashOf(batchSize);
        result = prime * result + hashOf(batchStart);
        result = prime * result + hashOf(begin);
        result = prime * result + hashOf(end);
        result = prime * result + hashOf(type);
        result = prime * result + hashOf(version);
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null || getClass() != obj.getClass())
            return false;
        IdGenerationImpl other = (IdGenerationImpl) obj;
        return same(batchSize, other.batchSize)
            && same(batchStart, other.batchStart)
            && same(begin, other.begin)
            && same(end, other.end)
            && same(type, other.type)
            && same(version, other.version);
    }
}
| 1no label
|
core_broadleaf-profile_src_main_java_org_broadleafcommerce_profile_core_domain_IdGenerationImpl.java
|
1,405 |
/** Callback for the asynchronous delete-index operation: exactly one of the two methods is invoked. */
public static interface Listener {

    /** Called when the operation completed; carries the final response. */
    void onResponse(Response response);

    /** Called when the operation failed with the given cause. */
    void onFailure(Throwable t);
}
| 0true
|
src_main_java_org_elasticsearch_cluster_metadata_MetaDataDeleteIndexService.java
|
1,509 |
/**
 * A {@link RoutingAllocation} triggered by shard failures: carries the list of
 * shards that failed so allocation deciders can avoid re-assigning them to the
 * same nodes.
 */
public class FailedRerouteAllocation extends RoutingAllocation {

    // Shards whose failure caused this reroute round.
    private final List<ShardRouting> failedShards;

    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<ShardRouting> failedShards, ClusterInfo clusterInfo) {
        super(deciders, routingNodes, nodes, clusterInfo);
        this.failedShards = failedShards;
    }

    /** @return the shards whose failure triggered this allocation round. */
    public List<ShardRouting> failedShards() {
        return failedShards;
    }
}
| 0true
|
src_main_java_org_elasticsearch_cluster_routing_allocation_FailedRerouteAllocation.java
|
1,379 |
/**
 * Hadoop InputFormat that reads Titan vertices out of Cassandra by delegating
 * split computation and row reading to Cassandra's ColumnFamilyInputFormat,
 * translating Titan configuration keys into the Hadoop keys Cassandra expects.
 */
public class TitanCassandraInputFormat extends TitanInputFormat {

    // Copied these private constants from Cassandra's ConfigHelper circa 2.0.9
    private static final String INPUT_WIDEROWS_CONFIG = "cassandra.input.widerows";
    private static final String RANGE_BATCH_SIZE_CONFIG = "cassandra.range.batch.size";

    // Delegate that performs the actual split/record work against Cassandra.
    private final ColumnFamilyInputFormat columnFamilyInputFormat = new ColumnFamilyInputFormat();
    private TitanCassandraHadoopGraph graph;
    private Configuration config;

    @Override
    public List<InputSplit> getSplits(final JobContext jobContext) throws IOException, InterruptedException {
        // Splits come straight from Cassandra's token-range computation.
        return this.columnFamilyInputFormat.getSplits(jobContext);
    }

    @Override
    public RecordReader<NullWritable, FaunusVertex> createRecordReader(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext)
            throws IOException, InterruptedException {
        // Wrap Cassandra's raw record reader so rows are decoded into Titan vertices.
        return new TitanCassandraRecordReader(this.graph, this.vertexQuery,
                (ColumnFamilyRecordReader) this.columnFamilyInputFormat.createRecordReader(inputSplit, taskAttemptContext));
    }

    @Override
    public void setConf(final Configuration config) {
        super.setConf(config);
        this.graph = new TitanCassandraHadoopGraph(titanSetup);

        // Copy some Titan configuration keys to the Hadoop Configuration keys used by Cassandra's ColumnFamilyInputFormat
        ConfigHelper.setInputInitialAddress(config, inputConf.get(GraphDatabaseConfiguration.STORAGE_HOSTS)[0]);
        if (inputConf.has(GraphDatabaseConfiguration.STORAGE_PORT))
            ConfigHelper.setInputRpcPort(config, String.valueOf(inputConf.get(GraphDatabaseConfiguration.STORAGE_PORT)));
        if (inputConf.has(GraphDatabaseConfiguration.AUTH_USERNAME))
            ConfigHelper.setInputKeyspaceUserName(config, inputConf.get(GraphDatabaseConfiguration.AUTH_USERNAME));
        if (inputConf.has(GraphDatabaseConfiguration.AUTH_PASSWORD))
            ConfigHelper.setInputKeyspacePassword(config, inputConf.get(GraphDatabaseConfiguration.AUTH_PASSWORD));

        // Copy keyspace, force the CF setting to edgestore, honor widerows when set
        final boolean wideRows = config.getBoolean(INPUT_WIDEROWS_CONFIG, false);
        // Use the setInputColumnFamily overload that includes a widerows argument; using the overload without this argument forces it false
        ConfigHelper.setInputColumnFamily(config, inputConf.get(AbstractCassandraStoreManager.CASSANDRA_KEYSPACE), Backend.EDGESTORE_NAME, wideRows);

        // Set the column slice bounds via Faunus's vertex query filter
        final SlicePredicate predicate = new SlicePredicate();
        final int rangeBatchSize = config.getInt(RANGE_BATCH_SIZE_CONFIG, Integer.MAX_VALUE);
        predicate.setSlice_range(getSliceRange(titanSetup.inputSlice(vertexQuery), rangeBatchSize));
        ConfigHelper.setInputSlicePredicate(config, predicate);

        this.config = config;
    }

    // Convert a Titan SliceQuery into a Thrift SliceRange, capping the column
    // count at the configured range batch size.
    private SliceRange getSliceRange(final SliceQuery slice, final int limit) {
        final SliceRange sliceRange = new SliceRange();
        sliceRange.setStart(slice.getSliceStart().asByteBuffer());
        sliceRange.setFinish(slice.getSliceEnd().asByteBuffer());
        sliceRange.setCount(Math.min(limit, slice.getLimit()));
        return sliceRange;
    }

    @Override
    public Configuration getConf() {
        return this.config;
    }
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_cassandra_TitanCassandraInputFormat.java
|
3 |
/**
 * Unit tests for AbbreviationServiceImpl: verifies activation with a valid
 * abbreviations file, with no file property, and with a nonexistent file
 * (the latter two fall back to word-by-word phrases), plus file lookup from
 * the file system and from bundle resources.
 */
public class AbbreviationServiceImplTest {

    @Mock private ComponentContext context;

    AbbreviationServiceImpl abbrev;
    Dictionary<String, String> properties;

    @BeforeMethod
    public void init() {
        MockitoAnnotations.initMocks(this);
        abbrev = new AbbreviationServiceImpl();
        // Minimal OSGi component properties the service reads during activate().
        properties = new Hashtable<String, String>();
        properties.put("component.name", "AbbreviationService");
        when(context.getProperties()).thenReturn(properties);
    }

    @Test
    public void testActivateGoodFile() {
        // With a valid file, known multi-word phrases are grouped ("Fiber Optic").
        properties.put("abbreviations-file", "/default-abbreviations.properties");
        abbrev.activate(context);
        Abbreviations abbreviations = abbrev.getAbbreviations("Fiber Optic MDM System");
        assertEquals(abbreviations.getPhrases().size(), 3);
        assertEquals(abbreviations.getPhrases().get(0), "Fiber Optic");
        assertEquals(abbreviations.getPhrases().get(1), "MDM");
        assertEquals(abbreviations.getPhrases().get(2), "System");
    }

    @Test
    public void testActivateNoFileProperty() {
        // Without a file property, each word becomes its own phrase.
        abbrev.activate(context);
        Abbreviations abbreviations = abbrev.getAbbreviations("Fiber Optic MDM System");
        assertEquals(abbreviations.getPhrases().size(), 4);
        assertEquals(abbreviations.getPhrases().get(0), "Fiber");
        assertEquals(abbreviations.getPhrases().get(1), "Optic");
        assertEquals(abbreviations.getPhrases().get(2), "MDM");
        assertEquals(abbreviations.getPhrases().get(3), "System");
    }

    @Test
    public void testActivateNonexistentAbbreviationsFile() {
        // A missing file behaves the same as no file property at all.
        properties.put("abbreviations-file", "/file-does-not-exist.properties");
        abbrev.activate(context);
        Abbreviations abbreviations = abbrev.getAbbreviations("Fiber Optic MDM System");
        assertEquals(abbreviations.getPhrases().size(), 4);
        assertEquals(abbreviations.getPhrases().get(0), "Fiber");
        assertEquals(abbreviations.getPhrases().get(1), "Optic");
        assertEquals(abbreviations.getPhrases().get(2), "MDM");
        assertEquals(abbreviations.getPhrases().get(3), "System");
    }

    @Test(dataProvider="findFileTests")
    public void testFindFile(String path, String fileProperty) throws IOException {
        // Each fixture file carries a "file" property naming where it was loaded from.
        InputStream in;
        Properties p;

        p = new Properties();
        in = abbrev.findFile(path);
        assertNotNull(in);
        p.load(in);
        assertEquals(p.getProperty("file"), fileProperty);
    }

    @DataProvider(name = "findFileTests")
    public Object[][] getFindFileTests() {
        return new Object[][] {
                // A file path
                { "src/test/data/abbreviations.properties", "in file system" },
                // A resource in the bundle using an absolute name
                { "/test-abbreviations.properties", "root of bundle" },
                // A resource in the bundle using a relative name
                { "package-abbreviations.properties", "in bundle package" },
        };
    }
}
| 0true
|
tableViews_src_test_java_gov_nasa_arc_mct_abbreviation_impl_AbbreviationServiceImplTest.java
|
350 |
/**
 * Factory hook for supplying the database instance bound to the current
 * thread when none has been set explicitly.
 */
public interface ODatabaseThreadLocalFactory {

  /** @return the database instance to associate with the calling thread */
  public ODatabaseRecord getThreadDatabase();
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_ODatabaseThreadLocalFactory.java
|
232 |
/**
 * Persisted configuration entry for a pluggable module: identifies the module,
 * its activity window, default flag and resolution priority.
 */
public interface ModuleConfiguration extends Serializable {

    /** @return the primary key of this configuration entry */
    public Long getId();

    public void setId(Long id);

    /** @return the name of the module this configuration applies to */
    public String getModuleName();

    public void setModuleName(String name);

    /** Sets the date from which this configuration is considered active. */
    public void setActiveStartDate(Date startDate);

    public Date getActiveStartDate();

    /**
     * Sets the date after which this configuration is no longer active.
     * (Parameter renamed from the misleading "startDate"; Java parameter
     * names are not part of the binary contract, so callers are unaffected.)
     */
    public void setActiveEndDate(Date endDate);

    public Date getActiveEndDate();

    /** Marks this configuration as the default for its module type. */
    public void setIsDefault(Boolean isDefault);

    public Boolean getIsDefault();

    /** Lower values win when several configurations are eligible — TODO confirm ordering convention. */
    public void setPriority(Integer priority);

    public Integer getPriority();

    /** @return the type discriminator for this module configuration */
    public ModuleConfigurationType getModuleConfigurationType();

    public void setAuditable(Auditable auditable);

    public Auditable getAuditable();
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_config_domain_ModuleConfiguration.java
|
855 |
@Service("blOfferService")
public class OfferServiceImpl implements OfferService {
private static final Log LOG = LogFactory.getLog(OfferServiceImpl.class);
// should be called outside of Offer service after Offer service is executed
@Resource(name="blCustomerOfferDao")
protected CustomerOfferDao customerOfferDao;
@Resource(name="blOfferCodeDao")
protected OfferCodeDao offerCodeDao;
@Resource(name="blOfferAuditService")
protected OfferAuditService offerAuditService;
@Resource(name="blOfferDao")
protected OfferDao offerDao;
@Resource(name="blOrderOfferProcessor")
protected OrderOfferProcessor orderOfferProcessor;
@Resource(name="blItemOfferProcessor")
protected ItemOfferProcessor itemOfferProcessor;
@Resource(name="blFulfillmentGroupOfferProcessor")
protected FulfillmentGroupOfferProcessor fulfillmentGroupOfferProcessor;
@Resource(name="blPromotableItemFactory")
protected PromotableItemFactory promotableItemFactory;
@Resource(name = "blOfferServiceExtensionManager")
protected OfferServiceExtensionManager extensionManager;
@Resource(name = "blOrderService")
protected OrderService orderService;
@Override
public List<Offer> findAllOffers() {
return offerDao.readAllOffers();
}
@Override
@Transactional("blTransactionManager")
public Offer save(Offer offer) {
return offerDao.save(offer);
}
@Override
@Transactional("blTransactionManager")
public OfferCode saveOfferCode(OfferCode offerCode) {
offerCode.setOffer(offerDao.save(offerCode.getOffer()));
return offerCodeDao.save(offerCode);
}
/**
* Creates a list of offers that applies to this order. All offers that are assigned to the customer,
* entered during checkout, or has a delivery type of automatic are added to the list. The same offer
* cannot appear more than once in the list.
*
* @param code
* @return a List of offers that may apply to this order
*/
@Override
public Offer lookupOfferByCode(String code) {
Offer offer = null;
OfferCode offerCode = offerCodeDao.readOfferCodeByCode(code);
if (offerCode != null) {
offer = offerCode.getOffer();
}
return offer;
}
@Override
public OfferCode lookupOfferCodeByCode(String code){
return offerCodeDao.readOfferCodeByCode(code);
}
/**
* Creates a list of offers that applies to this order. All offers that are assigned to the customer,
* entered during checkout, or has a delivery type of automatic are added to the list. The same offer
* cannot appear more than once in the list.
*
* @param order
* @return a List of offers that may apply to this order
*/
@Override
public List<Offer> buildOfferListForOrder(Order order) {
List<Offer> offers = new ArrayList<Offer>();
List<CustomerOffer> customerOffers = lookupOfferCustomerByCustomer(order.getCustomer());
for (CustomerOffer customerOffer : customerOffers) {
if (!offers.contains(customerOffer.getOffer())) {
offers.add(customerOffer.getOffer());
}
}
List<OfferCode> orderOfferCodes = order.getAddedOfferCodes();
orderOfferCodes = removeOutOfDateOfferCodes(orderOfferCodes);
for (OfferCode orderOfferCode : orderOfferCodes) {
if (!offers.contains(orderOfferCode.getOffer())) {
offers.add(orderOfferCode.getOffer());
}
}
List<Offer> globalOffers = lookupAutomaticDeliveryOffers();
for (Offer globalOffer : globalOffers) {
if (!offers.contains(globalOffer) && verifyMaxCustomerUsageThreshold(order.getCustomer(), globalOffer)) {
offers.add(globalOffer);
}
}
if (extensionManager != null) {
extensionManager.getProxy().applyAdditionalFilters(offers);
}
return offers;
}
/**
* Private method used to retrieve all offers assigned to this customer. These offers
* have a DeliveryType of MANUAL and are programmatically assigned to the customer.
*
* @param customer
* @return a List of offers assigned to the customer
*/
protected List<CustomerOffer> lookupOfferCustomerByCustomer(Customer customer) {
List<CustomerOffer> offerCustomers = customerOfferDao.readCustomerOffersByCustomer(customer);
return offerCustomers;
}
/**
* Private method used to retrieve all offers with DeliveryType of AUTOMATIC
*
* @return a List of automatic delivery offers
*/
protected List<Offer> lookupAutomaticDeliveryOffers() {
List<Offer> globalOffers = offerDao.readOffersByAutomaticDeliveryType();
return globalOffers;
}
    /**
     * Removes all out-of-date offer codes based on each code's start and end dates.
     * A code whose start date is still in the future is removed; codes without a
     * start date are kept. A code whose end date has already passed is removed;
     * codes without an end date are kept. The start and end dates on the offer
     * itself still need to be evaluated separately.
     *
     * @param offerCodes the codes added to the order
     * @return the same list with expired and not-yet-active codes removed
     */
protected List<OfferCode> removeOutOfDateOfferCodes(List<OfferCode> offerCodes){
Date now = SystemTime.asDate();
List<OfferCode> offerCodesToRemove = new ArrayList<OfferCode>();
for (OfferCode offerCode : offerCodes) {
if ((offerCode.getStartDate() != null) && (offerCode.getStartDate().after(now))){
offerCodesToRemove.add(offerCode);
} else if (offerCode.getEndDate() != null && offerCode.getEndDate().before(now)){
offerCodesToRemove.add(offerCode);
}
}
// remove all offers in the offersToRemove list from original offers list
for (OfferCode offerCode : offerCodesToRemove) {
offerCodes.remove(offerCode);
}
return offerCodes;
}
/*
*
* Offers Logic:
* 1) Remove all existing offers in the Order (order, item, and fulfillment)
* 2) Check and remove offers
* a) Remove out of date offers
* b) Remove offers that do not apply to this customer
* 3) Loop through offers
* a) Verifies type of offer (order, order item, fulfillment)
* b) Verifies if offer can be applies
* c) Assign offer to type (order, order item, or fulfillment)
* 4) Sorts the order and item offers list by priority and then discount
* 5) Identify the best offers to apply to order item and create adjustments for each item offer
* 6) Compare order item adjustment price to sales price, and remove adjustments if sale price is better
* 7) Identify the best offers to apply to the order and create adjustments for each order offer
* 8) If item contains non-combinable offers remove either the item or order adjustments based on discount value
* 9) Set final order item prices and reapply order offers
*
* Assumptions:
* 1) % off all items will be created as an item offer with no expression
* 2) $ off order will be created as an order offer
* 3) Order offers applies to the best price for each item (not just retail price)
* 4) Fulfillment offers apply to best price for each item (not just retail price)
* 5) Stackable only applies to the same offer type (i.e. a not stackable order offer can be used with item offers)
* 6) Fulfillment offers cannot be not combinable
* 7) Order offers cannot be FIXED_PRICE
* 8) FIXED_PRICE offers cannot be stackable
* 9) Non-combinable offers only apply to the order and order items, fulfillment group offers will always apply
*
*/
    @Override
    @Transactional("blTransactionManager")
    public void applyOffersToOrder(List<Offer> offers, Order order) throws PricingException {
        /*
        TODO rather than a threadlocal, we should update the "shouldPrice" boolean on the service API to
        use a richer object to describe the parameters of the pricing call. This object would include
        the pricing boolean, but would also include a list of activities to include or exclude in the
        call - see http://jira.broadleafcommerce.org/browse/BLC-664
        */
        // Skip the whole calculation when the thread-local OfferContext disables it.
        OfferContext offerContext = OfferContext.getOfferContext();
        if (offerContext == null || offerContext.executePromotionCalculation) {
            // Reset prices before re-applying promotions from scratch.
            order.updatePrices();
            PromotableOrder promotableOrder = promotableItemFactory.createPromotableOrder(order, false);
            // Drop offers that fail customer-level filtering.
            List<Offer> filteredOffers = orderOfferProcessor.filterOffers(offers, order.getCustomer());
            if ((filteredOffers == null) || (filteredOffers.isEmpty())) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("No offers applicable to this order.");
                }
            } else {
                // Partition surviving offers into order-level and item-level candidates.
                List<PromotableCandidateOrderOffer> qualifiedOrderOffers = new ArrayList<PromotableCandidateOrderOffer>();
                List<PromotableCandidateItemOffer> qualifiedItemOffers = new ArrayList<PromotableCandidateItemOffer>();
                itemOfferProcessor.filterOffers(promotableOrder, filteredOffers, qualifiedOrderOffers, qualifiedItemOffers);
                if (! (qualifiedItemOffers.isEmpty() && qualifiedOrderOffers.isEmpty())) {
                    // At this point, we should have a PromotableOrder that contains PromotableItems each of which
                    // has a list of candidatePromotions that might be applied.
                    // We also have a list of orderOffers that might apply and a list of itemOffers that might apply.
                    itemOfferProcessor.applyAndCompareOrderAndItemOffers(promotableOrder, qualifiedOrderOffers, qualifiedItemOffers);
                }
            }
            // Copy adjustments computed on the promotable wrapper back onto the real order.
            orderOfferProcessor.synchronizeAdjustmentsAndPrices(promotableOrder);
            order.setSubTotal(order.calculateSubTotal());
            order.finalizeItemPrices();
            // NOTE(review): the boolean arg appears to suppress re-pricing during save —
            // confirm against OrderService.save before relying on this.
            orderService.save(order, false);
        }
    }
    @Override
    @Transactional("blTransactionManager")
    public void applyFulfillmentGroupOffersToOrder(List<Offer> offers, Order order) throws PricingException {
        // Honor the thread-local kill switch for promotion calculation.
        OfferContext offerContext = OfferContext.getOfferContext();
        if (offerContext == null || offerContext.executePromotionCalculation) {
            PromotableOrder promotableOrder =
                    promotableItemFactory.createPromotableOrder(order, true);
            // Only FULFILLMENT_GROUP-typed offers are considered by this pass.
            List<Offer> possibleFGOffers = new ArrayList<Offer>();
            for (Offer offer : offers) {
                if (offer.getType().getType().equals(OfferType.FULFILLMENT_GROUP.getType())) {
                    possibleFGOffers.add(offer);
                }
            }
            // Apply customer-level filtering, then qualify each offer per fulfillment group.
            List<Offer> filteredOffers = orderOfferProcessor.filterOffers(possibleFGOffers, order.getCustomer());
            List<PromotableCandidateFulfillmentGroupOffer> qualifiedFGOffers = new ArrayList<PromotableCandidateFulfillmentGroupOffer>();
            for (Offer offer : filteredOffers) {
                fulfillmentGroupOfferProcessor.filterFulfillmentGroupLevelOffer(promotableOrder, qualifiedFGOffers, offer);
            }
            if (!qualifiedFGOffers.isEmpty()) {
                fulfillmentGroupOfferProcessor.applyAllFulfillmentGroupOffers(qualifiedFGOffers, promotableOrder);
                fulfillmentGroupOfferProcessor.calculateFulfillmentGroupTotal(promotableOrder);
                // Push the computed adjustments back onto the persistent order.
                orderOfferProcessor.synchronizeAdjustmentsAndPrices(promotableOrder);
            }
            orderService.save(order, false);
        }
    }
@Override
public boolean verifyMaxCustomerUsageThreshold(Customer customer, Offer offer) {
if (offer.isLimitedUsePerCustomer()) {
Long currentUses = offerAuditService.countUsesByCustomer(customer.getId(), offer.getId());
if (currentUses >= offer.getMaxUsesPerCustomer()) {
return false;
}
}
return true;
}
@Override
public boolean verifyMaxCustomerUsageThreshold(@NotNull Customer customer, OfferCode code) {
boolean underCodeMaxUses = true;
if (code.isLimitedUse()) {
Long currentCodeUses = offerAuditService.countOfferCodeUses(code.getId());
underCodeMaxUses = currentCodeUses < code.getMaxUses();
}
return underCodeMaxUses && verifyMaxCustomerUsageThreshold(customer, code.getOffer());
}
    @Override
    @SuppressWarnings("unchecked")
    public Set<Offer> getUniqueOffersFromOrder(Order order) {
        // Collect every Offer referenced by any adjustment on the order: order-level,
        // item-level, item price-detail-level, and fulfillment-group-level.
        HashSet<Offer> result = new HashSet<Offer>();
        // Maps an Adjustment to the Offer that produced it.
        Transformer adjustmentToOfferTransformer = new Transformer() {
            @Override
            public Object transform(Object input) {
                return ((Adjustment)input).getOffer();
            }
        };
        result.addAll(CollectionUtils.collect(order.getOrderAdjustments(), adjustmentToOfferTransformer));
        if (order.getOrderItems() != null) {
            for (OrderItem item : order.getOrderItems()) {
                result.addAll(CollectionUtils.collect(item.getOrderItemAdjustments(), adjustmentToOfferTransformer));
                //record usage for price details on the item as well
                if (item.getOrderItemPriceDetails() != null) {
                    for (OrderItemPriceDetail detail : item.getOrderItemPriceDetails()) {
                        result.addAll(CollectionUtils.collect(detail.getOrderItemPriceDetailAdjustments(), adjustmentToOfferTransformer));
                    }
                }
            }
        }
        if (order.getFulfillmentGroups() != null) {
            for (FulfillmentGroup fg : order.getFulfillmentGroups()) {
                result.addAll(CollectionUtils.collect(fg.getFulfillmentGroupAdjustments(), adjustmentToOfferTransformer));
            }
        }
        return result;
    }
    @Override
    public Map<Offer, OfferCode> getOffersRetrievedFromCodes(Order order) {
        // Convenience overload: derive the applied-offer set from the order itself.
        return getOffersRetrievedFromCodes(order.getAddedOfferCodes(), getUniqueOffersFromOrder(order));
    }
@Override
public Map<Offer, OfferCode> getOffersRetrievedFromCodes(List<OfferCode> codes, Set<Offer> appliedOffers) {
HashMap<Offer, OfferCode> offerToCodeMapping = new HashMap<Offer, OfferCode>();
for (OfferCode code : codes) {
if (appliedOffers.contains(code.getOffer())) {
offerToCodeMapping.put(code.getOffer(), code);
}
}
return offerToCodeMapping;
}
@Override
public CustomerOfferDao getCustomerOfferDao() {
return customerOfferDao;
}
@Override
public void setCustomerOfferDao(CustomerOfferDao customerOfferDao) {
this.customerOfferDao = customerOfferDao;
}
@Override
public OfferCodeDao getOfferCodeDao() {
return offerCodeDao;
}
@Override
public void setOfferCodeDao(OfferCodeDao offerCodeDao) {
this.offerCodeDao = offerCodeDao;
}
@Override
public OfferDao getOfferDao() {
return offerDao;
}
@Override
public void setOfferDao(OfferDao offerDao) {
this.offerDao = offerDao;
}
@Override
public OrderOfferProcessor getOrderOfferProcessor() {
return orderOfferProcessor;
}
@Override
public void setOrderOfferProcessor(OrderOfferProcessor orderOfferProcessor) {
this.orderOfferProcessor = orderOfferProcessor;
}
@Override
public ItemOfferProcessor getItemOfferProcessor() {
return itemOfferProcessor;
}
@Override
public void setItemOfferProcessor(ItemOfferProcessor itemOfferProcessor) {
this.itemOfferProcessor = itemOfferProcessor;
}
@Override
public FulfillmentGroupOfferProcessor getFulfillmentGroupOfferProcessor() {
return fulfillmentGroupOfferProcessor;
}
@Override
public void setFulfillmentGroupOfferProcessor(FulfillmentGroupOfferProcessor fulfillmentGroupOfferProcessor) {
this.fulfillmentGroupOfferProcessor = fulfillmentGroupOfferProcessor;
}
@Override
public PromotableItemFactory getPromotableItemFactory() {
return promotableItemFactory;
}
@Override
public void setPromotableItemFactory(PromotableItemFactory promotableItemFactory) {
this.promotableItemFactory = promotableItemFactory;
}
    @Override
    public OfferCode findOfferCodeById(Long id) {
        // Primary-key lookup; returns null when no OfferCode has this id.
        return offerCodeDao.readOfferCodeById(id);
    }
@Override
public OrderService getOrderService() {
return orderService;
}
@Override
public void setOrderService(OrderService orderService) {
this.orderService = orderService;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_service_OfferServiceImpl.java
|
4,510 |
public class TransportNodesListShardStoreMetaData extends TransportNodesOperationAction<TransportNodesListShardStoreMetaData.Request, TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData, TransportNodesListShardStoreMetaData.NodeRequest, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> {
private final IndicesService indicesService;
private final NodeEnvironment nodeEnv;
@Inject
public TransportNodesListShardStoreMetaData(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, NodeEnvironment nodeEnv) {
super(settings, clusterName, threadPool, clusterService, transportService);
this.indicesService = indicesService;
this.nodeEnv = nodeEnv;
}
public ActionFuture<NodesStoreFilesMetaData> list(ShardId shardId, boolean onlyUnallocated, String[] nodesIds, @Nullable TimeValue timeout) {
return execute(new Request(shardId, onlyUnallocated, nodesIds).timeout(timeout));
}
@Override
protected String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
protected String transportAction() {
return "/cluster/nodes/indices/shard/store";
}
@Override
protected Request newRequest() {
return new Request();
}
@Override
protected NodeRequest newNodeRequest() {
return new NodeRequest();
}
@Override
protected NodeRequest newNodeRequest(String nodeId, Request request) {
return new NodeRequest(nodeId, request);
}
@Override
protected NodeStoreFilesMetaData newNodeResponse() {
return new NodeStoreFilesMetaData();
}
@Override
protected NodesStoreFilesMetaData newResponse(Request request, AtomicReferenceArray responses) {
final List<NodeStoreFilesMetaData> nodeStoreFilesMetaDatas = Lists.newArrayList();
final List<FailedNodeException> failures = Lists.newArrayList();
for (int i = 0; i < responses.length(); i++) {
Object resp = responses.get(i);
if (resp instanceof NodeStoreFilesMetaData) { // will also filter out null response for unallocated ones
nodeStoreFilesMetaDatas.add((NodeStoreFilesMetaData) resp);
} else if (resp instanceof FailedNodeException) {
failures.add((FailedNodeException) resp);
}
}
return new NodesStoreFilesMetaData(clusterName, nodeStoreFilesMetaDatas.toArray(new NodeStoreFilesMetaData[nodeStoreFilesMetaDatas.size()]),
failures.toArray(new FailedNodeException[failures.size()]));
}
@Override
protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) throws ElasticsearchException {
if (request.unallocated) {
IndexService indexService = indicesService.indexService(request.shardId.index().name());
if (indexService == null) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
if (!indexService.hasShard(request.shardId.id())) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
}
IndexMetaData metaData = clusterService.state().metaData().index(request.shardId.index().name());
if (metaData == null) {
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
}
try {
return new NodeStoreFilesMetaData(clusterService.localNode(), listStoreMetaData(request.shardId));
} catch (IOException e) {
throw new ElasticsearchException("Failed to list store metadata for shard [" + request.shardId + "]", e);
}
}
    // Lists the store files for a shard. Prefers the live in-memory Store when the
    // shard is open on this node; otherwise falls back to scanning the on-disk
    // "index" directories of an fs-based store.
    private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException {
        // If the shard is live on this node, list files straight from its Store.
        IndexService indexService = indicesService.indexService(shardId.index().name());
        if (indexService != null) {
            InternalIndexShard indexShard = (InternalIndexShard) indexService.shard(shardId.id());
            if (indexShard != null) {
                return new StoreFilesMetaData(true, shardId, indexShard.store().list());
            }
        }
        // try and see if we can list unallocated
        IndexMetaData metaData = clusterService.state().metaData().index(shardId.index().name());
        if (metaData == null) {
            return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
        }
        // Only filesystem-based store types can be inspected on disk without an open shard.
        String storeType = metaData.settings().get("index.store.type", "fs");
        if (!storeType.contains("fs")) {
            return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
        }
        File[] shardLocations = nodeEnv.shardLocations(shardId);
        File[] shardIndexLocations = new File[shardLocations.length];
        for (int i = 0; i < shardLocations.length; i++) {
            shardIndexLocations[i] = new File(shardLocations[i], "index");
        }
        // No "index" directory anywhere: report an empty, unallocated store.
        boolean exists = false;
        for (File shardIndexLocation : shardIndexLocations) {
            if (shardIndexLocation.exists()) {
                exists = true;
                break;
            }
        }
        if (!exists) {
            return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
        }
        // Pair each data file with its recorded checksum (when one exists).
        Map<String, String> checksums = Store.readChecksums(shardIndexLocations);
        if (checksums == null) {
            checksums = ImmutableMap.of();
        }
        Map<String, StoreFileMetaData> files = Maps.newHashMap();
        for (File shardIndexLocation : shardIndexLocations) {
            File[] listedFiles = shardIndexLocation.listFiles();
            if (listedFiles == null) {
                continue;
            }
            for (File file : listedFiles) {
                // BACKWARD CKS SUPPORT: skip legacy checksum files.
                if (file.getName().endsWith(".cks")) {
                    continue;
                }
                // Skip current-format checksum files too; they are metadata, not data.
                if (Store.isChecksum(file.getName())) {
                    continue;
                }
                files.put(file.getName(), new StoreFileMetaData(file.getName(), file.length(), checksums.get(file.getName())));
            }
        }
        return new StoreFilesMetaData(false, shardId, files);
    }
@Override
protected boolean accumulateExceptions() {
return true;
}
public static class StoreFilesMetaData implements Iterable<StoreFileMetaData>, Streamable {
private boolean allocated;
private ShardId shardId;
private Map<String, StoreFileMetaData> files;
StoreFilesMetaData() {
}
public StoreFilesMetaData(boolean allocated, ShardId shardId, Map<String, StoreFileMetaData> files) {
this.allocated = allocated;
this.shardId = shardId;
this.files = files;
}
public boolean allocated() {
return allocated;
}
public ShardId shardId() {
return this.shardId;
}
public long totalSizeInBytes() {
long totalSizeInBytes = 0;
for (StoreFileMetaData file : this) {
totalSizeInBytes += file.length();
}
return totalSizeInBytes;
}
@Override
public Iterator<StoreFileMetaData> iterator() {
return files.values().iterator();
}
public boolean fileExists(String name) {
return files.containsKey(name);
}
public StoreFileMetaData file(String name) {
return files.get(name);
}
public static StoreFilesMetaData readStoreFilesMetaData(StreamInput in) throws IOException {
StoreFilesMetaData md = new StoreFilesMetaData();
md.readFrom(in);
return md;
}
@Override
public void readFrom(StreamInput in) throws IOException {
allocated = in.readBoolean();
shardId = ShardId.readShardId(in);
int size = in.readVInt();
files = Maps.newHashMapWithExpectedSize(size);
for (int i = 0; i < size; i++) {
StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
files.put(md.name(), md);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(allocated);
shardId.writeTo(out);
out.writeVInt(files.size());
for (StoreFileMetaData md : files.values()) {
md.writeTo(out);
}
}
}
static class Request extends NodesOperationRequest<Request> {
private ShardId shardId;
private boolean unallocated;
public Request() {
}
public Request(ShardId shardId, boolean unallocated, Set<String> nodesIds) {
super(nodesIds.toArray(new String[nodesIds.size()]));
this.shardId = shardId;
this.unallocated = unallocated;
}
public Request(ShardId shardId, boolean unallocated, String... nodesIds) {
super(nodesIds);
this.shardId = shardId;
this.unallocated = unallocated;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = ShardId.readShardId(in);
unallocated = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
out.writeBoolean(unallocated);
}
}
public static class NodesStoreFilesMetaData extends NodesOperationResponse<NodeStoreFilesMetaData> {
private FailedNodeException[] failures;
NodesStoreFilesMetaData() {
}
public NodesStoreFilesMetaData(ClusterName clusterName, NodeStoreFilesMetaData[] nodes, FailedNodeException[] failures) {
super(clusterName, nodes);
this.failures = failures;
}
public FailedNodeException[] failures() {
return failures;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodes = new NodeStoreFilesMetaData[in.readVInt()];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeStoreFilesMetaData.readListShardStoreNodeOperationResponse(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(nodes.length);
for (NodeStoreFilesMetaData response : nodes) {
response.writeTo(out);
}
}
}
static class NodeRequest extends NodeOperationRequest {
private ShardId shardId;
private boolean unallocated;
NodeRequest() {
}
NodeRequest(String nodeId, TransportNodesListShardStoreMetaData.Request request) {
super(request, nodeId);
this.shardId = request.shardId;
this.unallocated = request.unallocated;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = ShardId.readShardId(in);
unallocated = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
out.writeBoolean(unallocated);
}
}
public static class NodeStoreFilesMetaData extends NodeOperationResponse {
private StoreFilesMetaData storeFilesMetaData;
NodeStoreFilesMetaData() {
}
public NodeStoreFilesMetaData(DiscoveryNode node, StoreFilesMetaData storeFilesMetaData) {
super(node);
this.storeFilesMetaData = storeFilesMetaData;
}
public StoreFilesMetaData storeFilesMetaData() {
return storeFilesMetaData;
}
public static NodeStoreFilesMetaData readListShardStoreNodeOperationResponse(StreamInput in) throws IOException {
NodeStoreFilesMetaData resp = new NodeStoreFilesMetaData();
resp.readFrom(in);
return resp;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.readBoolean()) {
storeFilesMetaData = StoreFilesMetaData.readStoreFilesMetaData(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (storeFilesMetaData == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
storeFilesMetaData.writeTo(out);
}
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_indices_store_TransportNodesListShardStoreMetaData.java
|
394 |
@SuppressWarnings({ "serial", "unchecked" })
public class ORecordLazyMap extends OTrackedMap<OIdentifiable> implements ORecordLazyMultiValue {
final private byte recordType;
private ORecordMultiValueHelper.MULTIVALUE_CONTENT_TYPE status = MULTIVALUE_CONTENT_TYPE.EMPTY;
protected boolean marshalling = false;
private boolean autoConvertToRecord = true;
public ORecordLazyMap(final ODocument iSourceRecord) {
super(iSourceRecord);
this.recordType = ODocument.RECORD_TYPE;
}
public ORecordLazyMap(final ODocument iSourceRecord, final byte iRecordType) {
super(iSourceRecord);
this.recordType = iRecordType;
if (iSourceRecord != null) {
if (!iSourceRecord.isLazyLoad())
// SET AS NON-LAZY LOAD THE COLLECTION TOO
autoConvertToRecord = false;
}
}
public ORecordLazyMap(final ODocument iSourceRecord, final Map<Object, OIdentifiable> iOrigin) {
this(iSourceRecord);
if (iOrigin != null && !iOrigin.isEmpty())
putAll(iOrigin);
}
@Override
public boolean containsValue(final Object o) {
return super.containsValue(o);
}
@Override
public OIdentifiable get(final Object iKey) {
if (iKey == null)
return null;
final String key = iKey.toString();
if (autoConvertToRecord)
convertLink2Record(key);
return super.get(key);
}
@Override
public OIdentifiable put(final Object iKey, OIdentifiable iValue) {
if (status == MULTIVALUE_CONTENT_TYPE.ALL_RIDS && iValue instanceof ORecord<?> && !iValue.getIdentity().isNew())
// IT'S BETTER TO LEAVE ALL RIDS AND EXTRACT ONLY THIS ONE
iValue = iValue.getIdentity();
else
status = ORecordMultiValueHelper.updateContentType(status, iValue);
return super.put(iKey, iValue);
}
@Override
public Collection<OIdentifiable> values() {
convertLinks2Records();
return super.values();
}
@Override
public OIdentifiable remove(Object o) {
final OIdentifiable result = super.remove(o);
if (size() == 0)
status = MULTIVALUE_CONTENT_TYPE.EMPTY;
return result;
}
@Override
public void clear() {
super.clear();
status = MULTIVALUE_CONTENT_TYPE.EMPTY;
}
@Override
public String toString() {
return ORecordMultiValueHelper.toString(this);
}
public boolean isAutoConvertToRecord() {
return autoConvertToRecord;
}
public void setAutoConvertToRecord(boolean convertToRecord) {
this.autoConvertToRecord = convertToRecord;
}
public void convertLinks2Records() {
if (status == MULTIVALUE_CONTENT_TYPE.ALL_RECORDS || !autoConvertToRecord)
// PRECONDITIONS
return;
for (Object k : keySet())
convertLink2Record(k);
status = MULTIVALUE_CONTENT_TYPE.ALL_RECORDS;
}
public boolean convertRecords2Links() {
if (status == MULTIVALUE_CONTENT_TYPE.ALL_RIDS)
// PRECONDITIONS
return true;
boolean allConverted = true;
for (Object k : keySet())
if (!convertRecord2Link(k))
allConverted = false;
if (allConverted)
status = MULTIVALUE_CONTENT_TYPE.ALL_RIDS;
return allConverted;
}
  /**
   * Replaces the value stored under iKey with its ORID, saving the record first
   * if it is dirty. Returns true when the entry now holds (or already held) a
   * link; false when the value is null or is a new, not-yet-persisted record.
   */
  private boolean convertRecord2Link(final Object iKey) {
    if (status == MULTIVALUE_CONTENT_TYPE.ALL_RIDS)
      return true;
    final Object value = super.get(iKey);
    if (value != null)
      if (value instanceof ORecord<?> && !((ORecord<?>) value).getIdentity().isNew()) {
        if (((ORecord<?>) value).isDirty())
          ODatabaseRecordThreadLocal.INSTANCE.get().save((ORecordInternal<?>) value);
        // Suppress change tracking while swapping record -> RID in place.
        marshalling = true;
        try {
          // OVERWRITE
          super.put(iKey, ((ORecord<?>) value).getIdentity());
        } finally {
          marshalling = false;
        }
        // CONVERTED
        return true;
      } else if (value instanceof ORID)
        // ALREADY CONVERTED
        return true;
    return false;
  }
/**
* Convert the item with the received key to a record.
*
* @param iKey
* Key of the item to convert
*/
  private void convertLink2Record(final Object iKey) {
    if (status == MULTIVALUE_CONTENT_TYPE.ALL_RECORDS)
      return;
    final Object value;
    if (iKey instanceof ORID)
      value = iKey;
    else
      value = super.get(iKey);
    if (value != null && value instanceof ORID) {
      final ORID rid = (ORID) value;
      // Disable change events while the RID is replaced with the loaded record.
      marshalling = true;
      try {
        try {
          // OVERWRITE IT
          super.put(iKey, rid.getRecord());
        } catch (ORecordNotFoundException e) {
          // IGNORE THIS: a dangling link simply stays stored as a RID.
        }
      } finally {
        marshalling = false;
      }
    }
  }
@Override
public OTrackedMap<OIdentifiable> setDirty() {
if (!marshalling)
return super.setDirty();
return this;
}
@Override
protected void fireCollectionChangedEvent(final OMultiValueChangeEvent<Object, OIdentifiable> event) {
if (!marshalling)
super.fireCollectionChangedEvent(event);
}
public byte getRecordType() {
return recordType;
}
public Iterator<OIdentifiable> rawIterator() {
return new OLazyRecordIterator(sourceRecord, super.values().iterator(), false);
}
public boolean detach() {
return convertRecords2Links();
}
@Override
public int size() {
return super.size();
}
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_db_record_ORecordLazyMap.java
|
771 |
/**
 * Response to an index (document write) operation, reporting where the document
 * was written, its resulting version, and whether it was newly created.
 */
public class IndexResponse extends ActionResponse {
    private String index;
    private String id;
    private String type;
    private long version;
    private boolean created;
    // No-arg constructor for deserialization via readFrom().
    public IndexResponse() {
    }
    public IndexResponse(String index, String type, String id, long version, boolean created) {
        this.index = index;
        this.id = id;
        this.type = type;
        this.version = version;
        this.created = created;
    }
    /**
     * The index the document was indexed into.
     */
    public String getIndex() {
        return this.index;
    }
    /**
     * The type of the document indexed.
     */
    public String getType() {
        return this.type;
    }
    /**
     * The id of the document indexed.
     */
    public String getId() {
        return this.id;
    }
    /**
     * Returns the current version of the doc indexed.
     */
    public long getVersion() {
        return this.version;
    }
    /**
     * Returns true if the document was created, false if updated.
     */
    public boolean isCreated() {
        return this.created;
    }
    // Wire order must stay in sync with writeTo(): index, type, id, version, created.
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        index = in.readSharedString();
        type = in.readSharedString();
        id = in.readString();
        version = in.readLong();
        created = in.readBoolean();
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeSharedString(index);
        out.writeSharedString(type);
        out.writeString(id);
        out.writeLong(version);
        out.writeBoolean(created);
    }
}
| 0true
|
src_main_java_org_elasticsearch_action_index_IndexResponse.java
|
1,326 |
public class JDTModuleManager extends LazyModuleManager {
private JDTModelLoader modelLoader;
private IJavaProject javaProject;
private Set<String> sourceModules;
private TypeChecker typeChecker;
private boolean loadDependenciesFromModelLoaderFirst;
public Set<String> getSourceModules() {
return sourceModules;
}
public IJavaProject getJavaProject() {
return javaProject;
}
public JDTModuleManager(Context context, IJavaProject javaProject) {
super(context);
this.javaProject = javaProject;
if (javaProject == null) {
loadDependenciesFromModelLoaderFirst = false;
} else {
loadDependenciesFromModelLoaderFirst = CeylonBuilder.loadDependenciesFromModelLoaderFirst(javaProject.getProject());
}
sourceModules = new HashSet<String>();
if (! loadDependenciesFromModelLoaderFirst) {
sourceModules.add(Module.LANGUAGE_MODULE_NAME);
}
}
/*
* TODO : Remove when the package creation (and module binding) in ModuleManager will be done with a method
* that can be overriden (createPackage, as suggested here - a "" name parameter correspond to the empty package)
* Then we can only override this new createPackage method with our already-existing one
*/
@Override
public void initCoreModules() {
Modules modules = getContext().getModules();
if ( modules == null ) {
modules = new Modules();
//build default module (module in which packages belong to when not explicitly under a module
final List<String> defaultModuleName = Collections.singletonList(Module.DEFAULT_MODULE_NAME);
final JDTModule defaultModule = createModule(defaultModuleName, "unversioned");
defaultModule.setDefault(true);
defaultModule.setAvailable(true);
defaultModule.setProjectModule();
modules.getListOfModules().add(defaultModule);
modules.setDefaultModule(defaultModule);
//create language module and add it as a dependency of defaultModule
//since packages outside a module cannot declare dependencies
final List<String> languageName = Arrays.asList("ceylon", "language");
Module languageModule = createModule(languageName, TypeChecker.LANGUAGE_MODULE_VERSION);
languageModule.setLanguageModule(languageModule);
languageModule.setAvailable(false); //not available yet
modules.setLanguageModule(languageModule);
modules.getListOfModules().add(languageModule);
defaultModule.addImport(new ModuleImport(languageModule, false, false));
defaultModule.setLanguageModule(languageModule);
getContext().setModules(modules);
//build empty package
createPackage("", defaultModule);
}
super.initCoreModules();
}
@Override
protected Package createPackage(String pkgName, Module module) {
return getModelLoader().findOrCreatePackage(module, pkgName);
}
@Override
public synchronized JDTModelLoader getModelLoader() {
if(modelLoader == null){
Modules modules = getContext().getModules();
modelLoader = new JDTModelLoader(this, modules);
}
return modelLoader;
}
public boolean isExternalModuleLoadedFromSource(String moduleName){
return sourceModules.contains(moduleName);
}
/**
* Return true if this module should be loaded from source we are compiling
* and not from its compiled artifact at all. Returns false by default, so
* modules will be loaded from their compiled artifact.
*/
@Override
protected boolean isModuleLoadedFromSource(String moduleName){
if (isExternalModuleLoadedFromSource(moduleName)) {
return true;
}
if (isModuleLoadedFromCompiledSource(moduleName)) {
return true;
}
return false;
}
    // True when the module's package exists in a source folder of this project
    // or, when dependencies are not resolved via the model loader first, of any
    // accessible referenced project.
    public boolean isModuleLoadedFromCompiledSource(String moduleName) {
        // Without a Java project there is no workspace source to inspect.
        if (javaProject == null) {
            return false;
        }
        // Check this project's own source folders first.
        if (moduleFileInProject(moduleName, javaProject)) {
            return true;
        }
        if (!loadDependenciesFromModelLoaderFirst) {
            try {
                IProject project = javaProject.getProject();
                for (IProject p: project.getReferencedProjects()) {
                    if (p.isAccessible() &&
                            moduleFileInProject(moduleName, JavaCore.create(p))) {
                        return true;
                    }
                }
            }
            catch (CoreException e) {
                // NOTE(review): failures scanning referenced projects are only
                // printed, not reported — consider proper logging.
                e.printStackTrace();
            }
        }
        return false;
    }
// True if the given Java project has a non-archive source folder containing a
// package fragment whose name matches the module name (i.e. the module's
// sources live directly in that project).
private static boolean moduleFileInProject(String moduleName, IJavaProject p) {
    if (p == null) {
        return false;
    }
    try {
        for (IPackageFragmentRoot sourceFolder: p.getPackageFragmentRoots()) {
            if (!sourceFolder.isArchive() &&
                    sourceFolder.exists() &&
                    sourceFolder.getKind()==IPackageFragmentRoot.K_SOURCE &&
                    sourceFolder.getPackageFragment(moduleName).exists()) {
                return true;
            }
        }
    }
    catch (JavaModelException e) {
        // A broken Java model is treated as "module not found in this project".
        e.printStackTrace();
    }
    return false;
}
/**
 * Creates a {@link JDTModule} for the given module name/version, collecting the
 * package fragment roots of the current Java project that provide the module:
 * all local source roots for the default module, the JDK roots for (Oracle) JDK
 * modules, and otherwise any non-jar root (excluding the Ceylon classes output
 * folder) containing a package named after the module.
 */
@Override
protected JDTModule createModule(List<String> moduleName, String version) {
    JDTModule module = null;
    String moduleNameString = Util.getName(moduleName);
    List<IPackageFragmentRoot> roots = new ArrayList<IPackageFragmentRoot>();
    if (javaProject != null) {
        try {
            if (moduleNameString.equals(Module.DEFAULT_MODULE_NAME)) {
                // Add the list of source package fragment roots
                for (IPackageFragmentRoot root : javaProject.getPackageFragmentRoots()) {
                    if (root.exists()
                            && javaProject.isOnClasspath(root)) {
                        IClasspathEntry entry = root.getResolvedClasspathEntry();
                        if (entry.getEntryKind() == IClasspathEntry.CPE_SOURCE && !root.isExternal()) {
                            roots.add(root);
                        }
                    }
                }
            } else {
                for (IPackageFragmentRoot root : javaProject.getPackageFragmentRoots()) {
                    if (root.exists()
                            && javaProject.isOnClasspath(root)) {
                        if (JDKUtils.isJDKModule(moduleNameString)) {
                            // find the first package that exists in this root
                            for (String pkg : JDKUtils.getJDKPackagesByModule(moduleNameString)) {
                                if (root.getPackageFragment(pkg).exists()) {
                                    roots.add(root);
                                    break;
                                }
                            }
                        } else if (JDKUtils.isOracleJDKModule(moduleNameString)) {
                            // find the first package that exists in this root
                            for (String pkg : JDKUtils.getOracleJDKPackagesByModule(moduleNameString)) {
                                if (root.getPackageFragment(pkg).exists()) {
                                    roots.add(root);
                                    break;
                                }
                            }
                        } else if (! (root instanceof JarPackageFragmentRoot)
                                && ! CeylonBuilder.isInCeylonClassesOutputFolder(root.getPath())) {
                            // Regular module: a root qualifies if it contains a
                            // package whose name equals the module name.
                            String packageToSearch = moduleNameString;
                            if (root.getPackageFragment(packageToSearch).exists()) {
                                roots.add(root);
                            }
                        }
                    }
                }
            }
        } catch (JavaModelException e) {
            // Classpath problems simply leave the root list (possibly) empty.
            e.printStackTrace();
        }
    }
    module = new JDTModule(this, roots);
    module.setName(moduleName);
    module.setVersion(version);
    setupIfJDKModule(module);
    return module;
}
/**
 * Resolves the given module from an artifact found in the repositories.
 * <p>
 * When the module is meant to be type-checked from source but the resolved
 * artifact is a compiled {@code .car}, the corresponding {@code .src} artifact
 * is looked up instead; a missing source artifact is reported as a build error
 * on the module import. Resolution failures are logged to the Eclipse log and
 * remembered on the {@link JDTModule}.
 */
@Override
public void resolveModule(ArtifactResult artifact, Module module, ModuleImport moduleImport,
        LinkedList<Module> dependencyTree, List<PhasedUnits> phasedUnitsOfDependencies, boolean forCompiledModule) {
    File artifactFile = artifact.artifact();
    if (isModuleLoadedFromSource(module.getNameAsString()) && artifactFile.getName().endsWith(ArtifactContext.CAR)) {
        // Module must be compiled from source: swap the .car artifact for its
        // source archive when one can be found.
        ArtifactContext artifactContext = new ArtifactContext(module.getNameAsString(), module.getVersion(), ArtifactContext.SRC);
        RepositoryManager repositoryManager = getContext().getRepositoryManager();
        Exception exceptionOnGetArtifact = null;
        ArtifactResult sourceArtifact = null;
        try {
            sourceArtifact = repositoryManager.getArtifactResult(artifactContext);
        } catch (Exception e) {
            exceptionOnGetArtifact = e;
        }
        if (sourceArtifact == null) {
            ModuleHelper.buildErrorOnMissingArtifact(artifactContext, module, moduleImport, dependencyTree, exceptionOnGetArtifact, this);
        } else {
            artifact = sourceArtifact;
        }
    }
    if (module instanceof JDTModule) {
        ((JDTModule) module).setArtifact(artifact);
    }
    if (!isModuleLoadedFromCompiledSource(module.getNameAsString())
            && artifact.artifact().getName().endsWith(".src")) {
        // Remember that this module is provided as a source archive.
        // (The previous code also computed the sibling .car path into a local
        // variable here, but never used it; that dead code has been removed.)
        sourceModules.add(module.getNameAsString());
    }
    try {
        super.resolveModule(artifact, module, moduleImport, dependencyTree, phasedUnitsOfDependencies, forCompiledModule);
    } catch (Exception e) {
        if (module instanceof JDTModule) {
            CeylonPlugin.getInstance().getLog().log(new Status(IStatus.ERROR, CeylonPlugin.PLUGIN_ID, "Failed resolving module " + module.getSignature(), e));
            ((JDTModule) module).setResolutionException(e);
        }
    }
}
@Override
public void prepareForTypeChecking() {
    // Ensure the language module and other standard modules are loaded into
    // the model before type checking starts.
    getModelLoader().loadStandardModules();
}
/**
 * Artifact extensions to search, in priority order: compiled artifacts first
 * when dependencies are resolved through the model loader, sources first
 * otherwise.
 */
@Override
public Iterable<String> getSearchedArtifactExtensions() {
    return loadDependenciesFromModelLoaderFirst
            ? Arrays.asList("car", "jar", "src")
            : Arrays.asList("jar", "src", "car");
}
/**
 * Records that the module whose descriptor is being visited is compiled from
 * source, then delegates to the default handling.
 */
@Override
public void visitModuleFile() {
    Package currentPkg = getCurrentPackage();
    sourceModules.add(currentPkg.getNameAsString());
    super.visitModuleFile();
}
// Todo : to push into the base ModelManager class
// Reports an error for a module descriptor found at the root of the source
// hierarchy, where module declarations are not allowed.
public void addTopLevelModuleError() {
    addErrorToModule(new ArrayList<String>(),
            "A module cannot be defined at the top level of the hierarchy");
}
/**
 * Phased units for modules that live outside the current project: either
 * external source archives from a repository, or archives produced by other
 * workspace projects that this project references.
 */
public class ExternalModulePhasedUnits extends PhasedUnits {

    // Set while parsing an archive produced by a referenced workspace project;
    // selects CrossProjectPhasedUnit instead of ExternalPhasedUnit.
    private IProject referencedProject = null;
    // private VirtualFile sourceDirectory = null;

    public ExternalModulePhasedUnits(Context context,
            ModuleManagerFactory moduleManagerFactory) {
        super(context, moduleManagerFactory);
    }

    @Override
    protected void parseFile(final VirtualFile file, final VirtualFile srcDir)
            throws Exception {
        // Only Ceylon source files are parsed; other resources are ignored.
        if (file.getName().endsWith(".ceylon")) {
            parseFile(file, srcDir, getModuleManager().getCurrentPackage());
        }
    }

    /*
     * This additional method is for when we have to parse a new file into a
     * specific package of an existing archive.
     */
    public void parseFile(final VirtualFile file, final VirtualFile srcDir, final Package pkg) {
        PhasedUnit phasedUnit = new CeylonSourceParser<PhasedUnit>() {
            @Override
            protected String getCharset() {
                try {
                    //TODO: is this correct? does this file actually
                    //  live in the project, or is it external?
                    //  should VirtualFile have a getCharset()?
                    return javaProject != null ?
                            javaProject.getProject().getDefaultCharset()
                            : ResourcesPlugin.getWorkspace().getRoot().getDefaultCharset();
                }
                catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }

            @Override
            protected PhasedUnit createPhasedUnit(CompilationUnit cu, Package pkg, CommonTokenStream tokenStream) {
                if (referencedProject == null) {
                    // Plain external archive (e.g. resolved from a repository).
                    return new ExternalPhasedUnit(file, srcDir, cu,
                            pkg, getModuleManager(),
                            getTypeChecker(), tokenStream.getTokens());
                }
                else {
                    // Archive built by another project in the same workspace.
                    return new CrossProjectPhasedUnit(file, srcDir, cu,
                            pkg, getModuleManager(),
                            getTypeChecker(), tokenStream.getTokens(), referencedProject);
                }
            }
        }.parseFileToPhasedUnit(getModuleManager(), getTypeChecker(), file, srcDir, pkg);
        addPhasedUnit(file, phasedUnit);
    }

    @Override
    public void parseUnit(VirtualFile srcDir) {
        // sourceDirectory = srcDir;
        // Detect whether this archive comes from the Ceylon output folder of a
        // referenced project, so its units can be linked back to that project.
        if (srcDir instanceof ZipFileVirtualFile && javaProject != null) {
            ZipFileVirtualFile zipFileVirtualFile = (ZipFileVirtualFile) srcDir;
            String archiveName = zipFileVirtualFile.getPath();
            try {
                for (IProject refProject : javaProject.getProject().getReferencedProjects()) {
                    if (archiveName.contains(CeylonBuilder.getCeylonModulesOutputDirectory(refProject).getAbsolutePath())) {
                        referencedProject = refProject;
                        break;
                    }
                }
            } catch (CoreException e) {
                // Referenced projects could not be resolved; fall back to
                // treating the archive as a plain external one.
                e.printStackTrace();
            }
        }
        super.parseUnit(srcDir);
    }
}
@Override
protected PhasedUnits createPhasedUnits() {
    // Hand this module manager to the phased units so module resolution
    // triggered during parsing goes through the JDT-aware logic above.
    ModuleManagerFactory moduleManagerFactory = new ModuleManagerFactory() {
        @Override
        public ModuleManager createModuleManager(Context context) {
            return JDTModuleManager.this;
        }
    };
    return new ExternalModulePhasedUnits(getContext(), moduleManagerFactory);
}
// The type checker this module manager works for.
public TypeChecker getTypeChecker() {
    return typeChecker;
}

public void setTypeChecker(TypeChecker typeChecker) {
    this.typeChecker = typeChecker;
}

// True when dependencies are resolved through the model loader before falling
// back to source archives (affects artifact extension search order).
public boolean isLoadDependenciesFromModelLoaderFirst() {
    return loadDependenciesFromModelLoaderFirst;
}
/**
 * Finds the Ceylon archive module whose source archive contains the given
 * source unit path, or null when the path belongs to no known archive.
 */
public JDTModule getArchiveModuleFromSourcePath(String sourceUnitPath) {
    for (Module candidate : typeChecker.getContext().getModules().getListOfModules()) {
        if (!(candidate instanceof JDTModule)) {
            continue;
        }
        JDTModule jdtModule = (JDTModule) candidate;
        if (jdtModule.isCeylonArchive()
                && sourceUnitPath.startsWith(jdtModule.getSourceArchivePath() + "!")) {
            return jdtModule;
        }
    }
    return null;
}
// Convenience overload taking an Eclipse path.
public JDTModule getArchiveModuleFromSourcePath(IPath sourceUnitPath) {
    return getArchiveModuleFromSourcePath(sourceUnitPath.toOSString());
}

@Override
protected void addToPhasedUnitsOfDependencies(
        PhasedUnits modulePhasedUnits,
        List<PhasedUnits> phasedUnitsOfDependencies, Module module) {
    super.addToPhasedUnitsOfDependencies(modulePhasedUnits,
            phasedUnitsOfDependencies, module);
    // Also remember the source phased units on the module itself so they can
    // be retrieved later (e.g. for navigation into source archives).
    if (module instanceof JDTModule) {
        ((JDTModule) module).setSourcePhasedUnits((ExternalModulePhasedUnits) modulePhasedUnits);
    }
}
/**
 * Called for every visited module: JDK modules needed by a compiled module are
 * added to the model loader's class path.
 */
@Override
public void visitedModule(Module module, boolean forCompiledModule) {
    if (forCompiledModule && AbstractModelLoader.isJDKModule(module.getNameAsString())) {
        // Use the accessor (not the field) so the loader is lazily created if
        // needed, consistent with the rest of this class.
        getModelLoader().addJDKModuleToClassPath(module);
    }
}
}
| 1no label
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_core_model_JDTModuleManager.java
|
277 |
/**
 * Base class for text-based command requests (e.g. SQL). Holds the command text
 * and handles (de)serialization of the text plus any simple or composite-key
 * parameters to/from the binary wire format. The write order in toStream() and
 * the read order in fromStream() must stay in lockstep.
 */
@SuppressWarnings("serial")
public abstract class OCommandRequestTextAbstract extends OCommandRequestAbstract implements OCommandRequestText {
    protected String text;

    protected OCommandRequestTextAbstract() {
    }

    protected OCommandRequestTextAbstract(final String iText) {
        if (iText == null)
            throw new IllegalArgumentException("Text cannot be null");

        text = iText.trim();
    }

    /**
     * Delegates the execution to the configured command executor.
     */
    @SuppressWarnings("unchecked")
    public <RET> RET execute(final Object... iArgs) {
        setParameters(iArgs);
        // The storage of the thread-bound database executes the command.
        return (RET) ODatabaseRecordThreadLocal.INSTANCE.get().getStorage().command(this);
    }

    public String getText() {
        return text;
    }

    public OCommandRequestText setText(final String iText) {
        this.text = iText;
        return this;
    }

    public OSerializableStream fromStream(byte[] iStream) throws OSerializationException {
        final OMemoryStream buffer = new OMemoryStream(iStream);
        fromStream(buffer);
        return this;
    }

    public byte[] toStream() throws OSerializationException {
        final OMemoryStream buffer = new OMemoryStream();
        return toStream(buffer);
    }

    @Override
    public String toString() {
        return "?." + text;
    }

    // Serializes: text, then flag + document of simple parameters, then flag +
    // document of composite-key parameters.
    protected byte[] toStream(final OMemoryStream buffer) {
        buffer.set(text);

        if (parameters == null || parameters.size() == 0) {
            // simple params are absent
            buffer.set(false);
            // composite keys are absent
            buffer.set(false);
        } else {
            final Map<Object, Object> params = new HashMap<Object, Object>();
            final Map<Object, byte[]> compositeKeyParams = new HashMap<Object, byte[]>();

            for (final Entry<Object, Object> paramEntry : parameters.entrySet())
                if (paramEntry.getValue() instanceof OCompositeKey) {
                    // Composite keys use their dedicated binary serializer.
                    final OCompositeKey compositeKey = (OCompositeKey) paramEntry.getValue();
                    final int bufferSize = OCompositeKeySerializer.INSTANCE.getObjectSize(compositeKey);
                    final byte[] stream = new byte[bufferSize];
                    OCompositeKeySerializer.INSTANCE.serialize(compositeKey, stream, 0);
                    compositeKeyParams.put(paramEntry.getKey(), stream);
                } else if (paramEntry.getValue() instanceof String) {
                    // Strings go through the string serializer so the original
                    // type can be recovered on deserialization.
                    final StringBuilder builder = new StringBuilder();
                    ORecordSerializerStringAbstract.simpleValueToStream(builder, OType.STRING, paramEntry.getValue());
                    params.put(paramEntry.getKey(), builder.toString());
                } else
                    params.put(paramEntry.getKey(), paramEntry.getValue());

            buffer.set(!params.isEmpty());
            if (!params.isEmpty()) {
                final ODocument param = new ODocument();
                param.field("params", params);
                buffer.set(param.toStream());
            }

            buffer.set(!compositeKeyParams.isEmpty());
            if (!compositeKeyParams.isEmpty()) {
                final ODocument compositeKey = new ODocument();
                compositeKey.field("compositeKeyParams", compositeKeyParams);
                buffer.set(compositeKey.toStream());
            }
        }

        return buffer.toByteArray();
    }

    // Mirror of toStream(): reads text, then optional simple parameters, then
    // optional composite-key parameters. Keys starting with a digit are
    // positional parameter indexes and are stored under Integer keys.
    protected void fromStream(final OMemoryStream buffer) {
        text = buffer.getAsString();

        parameters = null;

        final boolean simpleParams = buffer.getAsBoolean();
        if (simpleParams) {
            final byte[] paramBuffer = buffer.getAsByteArray();
            final ODocument param = new ODocument();
            param.fromStream(paramBuffer);

            Map<String, Object> params = param.field("params");
            parameters = new HashMap<Object, Object>();
            for (Entry<String, Object> p : params.entrySet()) {
                final Object value;
                if (p.getValue() instanceof String)
                    value = ORecordSerializerStringAbstract.getTypeValue((String) p.getValue());
                else
                    value = p.getValue();

                if (Character.isDigit(p.getKey().charAt(0)))
                    parameters.put(Integer.parseInt(p.getKey()), value);
                else
                    parameters.put(p.getKey(), value);
            }
        }

        final boolean compositeKeyParamsPresent = buffer.getAsBoolean();
        if (compositeKeyParamsPresent) {
            final byte[] paramBuffer = buffer.getAsByteArray();
            final ODocument param = new ODocument();
            param.fromStream(paramBuffer);

            final Map<String, Object> compositeKeyParams = param.field("compositeKeyParams");
            if (parameters == null)
                parameters = new HashMap<Object, Object>();
            for (final Entry<String, Object> p : compositeKeyParams.entrySet()) {
                final Object value = OCompositeKeySerializer.INSTANCE
                        .deserialize(OStringSerializerHelper.getBinaryContent(p.getValue()), 0);
                if (Character.isDigit(p.getKey().charAt(0)))
                    parameters.put(Integer.parseInt(p.getKey()), value);
                else
                    parameters.put(p.getKey(), value);
            }
        }
    }
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_command_OCommandRequestTextAbstract.java
|
1,338 |
/**
 * Base request for cluster state update operations that target one or more
 * indices.
 */
public abstract class IndicesClusterStateUpdateRequest<T extends IndicesClusterStateUpdateRequest<T>> extends ClusterStateUpdateRequest<T> {

    private String[] indices;

    /**
     * Sets the indices the operation needs to be executed on
     */
    @SuppressWarnings("unchecked")
    public T indices(String[] indices) {
        this.indices = indices;
        return (T) this;
    }

    /**
     * Returns the indices the operation needs to be executed on
     */
    public String[] indices() {
        return indices;
    }
}
| 0true
|
src_main_java_org_elasticsearch_cluster_ack_IndicesClusterStateUpdateRequest.java
|
405 |
// Register a completion listener keyed on this request's snapshot id: it
// forwards success or failure to the caller's listener exactly once, then
// deregisters itself from the snapshots service.
snapshotsService.addListener(new SnapshotsService.SnapshotCompletionListener() {
    SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());

    @Override
    public void onSnapshotCompletion(SnapshotId snapshotId, SnapshotInfo snapshot) {
        // Ignore notifications for other snapshots.
        if (this.snapshotId.equals(snapshotId)) {
            listener.onResponse(new CreateSnapshotResponse(snapshot));
            snapshotsService.removeListener(this);
        }
    }

    @Override
    public void onSnapshotFailure(SnapshotId snapshotId, Throwable t) {
        if (this.snapshotId.equals(snapshotId)) {
            listener.onFailure(t);
            snapshotsService.removeListener(this);
        }
    }
});
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_create_TransportCreateSnapshotAction.java
|
160 |
/**
 * Bundles the storage-backend transaction and all index-backend transactions
 * of one Titan transaction, and offers convenience read/write methods that
 * route through the (optionally disabled) KCVS caches.
 */
public class BackendTransaction implements LoggableTransaction {

    private static final Logger log =
            LoggerFactory.getLogger(BackendTransaction.class);

    // Minimum number of keys before a multi-key read is parallelized.
    public static final int MIN_TASKS_TO_PARALLELIZE = 2;

    //Assumes 64 bit key length as specified in IDManager
    public static final StaticBuffer EDGESTORE_MIN_KEY = BufferUtil.zeroBuffer(8);
    public static final StaticBuffer EDGESTORE_MAX_KEY = BufferUtil.oneBuffer(8);

    private final CacheTransaction storeTx;
    private final BaseTransactionConfig txConfig;
    private final StoreFeatures storeFeatures;

    private final KCVSCache edgeStore;
    private final KCVSCache indexStore;
    private final KCVSCache txLogStore;

    private final Duration maxReadTime;

    // May be null; used only to parallelize multi-key reads.
    private final Executor threadPool;

    private final Map<String, IndexTransaction> indexTx;

    // True once any acquire*Lock method has been called in this transaction.
    private boolean acquiredLock = false;
    private boolean cacheEnabled = true;

    public BackendTransaction(CacheTransaction storeTx, BaseTransactionConfig txConfig,
                              StoreFeatures features, KCVSCache edgeStore, KCVSCache indexStore,
                              KCVSCache txLogStore, Duration maxReadTime,
                              Map<String, IndexTransaction> indexTx, Executor threadPool) {
        this.storeTx = storeTx;
        this.txConfig = txConfig;
        this.storeFeatures = features;
        this.edgeStore = edgeStore;
        this.indexStore = indexStore;
        this.txLogStore = txLogStore;
        this.maxReadTime = maxReadTime;
        this.indexTx = indexTx;
        this.threadPool = threadPool;
    }

    public boolean hasAcquiredLock() {
        return acquiredLock;
    }

    public StoreTransaction getStoreTransaction() {
        return storeTx;
    }

    public ExternalCachePersistor getTxLogPersistor() {
        return new ExternalCachePersistor(txLogStore, storeTx);
    }

    public BaseTransactionConfig getBaseTransactionConfig() {
        return txConfig;
    }

    public IndexTransaction getIndexTransaction(String index) {
        Preconditions.checkArgument(StringUtils.isNotBlank(index));
        IndexTransaction itx = indexTx.get(index);
        Preconditions.checkNotNull(itx, "Unknown index: " + index);
        return itx;
    }

    // Toggle cache usage for subsequent reads in this transaction.
    public void disableCache() {
        this.cacheEnabled = false;
    }

    public void enableCache() {
        this.cacheEnabled = true;
    }

    // Commits only the storage backend, leaving index transactions untouched.
    public void commitStorage() throws BackendException {
        storeTx.commit();
    }

    // Commits each index transaction individually, collecting (rather than
    // throwing) any failures keyed by index name.
    public Map<String, Throwable> commitIndexes() {
        Map<String, Throwable> exceptions = new HashMap<String, Throwable>(indexTx.size());
        for (Map.Entry<String, IndexTransaction> txentry : indexTx.entrySet()) {
            try {
                txentry.getValue().commit();
            } catch (Throwable e) {
                exceptions.put(txentry.getKey(), e);
            }
        }
        return exceptions;
    }

    @Override
    public void commit() throws BackendException {
        // Storage commit first; index commits follow and may throw.
        storeTx.commit();
        for (IndexTransaction itx : indexTx.values()) itx.commit();
    }

    /**
     * Rolls back all transactions and makes sure that this does not get cut short
     * by exceptions. If exceptions occur, the storage exception takes priority on re-throw.
     * @throws BackendException
     */
    @Override
    public void rollback() throws BackendException {
        Throwable excep = null;
        for (IndexTransaction itx : indexTx.values()) {
            try {
                itx.rollback();
            } catch (Throwable e) {
                // NOTE(review): only the last index rollback failure is kept.
                excep = e;
            }
        }
        storeTx.rollback();
        if (excep != null) { //throw any encountered index transaction rollback exceptions
            if (excep instanceof BackendException) throw (BackendException) excep;
            else throw new PermanentBackendException("Unexpected exception", excep);
        }
    }

    @Override
    public void logMutations(DataOutput out) {
        //Write
        storeTx.logMutations(out);
        for (Map.Entry<String, IndexTransaction> itx : indexTx.entrySet()) {
            out.writeObjectNotNull(itx.getKey());
            itx.getValue().logMutations(out);
        }
    }

    /* ###################################################

            Convenience Write Methods

     */

    /**
     * Applies the specified insertion and deletion mutations on the edge store to the provided key.
     * Both, the list of additions or deletions, may be empty or NULL if there is nothing to be added and/or deleted.
     *
     * @param key       Key
     * @param additions List of entries (column + value) to be added
     * @param deletions List of columns to be removed
     */
    public void mutateEdges(StaticBuffer key, List<Entry> additions, List<Entry> deletions) throws BackendException {
        edgeStore.mutateEntries(key, additions, deletions, storeTx);
    }

    /**
     * Applies the specified insertion and deletion mutations on the property index to the provided key.
     * Both, the list of additions or deletions, may be empty or NULL if there is nothing to be added and/or deleted.
     *
     * @param key       Key
     * @param additions List of entries (column + value) to be added
     * @param deletions List of columns to be removed
     */
    public void mutateIndex(StaticBuffer key, List<Entry> additions, List<Entry> deletions) throws BackendException {
        indexStore.mutateEntries(key, additions, deletions, storeTx);
    }

    /**
     * Acquires a lock for the key-column pair on the edge store which ensures that nobody else can take a lock on that
     * respective entry for the duration of this lock (but somebody could potentially still overwrite
     * the key-value entry without taking a lock). This overload expects the key-column pair not to exist
     * (no expected value is supplied).
     * <p/>
     * If this method is called multiple times with the same key-column pair in the same transaction, all but the first invocation are ignored.
     * <p/>
     * The lock has to be released when the transaction closes (commits or aborts).
     *
     * @param key    Key on which to lock
     * @param column Column the column on which to lock
     */
    public void acquireEdgeLock(StaticBuffer key, StaticBuffer column) throws BackendException {
        acquiredLock = true;
        edgeStore.acquireLock(key, column, null, storeTx);
    }

    // Variant taking an Entry: the entry's value is the expected value at lock time.
    public void acquireEdgeLock(StaticBuffer key, Entry entry) throws BackendException {
        acquiredLock = true;
        edgeStore.acquireLock(key, entry.getColumnAs(StaticBuffer.STATIC_FACTORY), entry.getValueAs(StaticBuffer.STATIC_FACTORY), storeTx);
    }

    /**
     * Acquires a lock for the key-column pair on the property index which ensures that nobody else can take a lock on that
     * respective entry for the duration of this lock (but somebody could potentially still overwrite
     * the key-value entry without taking a lock). This overload expects the key-column pair not to exist
     * (no expected value is supplied).
     * <p/>
     * If this method is called multiple times with the same key-column pair in the same transaction, all but the first invocation are ignored.
     * <p/>
     * The lock has to be released when the transaction closes (commits or aborts).
     *
     * @param key    Key on which to lock
     * @param column Column the column on which to lock
     */
    public void acquireIndexLock(StaticBuffer key, StaticBuffer column) throws BackendException {
        acquiredLock = true;
        indexStore.acquireLock(key, column, null, storeTx);
    }

    // Variant taking an Entry: the entry's value is the expected value at lock time.
    public void acquireIndexLock(StaticBuffer key, Entry entry) throws BackendException {
        acquiredLock = true;
        indexStore.acquireLock(key, entry.getColumnAs(StaticBuffer.STATIC_FACTORY), entry.getValueAs(StaticBuffer.STATIC_FACTORY), storeTx);
    }

    /* ###################################################

            Convenience Read Methods

     */

    public EntryList edgeStoreQuery(final KeySliceQuery query) {
        return executeRead(new Callable<EntryList>() {
            @Override
            public EntryList call() throws Exception {
                return cacheEnabled ? edgeStore.getSlice(query, storeTx) :
                        edgeStore.getSliceNoCache(query, storeTx);
            }

            @Override
            public String toString() {
                return "EdgeStoreQuery";
            }
        });
    }

    // Multi-key slice read: delegates to the backend's native multi-query when
    // supported, otherwise falls back to per-key queries, parallelized over the
    // thread pool when one is available and there are enough keys.
    public Map<StaticBuffer, EntryList> edgeStoreMultiQuery(final List<StaticBuffer> keys, final SliceQuery query) {
        if (storeFeatures.hasMultiQuery()) {
            return executeRead(new Callable<Map<StaticBuffer, EntryList>>() {
                @Override
                public Map<StaticBuffer, EntryList> call() throws Exception {
                    return cacheEnabled ? edgeStore.getSlice(keys, query, storeTx) :
                            edgeStore.getSliceNoCache(keys, query, storeTx);
                }

                @Override
                public String toString() {
                    return "MultiEdgeStoreQuery";
                }
            });
        } else {
            final Map<StaticBuffer, EntryList> results = new HashMap<StaticBuffer, EntryList>(keys.size());
            if (threadPool == null || keys.size() < MIN_TASKS_TO_PARALLELIZE) {
                for (StaticBuffer key : keys) {
                    results.put(key, edgeStoreQuery(new KeySliceQuery(key, query)));
                }
            } else {
                final CountDownLatch doneSignal = new CountDownLatch(keys.size());
                final AtomicInteger failureCount = new AtomicInteger(0);
                EntryList[] resultArray = new EntryList[keys.size()];
                for (int i = 0; i < keys.size(); i++) {
                    threadPool.execute(new SliceQueryRunner(new KeySliceQuery(keys.get(i), query),
                            doneSignal, failureCount, resultArray, i));
                }
                try {
                    doneSignal.await();
                } catch (InterruptedException e) {
                    throw new TitanException("Interrupted while waiting for multi-query to complete", e);
                }
                if (failureCount.get() > 0) {
                    throw new TitanException("Could not successfully complete multi-query. " + failureCount.get() + " individual queries failed.");
                }
                for (int i = 0; i < keys.size(); i++) {
                    assert resultArray[i] != null;
                    results.put(keys.get(i), resultArray[i]);
                }
            }
            return results;
        }
    }

    // Runs one slice query and stores its result at a fixed array slot;
    // failures are counted and logged, and the latch is always counted down.
    private class SliceQueryRunner implements Runnable {

        final KeySliceQuery kq;
        final CountDownLatch doneSignal;
        final AtomicInteger failureCount;
        final Object[] resultArray;
        final int resultPosition;

        private SliceQueryRunner(KeySliceQuery kq, CountDownLatch doneSignal, AtomicInteger failureCount,
                                 Object[] resultArray, int resultPosition) {
            this.kq = kq;
            this.doneSignal = doneSignal;
            this.failureCount = failureCount;
            this.resultArray = resultArray;
            this.resultPosition = resultPosition;
        }

        @Override
        public void run() {
            try {
                List<Entry> result;
                result = edgeStoreQuery(kq);
                resultArray[resultPosition] = result;
            } catch (Exception e) {
                failureCount.incrementAndGet();
                log.warn("Individual query in multi-transaction failed: ", e);
            } finally {
                doneSignal.countDown();
            }
        }
    }

    public KeyIterator edgeStoreKeys(final SliceQuery sliceQuery) {
        if (!storeFeatures.hasScan())
            throw new UnsupportedOperationException("The configured storage backend does not support global graph operations - use Faunus instead");

        return executeRead(new Callable<KeyIterator>() {
            @Override
            public KeyIterator call() throws Exception {
                return (storeFeatures.isKeyOrdered())
                        ? edgeStore.getKeys(new KeyRangeQuery(EDGESTORE_MIN_KEY, EDGESTORE_MAX_KEY, sliceQuery), storeTx)
                        : edgeStore.getKeys(sliceQuery, storeTx);
            }

            @Override
            public String toString() {
                return "EdgeStoreKeys";
            }
        });
    }

    public KeyIterator edgeStoreKeys(final KeyRangeQuery range) {
        Preconditions.checkArgument(storeFeatures.hasOrderedScan(), "The configured storage backend does not support ordered scans");

        return executeRead(new Callable<KeyIterator>() {
            @Override
            public KeyIterator call() throws Exception {
                return edgeStore.getKeys(range, storeTx);
            }

            @Override
            public String toString() {
                return "EdgeStoreKeys";
            }
        });
    }

    public EntryList indexQuery(final KeySliceQuery query) {
        return executeRead(new Callable<EntryList>() {
            @Override
            public EntryList call() throws Exception {
                return cacheEnabled ? indexStore.getSlice(query, storeTx) :
                        indexStore.getSliceNoCache(query, storeTx);
            }

            @Override
            public String toString() {
                return "VertexIndexQuery";
            }
        });
    }

    public List<String> indexQuery(final String index, final IndexQuery query) {
        final IndexTransaction indexTx = getIndexTransaction(index);
        return executeRead(new Callable<List<String>>() {
            @Override
            public List<String> call() throws Exception {
                return indexTx.query(query);
            }

            @Override
            public String toString() {
                return "IndexQuery";
            }
        });
    }

    public Iterable<RawQuery.Result<String>> rawQuery(final String index, final RawQuery query) {
        final IndexTransaction indexTx = getIndexTransaction(index);
        return executeRead(new Callable<Iterable<RawQuery.Result<String>>>() {
            @Override
            public Iterable<RawQuery.Result<String>> call() throws Exception {
                return indexTx.query(query);
            }

            @Override
            public String toString() {
                return "RawQuery";
            }
        });
    }

    // All reads funnel through here so retries/timeouts are applied uniformly.
    private final <V> V executeRead(Callable<V> exe) throws TitanException {
        return BackendOperation.execute(exe, maxReadTime);
    }
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_BackendTransaction.java
|
968 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_BUNDLE_ORDER_ITEM")
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationClass(friendlyName = "BundleOrderItemImpl_bundleOrderItem")
public class BundleOrderItemImpl extends OrderItemImpl implements BundleOrderItem {
private static final long serialVersionUID = 1L;
@OneToMany(mappedBy = "bundleOrderItem", targetEntity = DiscreteOrderItemImpl.class, cascade = {CascadeType.ALL})
@Cache(usage=CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blOrderElements")
@AdminPresentationCollection(friendlyName="BundleOrderItemImpl_Discrete_Order_Items",
tab=OrderItemImpl.Presentation.Tab.Name.Advanced,
tabOrder = OrderItemImpl.Presentation.Tab.Order.Advanced)
protected List<DiscreteOrderItem> discreteOrderItems = new ArrayList<DiscreteOrderItem>();
@OneToMany(mappedBy = "bundleOrderItem", targetEntity = BundleOrderItemFeePriceImpl.class, cascade = { CascadeType.ALL }, orphanRemoval = true)
@Cache(usage = CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region = "blOrderElements")
@AdminPresentationCollection(friendlyName="BundleOrderItemImpl_Item_Fee_Prices",
tab=OrderItemImpl.Presentation.Tab.Name.Advanced,
tabOrder = OrderItemImpl.Presentation.Tab.Order.Advanced)
protected List<BundleOrderItemFeePrice> bundleOrderItemFeePrices = new ArrayList<BundleOrderItemFeePrice>();
@Column(name="BASE_RETAIL_PRICE", precision=19, scale=5)
@AdminPresentation(friendlyName = "BundleOrderItemImpl_Base_Retail_Price", order=2, group = "BundleOrderItemImpl_Pricing", fieldType= SupportedFieldType.MONEY)
protected BigDecimal baseRetailPrice;
@Column(name="BASE_SALE_PRICE", precision=19, scale=5)
@AdminPresentation(friendlyName = "BundleOrderItemImpl_Base_Sale_Price", order=2, group = "BundleOrderItemImpl_Pricing", fieldType= SupportedFieldType.MONEY)
protected BigDecimal baseSalePrice;
@ManyToOne(targetEntity = SkuImpl.class)
@JoinColumn(name = "SKU_ID")
@NotFound(action = NotFoundAction.IGNORE)
@AdminPresentation(friendlyName = "BundleOrderItemImpl_Sku", order=Presentation.FieldOrder.SKU,
group = OrderItemImpl.Presentation.Group.Name.Catalog,
groupOrder = OrderItemImpl.Presentation.Group.Order.Catalog)
@AdminPresentationToOneLookup()
protected Sku sku;
@ManyToOne(targetEntity = ProductBundleImpl.class)
@JoinColumn(name = "PRODUCT_BUNDLE_ID")
@AdminPresentation(friendlyName = "BundleOrderItemImpl_Product", order=Presentation.FieldOrder.PRODUCT,
group = OrderItemImpl.Presentation.Group.Name.Catalog,
groupOrder = OrderItemImpl.Presentation.Group.Order.Catalog)
@AdminPresentationToOneLookup()
protected ProductBundle productBundle;
@Override
public Sku getSku() {
    return sku;
}

/**
 * Sets the bundle's SKU and caches its retail/sale prices, taxable flag and
 * name on this order item.
 */
@Override
public void setSku(Sku sku) {
    this.sku = sku;
    if (sku != null) {
        if (sku.getRetailPrice() != null) {
            this.baseRetailPrice = sku.getRetailPrice().getAmount();
        }
        if (sku.getSalePrice() != null) {
            this.baseSalePrice = sku.getSalePrice().getAmount();
        }
        this.itemTaxable = sku.isTaxable();
        setName(sku.getName());
    }
}

@Override
public ProductBundle getProductBundle() {
    return productBundle;
}

@Override
public void setProductBundle(ProductBundle productBundle) {
    this.productBundle = productBundle;
}

// The bundle's children are its discrete order items.
@Override
public List<? extends OrderItem> getOrderItems() {
    return discreteOrderItems;
}
/**
 * Child items may receive promotions only when the bundle is priced as the sum
 * of its items; in that case the product bundle decides, defaulting to true
 * when no product bundle is attached.
 */
@Override
public boolean getAllowDiscountsOnChildItems() {
    if (!shouldSumItems()) {
        return false;
    }
    return productBundle == null || productBundle.getItemsPromotable();
}
// Pricing happens at the container level only when item prices are not summed.
@Override
public boolean isPricingAtContainerLevel() {
    return !shouldSumItems();
}

// The bundle itself is only promotable when it carries its own price (items
// not summed, which implies productBundle is non-null) and the bundle allows it.
@Override
public boolean isDiscountingAllowed() {
    if (shouldSumItems()) {
        return false;
    } else {
        return productBundle.getBundlePromotable();
    }
}

@Override
public List<DiscreteOrderItem> getDiscreteOrderItems() {
    return discreteOrderItems;
}

@Override
public void setDiscreteOrderItems(List<DiscreteOrderItem> discreteOrderItems) {
    this.discreteOrderItems = discreteOrderItems;
}

@Override
public List<BundleOrderItemFeePrice> getBundleOrderItemFeePrices() {
    return bundleOrderItemFeePrices;
}

@Override
public void setBundleOrderItemFeePrices(List<BundleOrderItemFeePrice> bundleOrderItemFeePrices) {
    this.bundleOrderItemFeePrices = bundleOrderItemFeePrices;
}
/**
 * When item prices are summed, the taxable price is the quantity-weighted sum
 * of the children's taxable prices plus any taxable bundle fees; otherwise the
 * bundle's own taxable price applies.
 */
@Override
public Money getTaxablePrice() {
    if (shouldSumItems()) {
        Money currentBundleTaxablePrice = BroadleafCurrencyUtils.getMoney(getOrder().getCurrency());
        for (DiscreteOrderItem discreteOrderItem : discreteOrderItems) {
            BigDecimal currentItemTaxablePrice = discreteOrderItem.getTaxablePrice().getAmount();
            BigDecimal priceWithQuantity = currentItemTaxablePrice.multiply(new BigDecimal(discreteOrderItem.getQuantity()));
            currentBundleTaxablePrice = currentBundleTaxablePrice.add(BroadleafCurrencyUtils.getMoney(priceWithQuantity, getOrder().getCurrency()));
        }
        for (BundleOrderItemFeePrice fee : getBundleOrderItemFeePrices()) {
            if (fee.isTaxable()) {
                currentBundleTaxablePrice = currentBundleTaxablePrice.add(fee.getAmount());
            }
        }
        return currentBundleTaxablePrice;
    } else {
        return super.getTaxablePrice();
    }
}
// Taxable unless the SKU explicitly says otherwise (null SKU or flag => taxable).
@Override
public Boolean isTaxable() {
    return (sku == null || sku.isTaxable() == null || sku.isTaxable());
}
/**
 * True when the bundle's price is computed as the sum of its item prices;
 * this is also the default when no product bundle is attached.
 */
@Override
public boolean shouldSumItems() {
    return productBundle == null
            || ProductBundlePricingModelType.ITEM_SUM.equals(productBundle.getPricingModel());
}
/**
 * Retail price of the bundle. When item prices are summed, this is the sum of each
 * contained item's retail price times its quantity, plus all bundle fees; otherwise
 * the bundle's own retail price from the superclass applies.
 */
@Override
public Money getRetailPrice() {
    if (!shouldSumItems()) {
        return super.getRetailPrice();
    }
    Money total = BroadleafCurrencyUtils.getMoney(getOrder().getCurrency());
    for (DiscreteOrderItem containedItem : discreteOrderItems) {
        BigDecimal lineAmount = containedItem.getRetailPrice().getAmount()
                .multiply(new BigDecimal(containedItem.getQuantity()));
        total = total.add(BroadleafCurrencyUtils.getMoney(lineAmount, getOrder().getCurrency()));
    }
    for (BundleOrderItemFeePrice fee : getBundleOrderItemFeePrices()) {
        total = total.add(fee.getAmount());
    }
    return total;
}
@Override
public Money getSalePrice() {
// When summing: per-item sale price (falling back to retail for items with no sale
// price) times quantity, plus all bundle fees. NOTE: returns null when summing and
// no contained item carries a sale price.
if (shouldSumItems()) {
Money bundleSalePrice = null;
if (hasSaleItems()) {
bundleSalePrice = BroadleafCurrencyUtils.getMoney(getOrder().getCurrency());
for (DiscreteOrderItem discreteOrderItem : discreteOrderItems) {
BigDecimal itemSalePrice = null;
// Items without an explicit sale price contribute at retail.
if (discreteOrderItem.getSalePrice() != null) {
itemSalePrice = discreteOrderItem.getSalePrice().getAmount();
} else {
itemSalePrice = discreteOrderItem.getRetailPrice().getAmount();
}
BigDecimal quantityPrice = itemSalePrice.multiply(new BigDecimal(discreteOrderItem.getQuantity()));
bundleSalePrice = bundleSalePrice.add(BroadleafCurrencyUtils.getMoney(quantityPrice, getOrder().getCurrency()));
}
for (BundleOrderItemFeePrice fee : getBundleOrderItemFeePrices()) {
bundleSalePrice = bundleSalePrice.add(fee.getAmount());
}
}
return bundleSalePrice;
} else {
return super.getSalePrice();
}
}
/** @return the stored base retail amount as Money, or null when unset. */
@Override
public Money getBaseRetailPrice() {
    return convertToMoney(baseRetailPrice);
}

/** Stores only the raw amount of the supplied Money; null clears the value. */
@Override
public void setBaseRetailPrice(Money baseRetailPrice) {
    if (baseRetailPrice == null) {
        this.baseRetailPrice = null;
    } else {
        this.baseRetailPrice = baseRetailPrice.getAmount();
    }
}

/** @return the stored base sale amount as Money, or null when unset. */
@Override
public Money getBaseSalePrice() {
    return convertToMoney(baseSalePrice);
}

/** Stores only the raw amount of the supplied Money; null clears the value. */
@Override
public void setBaseSalePrice(Money baseSalePrice) {
    if (baseSalePrice == null) {
        this.baseSalePrice = null;
    } else {
        this.baseSalePrice = baseSalePrice.getAmount();
    }
}
// True when any contained item carries an explicit sale price.
private boolean hasSaleItems() {
for (DiscreteOrderItem discreteOrderItem : discreteOrderItems) {
if (discreteOrderItem.getSalePrice() != null) {
return true;
}
}
return false;
}
@Override
public boolean hasAdjustedItems() {
//TODO: Handle this for bundle order items.
return false;
}
// Refreshes the cached sale price from the sku when bundle-level pricing is in
// effect. Returns true only when a new value was written. Overridden prices are
// never recalculated.
private boolean updateSalePrice() {
if (isSalePriceOverride()) {
return false;
}
// Only need to update prices if we are not summing the contained items to determine
// the price.
if (! shouldSumItems()) {
// NOTE(review): this compares the sku's Money sale price against the stored
// amount via equals(); confirm the two types actually compare equal when the
// values match, otherwise this reports a change on every call.
if (getSku() != null && getSku().getSalePrice() != null && !getSku().getSalePrice().equals(salePrice)) {
baseSalePrice = getSku().getSalePrice().getAmount();
salePrice = getSku().getSalePrice().getAmount();
return true;
}
}
return false;
}
// Refreshes the cached retail price from the sku when bundle-level pricing is in
// effect. Returns true only when a new value was written. Overridden prices are
// never recalculated.
private boolean updateRetailPrice() {
    if (isRetailPriceOverride()) {
        return false;
    }
    // Only need to update prices if we are not summing the contained items to determine
    // the price.
    if (!shouldSumItems()) {
        // Guard against a null sku retail price, mirroring updateSalePrice(); previously
        // a null retail price triggered a NullPointerException on the equals() call.
        // NOTE(review): the equals() comparison is between the sku's Money retail price
        // and the stored amount; confirm the two types compare equal when values match.
        if (getSku() != null && getSku().getRetailPrice() != null
                && !getSku().getRetailPrice().equals(retailPrice)) {
            baseRetailPrice = getSku().getRetailPrice().getAmount();
            retailPrice = getSku().getRetailPrice().getAmount();
            return true;
        }
    }
    return false;
}
/**
 * Refreshes both the sale and the retail price from the sku. Both refresh methods
 * must run unconditionally (no short-circuiting), so the non-short-circuit |= is used.
 *
 * @return true if either price changed
 */
@Override
public boolean updateSaleAndRetailPrices() {
    boolean updated = updateSalePrice();
    updated |= updateRetailPrice();
    return updated;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
BundleOrderItemImpl other = (BundleOrderItemImpl) obj;
// Superclass fields must match first.
if (!super.equals(obj)) {
return false;
}
// When both sides are persisted, identity is decided by id alone.
if (id != null && other.id != null) {
return id.equals(other.id);
}
// Otherwise fall back to comparing names.
if (name == null) {
if (other.name != null) {
return false;
}
} else if (!name.equals(other.name)) {
return false;
}
return true;
}
/** A bundle's product is its configured ProductBundle. */
@Override
public Product getProduct() {
    return getProductBundle();
}

/**
 * Wraps a raw amount in a Money using the order's currency.
 *
 * @param amount the raw amount; may be null
 * @return the Money value, or null when {@code amount} is null
 */
protected Money convertToMoney(BigDecimal amount) {
    if (amount == null) {
        return null;
    }
    return BroadleafCurrencyUtils.getMoney(amount, getOrder().getCurrency());
}
@Override
public Money getTotalPrice() {
Money returnValue = convertToMoney(BigDecimal.ZERO);
if (shouldSumItems()) {
// Sum each contained item's total, then scale by this bundle's own quantity.
for (OrderItem containedItem : getOrderItems()) {
returnValue = returnValue.add(containedItem.getTotalPrice());
}
returnValue = returnValue.multiply(quantity);
} else {
returnValue = super.getTotalPrice();
}
return returnValue;
}
/**
 * A bundle is active only while its own sku (when present) and the sku of every
 * contained item are active.
 */
@Override
public boolean isSkuActive() {
    if (getSku() != null && !getSku().isActive()) {
        return false;
    }
    for (DiscreteOrderItem containedItem : getDiscreteOrderItems()) {
        if (!containedItem.isSkuActive()) {
            return false;
        }
    }
    return true;
}
@Override
public OrderItem clone() {
// Deep-copies the contained items and fee prices, re-pointing each copy at the
// cloned bundle; the sku and productBundle references are shared, not copied.
BundleOrderItemImpl orderItem = (BundleOrderItemImpl) super.clone();
if (discreteOrderItems != null) {
for (DiscreteOrderItem discreteOrderItem : discreteOrderItems) {
DiscreteOrderItem temp = (DiscreteOrderItem) discreteOrderItem.clone();
temp.setBundleOrderItem(orderItem);
orderItem.getDiscreteOrderItems().add(temp);
}
}
if (bundleOrderItemFeePrices != null) {
for (BundleOrderItemFeePrice feePrice : bundleOrderItemFeePrices) {
BundleOrderItemFeePrice cloneFeePrice = feePrice.clone();
cloneFeePrice.setBundleOrderItem(orderItem);
orderItem.getBundleOrderItemFeePrices().add(cloneFeePrice);
}
}
orderItem.setBaseRetailPrice(convertToMoney(baseRetailPrice));
orderItem.setBaseSalePrice(convertToMoney(baseSalePrice));
orderItem.setSku(sku);
orderItem.setProductBundle(productBundle);
return orderItem;
}
@Override
public int hashCode() {
// NOTE(review): 'prime' is seeded from super.hashCode() rather than a fixed prime,
// and only 'name' is folded in, while equals() above may match on id alone —
// instances equal by id but differing in name would hash differently. Confirm
// before relying on these entities as hash-based collection keys.
final int prime = super.hashCode();
int result = 1;
result = prime * result + ((name == null) ? 0 : name.hashCode());
return result;
}
/**
 * Admin presentation metadata constants (tab/group names, ordering and field order)
 * referenced by the presentation annotations on this entity.
 */
public static class Presentation {
public static class Tab {
public static class Name {
public static final String OrderItems = "OrderImpl_Order_Items_Tab";
}
public static class Order {
public static final int OrderItems = 2000;
}
}
public static class Group {
public static class Name {
}
public static class Order {
}
}
public static class FieldOrder {
public static final int PRODUCT = 2000;
public static final int SKU = 3000;
}
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_domain_BundleOrderItemImpl.java
|
3,472 |
/**
 * Test MapStore/MapLoader that fabricates values as {@code <mapName>:<key>} and
 * performs no persistence: all store/delete operations are intentional no-ops and
 * bulk loads return null.
 */
public class CacheMapLoader implements MapStore, MapLoaderLifecycleSupport {
// Map name captured at init time; used as the value prefix in load().
private String type;
public void init(final HazelcastInstance hazelcastInstance,
final Properties properties, final String mapName) {
type = mapName;
}
public void destroy() {
}
// Fabricates a value from the map name and key; no backing store is consulted.
public Object load(final Object key) {
return type + ":" + key;
}
public Map loadAll(final Collection keys) {
return null;
}
public Set loadAllKeys() {
return null;
}
@Override
public void store(Object key, Object value) {
// No-op: this test loader does not persist entries.
}
@Override
public void storeAll(Map map) {
// No-op: this test loader does not persist entries.
}
@Override
public void delete(Object key) {
// No-op: this test loader does not persist entries.
}
@Override
public void deleteAll(Collection keys) {
// No-op: this test loader does not persist entries.
}
}
| 1no label
|
hazelcast-spring_src_test_java_com_hazelcast_spring_cache_CacheMapLoader.java
|
4,050 |
/**
 * Query that matches parent documents based on matches of a child query: the child
 * query is executed eagerly when the weight is created, aggregating one score per
 * parent uid according to the configured {@link ScoreType}; the resulting weight
 * then scores exactly those parent documents whose uid was collected.
 */
public class ChildrenQuery extends Query {
private final String parentType;
private final String childType;
private final Filter parentFilter;
private final ScoreType scoreType;
private final Query originalChildQuery;
// Below this number of matched parents, an explicit parent-ids filter is cheaper
// than the generic parent filter (see createWeight).
private final int shortCircuitParentDocSet;
private final Filter nonNestedDocsFilter;
// Cached rewritten child query plus the reader it was rewritten against (see rewrite).
private Query rewrittenChildQuery;
private IndexReader rewriteIndexReader;
public ChildrenQuery(String parentType, String childType, Filter parentFilter, Query childQuery, ScoreType scoreType, int shortCircuitParentDocSet, Filter nonNestedDocsFilter) {
this.parentType = parentType;
this.childType = childType;
this.parentFilter = parentFilter;
this.originalChildQuery = childQuery;
this.scoreType = scoreType;
this.shortCircuitParentDocSet = shortCircuitParentDocSet;
this.nonNestedDocsFilter = nonNestedDocsFilter;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != this.getClass()) {
return false;
}
ChildrenQuery that = (ChildrenQuery) obj;
// NOTE(review): only the child query, child type and boost participate in equality
// (and hashing below); parentType, scoreType and shortCircuitParentDocSet are not
// compared — presumably derived from the same mapping. Confirm.
if (!originalChildQuery.equals(that.originalChildQuery)) {
return false;
}
if (!childType.equals(that.childType)) {
return false;
}
if (getBoost() != that.getBoost()) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = originalChildQuery.hashCode();
result = 31 * result + childType.hashCode();
result = 31 * result + Float.floatToIntBits(getBoost());
return result;
}
@Override
public String toString(String field) {
StringBuilder sb = new StringBuilder();
sb.append("ChildrenQuery[").append(childType).append("/").append(parentType).append("](").append(originalChildQuery
.toString(field)).append(')').append(ToStringUtils.boost(getBoost()));
return sb.toString();
}
@Override
// See TopChildrenQuery#rewrite
public Query rewrite(IndexReader reader) throws IOException {
// Rewrites the child query once and remembers the reader it was rewritten for;
// this query object itself is returned unchanged.
if (rewrittenChildQuery == null) {
rewriteIndexReader = reader;
rewrittenChildQuery = originalChildQuery.rewrite(reader);
}
return this;
}
@Override
public void extractTerms(Set<Term> terms) {
rewrittenChildQuery.extractTerms(terms);
}
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
SearchContext searchContext = SearchContext.current();
searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves());
// Phase 1: run the (rewritten) child query over the whole index, accumulating a
// score per parent uid — and, for AVG scoring, a per-parent hit count.
Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore = searchContext.cacheRecycler().objectFloatMap(-1);
Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount = null;
final Collector collector;
switch (scoreType) {
case AVG:
uidToCount = searchContext.cacheRecycler().objectIntMap(-1);
collector = new AvgChildUidCollector(scoreType, searchContext, parentType, uidToScore.v(), uidToCount.v());
break;
default:
collector = new ChildUidCollector(scoreType, searchContext, parentType, uidToScore.v());
}
final Query childQuery;
if (rewrittenChildQuery == null) {
childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery);
} else {
assert rewriteIndexReader == searcher.getIndexReader();
childQuery = rewrittenChildQuery;
}
IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
indexSearcher.setSimilarity(searcher.getSimilarity());
indexSearcher.search(childQuery, collector);
int size = uidToScore.v().size();
if (size == 0) {
// No children matched: release the recycled maps and match nothing.
uidToScore.release();
if (uidToCount != null) {
uidToCount.release();
}
return Queries.newMatchNoDocsQuery().createWeight(searcher);
}
// Phase 2: pick the cheapest filter for iterating candidate parents given how
// many parents were matched: single-term filter for one parent, explicit
// parent-ids filter below the short-circuit threshold, generic filter otherwise.
final Filter parentFilter;
if (size == 1) {
BytesRef id = uidToScore.v().keys().iterator().next().value.toBytesRef();
if (nonNestedDocsFilter != null) {
List<Filter> filters = Arrays.asList(
new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))),
nonNestedDocsFilter
);
parentFilter = new AndFilter(filters);
} else {
parentFilter = new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
}
} else if (size <= shortCircuitParentDocSet) {
parentFilter = new ParentIdsFilter(parentType, uidToScore.v().keys, uidToScore.v().allocated, nonNestedDocsFilter);
} else {
parentFilter = new ApplyAcceptedDocsFilter(this.parentFilter);
}
ParentWeight parentWeight = new ParentWeight(rewrittenChildQuery.createWeight(searcher), parentFilter, searchContext, size, uidToScore, uidToCount);
searchContext.addReleasable(parentWeight);
return parentWeight;
}
// Weight over parent documents. Owns the recycled uid->score/count maps and
// releases them when the search context releases this weight.
private final class ParentWeight extends Weight implements Releasable {
private final Weight childWeight;
private final Filter parentFilter;
private final SearchContext searchContext;
private final Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore;
private final Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount;
// Number of matched parents not yet emitted; lets the scorers stop early.
private int remaining;
private ParentWeight(Weight childWeight, Filter parentFilter, SearchContext searchContext, int remaining, Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore, Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount) {
this.childWeight = childWeight;
this.parentFilter = parentFilter;
this.searchContext = searchContext;
this.remaining = remaining;
this.uidToScore = uidToScore;
this.uidToCount= uidToCount;
}
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
return new Explanation(getBoost(), "not implemented yet...");
}
@Override
public Query getQuery() {
return ChildrenQuery.this;
}
@Override
public float getValueForNormalization() throws IOException {
float sum = childWeight.getValueForNormalization();
sum *= getBoost() * getBoost();
return sum;
}
@Override
public void normalize(float norm, float topLevelBoost) {
}
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
DocIdSet parentsSet = parentFilter.getDocIdSet(context, acceptDocs);
if (DocIdSets.isEmpty(parentsSet) || remaining == 0) {
return null;
}
IdReaderTypeCache idTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
// We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining"
// count down (short circuit) logic will then work as expected.
DocIdSetIterator parentsIterator = BitsFilteredDocIdSet.wrap(parentsSet, context.reader().getLiveDocs()).iterator();
switch (scoreType) {
case AVG:
return new AvgParentScorer(this, idTypeCache, uidToScore.v(), uidToCount.v(), parentsIterator);
default:
return new ParentScorer(this, idTypeCache, uidToScore.v(), parentsIterator);
}
}
@Override
public boolean release() throws ElasticsearchException {
Releasables.release(uidToScore, uidToCount);
return true;
}
// Iterates the parent filter, emitting only parents present in uidToScore and
// surfacing the pre-aggregated child score.
private class ParentScorer extends Scorer {
final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
final IdReaderTypeCache idTypeCache;
final DocIdSetIterator parentsIterator;
int currentDocId = -1;
float currentScore;
ParentScorer(Weight weight, IdReaderTypeCache idTypeCache, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, DocIdSetIterator parentsIterator) {
super(weight);
this.idTypeCache = idTypeCache;
this.parentsIterator = parentsIterator;
this.uidToScore = uidToScore;
}
@Override
public float score() throws IOException {
return currentScore;
}
@Override
public int freq() throws IOException {
// We don't have the original child query hit info here...
// But the freq of the children could be collector and returned here, but makes this Scorer more expensive.
return 1;
}
@Override
public int docID() {
return currentDocId;
}
@Override
public int nextDoc() throws IOException {
if (remaining == 0) {
return currentDocId = NO_MORE_DOCS;
}
// Skip filter hits whose uid did not collect a child score.
while (true) {
currentDocId = parentsIterator.nextDoc();
if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) {
return currentDocId;
}
HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
if (uidToScore.containsKey(uid)) {
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
currentScore = uidToScore.lget();
remaining--;
return currentDocId;
}
}
}
@Override
public int advance(int target) throws IOException {
if (remaining == 0) {
return currentDocId = NO_MORE_DOCS;
}
currentDocId = parentsIterator.advance(target);
if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) {
return currentDocId;
}
HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
if (uidToScore.containsKey(uid)) {
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
currentScore = uidToScore.lget();
remaining--;
return currentDocId;
} else {
return nextDoc();
}
}
@Override
public long cost() {
return parentsIterator.cost();
}
}
// Same as ParentScorer, but divides the summed score by the per-parent hit count
// to produce an average.
private final class AvgParentScorer extends ParentScorer {
final ObjectIntOpenHashMap<HashedBytesArray> uidToCount;
AvgParentScorer(Weight weight, IdReaderTypeCache idTypeCache, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, ObjectIntOpenHashMap<HashedBytesArray> uidToCount, DocIdSetIterator parentsIterator) {
super(weight, idTypeCache, uidToScore, parentsIterator);
this.uidToCount = uidToCount;
}
@Override
public int nextDoc() throws IOException {
if (remaining == 0) {
return currentDocId = NO_MORE_DOCS;
}
while (true) {
currentDocId = parentsIterator.nextDoc();
if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) {
return currentDocId;
}
HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
if (uidToScore.containsKey(uid)) {
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
currentScore = uidToScore.lget();
currentScore /= uidToCount.get(uid);
remaining--;
return currentDocId;
}
}
}
@Override
public int advance(int target) throws IOException {
if (remaining == 0) {
return currentDocId = NO_MORE_DOCS;
}
currentDocId = parentsIterator.advance(target);
if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) {
return currentDocId;
}
HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
if (uidToScore.containsKey(uid)) {
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
currentScore = uidToScore.lget();
currentScore /= uidToCount.get(uid);
remaining--;
return currentDocId;
} else {
return nextDoc();
}
}
}
}
// Collects child hits, folding each child's score into its parent uid's entry
// according to the score type (SUM or MAX; AVG has its own subclass).
private static class ChildUidCollector extends ParentIdCollector {
protected final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
private final ScoreType scoreType;
protected Scorer scorer;
ChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore) {
super(childType, searchContext);
this.uidToScore = uidToScore;
this.scoreType = scoreType;
}
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
protected void collect(int doc, HashedBytesArray parentUid) throws IOException {
float currentScore = scorer.score();
switch (scoreType) {
case SUM:
uidToScore.addTo(parentUid, currentScore);
break;
case MAX:
if (uidToScore.containsKey(parentUid)) {
float previousScore = uidToScore.lget();
if (currentScore > previousScore) {
uidToScore.lset(currentScore);
}
} else {
uidToScore.put(parentUid, currentScore);
}
break;
case AVG:
assert false : "AVG has its own collector";
default:
assert false : "Are we missing a score type here? -- " + scoreType;
break;
}
}
}
// AVG collector: accumulates both a score sum and a hit count per parent; the
// division happens later in AvgParentScorer.
private final static class AvgChildUidCollector extends ChildUidCollector {
private final ObjectIntOpenHashMap<HashedBytesArray> uidToCount;
AvgChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, ObjectIntOpenHashMap<HashedBytesArray> uidToCount) {
super(scoreType, searchContext, childType, uidToScore);
this.uidToCount = uidToCount;
assert scoreType == ScoreType.AVG;
}
@Override
protected void collect(int doc, HashedBytesArray parentUid) throws IOException {
float currentScore = scorer.score();
uidToCount.addTo(parentUid, 1);
uidToScore.addTo(parentUid, currentScore);
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_search_child_ChildrenQuery.java
|
339 |
/**
 * Transport handler for the nodes "restart" admin action.
 * <p>
 * Restart is currently hard-disabled: {@link #doExecute} always fails immediately.
 * The per-node restart logic (service-wrapper restart or in-process stop/start) is
 * retained in {@link #nodeOperation} but is unreachable through doExecute.
 */
public class TransportNodesRestartAction extends TransportNodesOperationAction<NodesRestartRequest, NodesRestartResponse, TransportNodesRestartAction.NodeRestartRequest, NodesRestartResponse.NodeRestartResponse> {
private final Node node;
// "disabled" component setting; enforced again inside nodeOperation.
private final boolean disabled;
// Guards against scheduling concurrent restarts on the same node.
private AtomicBoolean restartRequested = new AtomicBoolean();
@Inject
public TransportNodesRestartAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
Node node) {
super(settings, clusterName, threadPool, clusterService, transportService);
this.node = node;
disabled = componentSettings.getAsBoolean("disabled", false);
}
@Override
protected void doExecute(NodesRestartRequest nodesRestartRequest, ActionListener<NodesRestartResponse> listener) {
// Restart is globally disabled; fail fast without dispatching to any node.
listener.onFailure(new ElasticsearchIllegalStateException("restart is disabled (for now) ...."));
}
@Override
protected String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
protected String transportAction() {
return NodesRestartAction.NAME;
}
@Override
protected NodesRestartResponse newResponse(NodesRestartRequest nodesShutdownRequest, AtomicReferenceArray responses) {
// Keep only successful per-node responses; failures are dropped
// (accumulateExceptions() returns false).
final List<NodesRestartResponse.NodeRestartResponse> nodeRestartResponses = newArrayList();
for (int i = 0; i < responses.length(); i++) {
Object resp = responses.get(i);
if (resp instanceof NodesRestartResponse.NodeRestartResponse) {
nodeRestartResponses.add((NodesRestartResponse.NodeRestartResponse) resp);
}
}
return new NodesRestartResponse(clusterName, nodeRestartResponses.toArray(new NodesRestartResponse.NodeRestartResponse[nodeRestartResponses.size()]));
}
@Override
protected NodesRestartRequest newRequest() {
return new NodesRestartRequest();
}
@Override
protected NodeRestartRequest newNodeRequest() {
return new NodeRestartRequest();
}
@Override
protected NodeRestartRequest newNodeRequest(String nodeId, NodesRestartRequest request) {
return new NodeRestartRequest(nodeId, request);
}
@Override
protected NodesRestartResponse.NodeRestartResponse newNodeResponse() {
return new NodesRestartResponse.NodeRestartResponse();
}
@Override
protected NodesRestartResponse.NodeRestartResponse nodeOperation(NodeRestartRequest request) throws ElasticsearchException {
if (disabled) {
throw new ElasticsearchIllegalStateException("Restart is disabled");
}
// If a restart is already pending, acknowledge without scheduling another.
if (!restartRequested.compareAndSet(false, true)) {
return new NodesRestartResponse.NodeRestartResponse(clusterService.localNode());
}
logger.info("Restarting in [{}]", request.delay);
threadPool.schedule(request.delay, ThreadPool.Names.GENERIC, new Runnable() {
@Override
public void run() {
boolean restartWithWrapper = false;
// When running as a service, delegate the restart to the Tanuki wrapper,
// loaded reflectively so there is no hard dependency on it.
if (System.getProperty("elasticsearch-service") != null) {
try {
Class wrapperManager = settings.getClassLoader().loadClass("org.tanukisoftware.wrapper.WrapperManager");
logger.info("Initiating requested restart (using service)");
wrapperManager.getMethod("restartAndReturn").invoke(null);
restartWithWrapper = true;
} catch (Throwable e) {
logger.error("failed to initial restart on service wrapper", e);
}
}
// Otherwise restart in-process: stop then start the node, always clearing
// the pending flag so a later restart can be requested.
if (!restartWithWrapper) {
logger.info("Initiating requested restart");
try {
node.stop();
node.start();
} catch (Exception e) {
logger.warn("Failed to restart", e);
} finally {
restartRequested.set(false);
}
}
}
});
return new NodesRestartResponse.NodeRestartResponse(clusterService.localNode());
}
@Override
protected boolean accumulateExceptions() {
return false;
}
// Per-node request carrying the restart delay.
protected static class NodeRestartRequest extends NodeOperationRequest {
TimeValue delay;
private NodeRestartRequest() {
}
private NodeRestartRequest(String nodeId, NodesRestartRequest request) {
super(request, nodeId);
this.delay = request.delay;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
delay = readTimeValue(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
delay.writeTo(out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_node_restart_TransportNodesRestartAction.java
|
41 |
/**
 * Cluster listener that reacts to a member leaving the cluster by asking the
 * election service to demote it, which triggers reelection of every role that
 * member held.
 */
public class ClusterLeaveReelectionListener extends ClusterListener.Adapter
{
    private final Election electionService;
    private final StringLogger log;

    public ClusterLeaveReelectionListener( Election electionService, StringLogger log )
    {
        this.electionService = electionService;
        this.log = log;
    }

    @Override
    public void leftCluster( InstanceId member )
    {
        log.warn( "Demoting member " + member + " because it left the cluster" );
        // Suggest reelection for all roles of this node
        electionService.demote( member );
    }
}
| 1no label
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_ClusterLeaveReelectionListener.java
|
276 |
// Enqueue the e-mail properties as a JMS ObjectMessage on the configured destination;
// the JMS priority is taken from the EmailInfo's async-send priority setting.
emailServiceTemplate.send(emailServiceDestination, new MessageCreator() {
public Message createMessage(Session session) throws JMSException {
ObjectMessage message = session.createObjectMessage(props);
EmailInfo info = (EmailInfo) props.get(EmailPropertyType.INFO.getType());
message.setJMSPriority(Integer.parseInt(info.getSendAsyncPriority()));
return message;
}
});
| 0true
|
common_src_main_java_org_broadleafcommerce_common_email_service_jms_JMSEmailServiceProducerImpl.java
|
87 |
public interface ObjectByObjectToInt<A,B> {int apply(A a, B b); }
| 0true
|
src_main_java_jsr166e_ConcurrentHashMapV8.java
|
1,447 |
/**
 * Adapter that decodes raw Titan storage rows (a key plus relation entries) into
 * {@link FaunusVertex} instances for Hadoop input formats. System and hidden
 * relation types are filtered out; vertices left with no incident relations (or,
 * when existence verification is enabled, no vertex-exists marker) are dropped.
 */
public class TitanHadoopGraph {
private final TitanHadoopSetup setup;
private final TypeInspector typeManager;
private final SystemTypeInspector systemTypes;
private final VertexReader vertexReader;
// When true, a vertex-exists system entry would be required for a vertex to be
// emitted; currently disabled.
private final boolean verifyVertexExistence = false;
private static final Logger log =
LoggerFactory.getLogger(TitanHadoopGraph.class);
public TitanHadoopGraph(final TitanHadoopSetup setup) {
this.setup = setup;
this.typeManager = setup.getTypeInspector();
this.systemTypes = setup.getSystemTypeInspector();
this.vertexReader = setup.getVertexReader();
}
// Decodes one storage row into a FaunusVertex, or returns null when the row holds
// only system/hidden data. Any decode failure is rethrown as a RuntimeException.
protected FaunusVertex readHadoopVertex(final Configuration configuration, final StaticBuffer key, Iterable<Entry> entries) {
final long vertexId = this.vertexReader.getVertexId(key);
Preconditions.checkArgument(vertexId > 0);
FaunusVertex vertex = new FaunusVertex(configuration, vertexId);
boolean foundVertexState = !verifyVertexExistence;
for (final Entry data : entries) {
try {
RelationReader relationReader = setup.getRelationReader(vertex.getLongId());
final RelationCache relation = relationReader.parseRelation(data, false, typeManager);
if (this.systemTypes.isVertexExistsSystemType(relation.typeId)) {
foundVertexState = true;
} else if (this.systemTypes.isVertexLabelSystemType(relation.typeId)) {
//Vertex Label
long vertexLabelId = relation.getOtherVertexId();
VertexLabel vl = typeManager.getExistingVertexLabel(vertexLabelId);
vertex.setVertexLabel(vertex.getTypeManager().getVertexLabel(vl.getName()));
}
if (systemTypes.isSystemType(relation.typeId)) continue; //Ignore system types
final RelationType type = typeManager.getExistingRelationType(relation.typeId);
if (((InternalRelationType)type).isHiddenType()) continue; //Ignore hidden types
StandardFaunusRelation frel;
if (type.isPropertyKey()) {
// Property entry: attach the value to the vertex.
Object value = relation.getValue();
Preconditions.checkNotNull(value);
final StandardFaunusProperty fprop = new StandardFaunusProperty(relation.relationId, vertex, type.getName(), value);
vertex.addProperty(fprop);
frel = fprop;
} else {
// Edge entry: the relation's direction decides which endpoint is this vertex.
assert type.isEdgeLabel();
StandardFaunusEdge fedge;
if (relation.direction.equals(Direction.IN))
fedge = new StandardFaunusEdge(configuration, relation.relationId, relation.getOtherVertexId(), vertexId, type.getName());
else if (relation.direction.equals(Direction.OUT))
fedge = new StandardFaunusEdge(configuration, relation.relationId, vertexId, relation.getOtherVertexId(), type.getName());
else
throw ExceptionFactory.bothIsNotSupported();
vertex.addEdge(fedge);
frel = fedge;
}
if (relation.hasProperties()) {
// load relation properties
for (final LongObjectCursor<Object> next : relation) {
assert next.value != null;
RelationType rt = typeManager.getExistingRelationType(next.key);
if (rt.isPropertyKey()) {
PropertyKey pkey = (PropertyKey)vertex.getTypeManager().getPropertyKey(rt.getName());
log.debug("Retrieved key {} for name \"{}\"", pkey, rt.getName());
frel.setProperty(pkey, next.value);
} else {
assert next.value instanceof Long;
EdgeLabel el = (EdgeLabel)vertex.getTypeManager().getEdgeLabel(rt.getName());
log.debug("Retrieved ege label {} for name \"{}\"", el, rt.getName());
frel.setProperty(el, new FaunusVertex(configuration,(Long)next.value));
}
}
for (TitanRelation rel : frel.query().queryAll().relations())
((FaunusRelation)rel).setLifeCycle(ElementLifeCycle.Loaded);
}
frel.setLifeCycle(ElementLifeCycle.Loaded);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
vertex.setLifeCycle(ElementLifeCycle.Loaded);
/*Since we are filtering out system relation types, we might end up with vertices that have no incident relations.
This is especially true for schema vertices. Those are filtered out. */
if (!foundVertexState || !vertex.query().relations().iterator().hasNext()) return null;
return vertex;
}
public void close() {
setup.close();
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_formats_util_TitanHadoopGraph.java
|
584 |
// Retry the merge against the new target address after a fixed 10-second back-off.
nodeEngine.getExecutionService().schedule(new Runnable() {
public void run() {
merge(newTargetAddress);
}
}, 10, TimeUnit.SECONDS);
| 0true
|
hazelcast_src_main_java_com_hazelcast_cluster_ClusterServiceImpl.java
|
444 |
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface AdminPresentationMergeEntry {
/**
* The type for this property override. The types available are specific to the properties available in
* the various admin presentation annotations. See {@link PropertyType} for a comprehensive list of
* available values organized by admin presentation annotation type.
*
* @see PropertyType
* @return the property override type
*/
String propertyType();
/**
* The string representation of the override value. Any primitive property can be specified as a string (int, boolean, enum).
* The backend override system will be responsible for converting the string representation back
* to the appropriate type for use by the admin. The type specific override value properties are provided
* for convenience (e.g. doubleOverrideValue()).
*
* @return The string representation of the property value
*/
String overrideValue() default "";
/**
* Convenience property for specifying a double value override. The target property
* must be of this type. This type can also be specified using overrideValue() and the backend will convert.
*
* @return the property override value in the form of a double
*/
double doubleOverrideValue() default Double.MIN_VALUE;
/**
* Convenience property for specifying a float value override. The target property
* must be of this type. This type can also be specified using overrideValue() and the backend will convert.
*
* @return the property override value in the form of a float
*/
float floatOverrideValue() default Float.MIN_VALUE;
/**
* Convenience property for specifying a boolean value override. The target property
* must be of this type. This type can also be specified using overrideValue() and the backend will convert.
*
* @return the property override value in the form of a boolean
*/
boolean booleanOverrideValue() default false;
/**
 * Convenience property for specifying an int value override. The target property
 * must be of this type. This type can also be specified using overrideValue() and the backend will convert.
 * <p>
 * The {@code Integer.MIN_VALUE} default serves as a "not set" sentinel, since annotation
 * elements cannot default to null — confirm the merge processing treats it as such.
 *
 * @return the property override value in the form of an int
 */
int intOverrideValue() default Integer.MIN_VALUE;
/**
 * Convenience property for specifying a long value override. The target property
 * must be of this type. This type can also be specified using overrideValue() and the backend will convert.
 * <p>
 * The {@code Long.MIN_VALUE} default serves as a "not set" sentinel, since annotation
 * elements cannot default to null — confirm the merge processing treats it as such.
 *
 * @return the property override value in the form of a long
 */
long longOverrideValue() default Long.MIN_VALUE;
/**
 * Convenience property for specifying a string array value override. The target property
 * must be of this type. Note that, unlike the scalar overrides, array values cannot be
 * expressed through overrideValue() and must use this element directly.
 * <p>
 * Defaults to an empty array, indicating no override was specified.
 *
 * @return the property override value in the form of a string array
 */
String[] stringArrayOverrideValue() default {};
/**
 * Convenience property for specifying a double array value override. The target property
 * must be of this type.
 * <p>
 * Defaults to an empty array, indicating no override was specified.
 *
 * @return the property override value in the form of a double array
 */
double[] doubleArrayOverrideValue() default {};
/**
 * Convenience property for specifying a float array value override. The target property
 * must be of this type.
 * <p>
 * Defaults to an empty array, indicating no override was specified.
 *
 * @return the property override value in the form of a float array
 */
float[] floatArrayOverrideValue() default {};
/**
 * Convenience property for specifying a boolean array value override. The target property
 * must be of this type.
 * <p>
 * Defaults to an empty array, indicating no override was specified.
 *
 * @return the property override value in the form of a boolean array
 */
boolean[] booleanArrayOverrideValue() default {};
/**
 * Convenience property for specifying an int array value override. The target property
 * must be of this type.
 * <p>
 * Defaults to an empty array, indicating no override was specified.
 *
 * @return the property override value in the form of an int array
 */
int[] intArrayOverrideValue() default {};
/**
 * Convenience property for specifying a long array value override. The target property
 * must be of this type.
 * <p>
 * Defaults to an empty array, indicating no override was specified.
 *
 * @return the property override value in the form of a long array
 */
long[] longArrayOverrideValue() default {};
/**
 * Property for overriding the validation configuration for a field annotated with the
 * {@link org.broadleafcommerce.common.presentation.AdminPresentation} annotation.
 * <p>
 * Defaults to an empty array, indicating the target field's existing validation
 * configuration is left untouched.
 *
 * @return the validation config override
 */
ValidationConfiguration[] validationConfigurations() default {};
/**
 * Property for overriding the operationTypes for an advanced collection. See
 * {@link org.broadleafcommerce.common.presentation.AdminPresentationCollection},
 * {@link org.broadleafcommerce.common.presentation.AdminPresentationAdornedTargetCollection} and
 * {@link org.broadleafcommerce.common.presentation.AdminPresentationMap} for default values for each type.
 * <p>
 * The default here sets every operation (add/fetch/inspect/remove/update) to
 * {@code OperationType.BASIC}.
 *
 * @return the operationType override
 */
AdminPresentationOperationTypes operationTypes() default @AdminPresentationOperationTypes(addType = OperationType.BASIC,
        fetchType = OperationType.BASIC, inspectType = OperationType.BASIC, removeType = OperationType.BASIC,
        updateType = OperationType.BASIC);
/**
 * Property for overriding the filter configuration for a field annotated with the
 * {@link org.broadleafcommerce.common.presentation.AdminPresentationDataDrivenEnumeration} annotation.
 * <p>
 * Defaults to an empty array, indicating no filter parameter override.
 *
 * @return the option filter configuration
 */
OptionFilterParam[] optionFilterParams() default {};
/**
 * Property for overriding the map key configuration for a field annotated with the
 * {@link org.broadleafcommerce.common.presentation.AdminPresentationMap} annotation.
 * <p>
 * Defaults to an empty array, indicating the target map's key configuration is left untouched.
 *
 * @return the map key configuration
 */
AdminPresentationMapKey[] keys() default {};
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_presentation_override_AdminPresentationMergeEntry.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.