name | code_snippet | score |
---|---|---|
hbase_OrderedBytes_isNumeric | /**
* Return true when the next encoded value in {@code src} uses Numeric encoding, false otherwise.
* {@code NaN}, {@code +/-Inf} are valid Numeric values.
*/
public static boolean isNumeric(PositionedByteRange src) {
byte x = (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
return x >= NEG_INF && x <= NAN;
} | 3.68 |
hudi_BaseHoodieWriteClient_commit | /**
 * Commit changes performed at the given instantTime marker.
 */
public boolean commit(String instantTime, O writeStatuses, Option<Map<String, String>> extraMetadata) {
HoodieTableMetaClient metaClient = createMetaClient(false);
String actionType = metaClient.getCommitActionType();
return commit(instantTime, writeStatuses, extraMetadata, actionType, Collections.emptyMap());
} | 3.68 |
framework_LegacyLocatorStrategy_findParentWidget | /**
* Returns the first widget found when going from {@code targetElement}
* upwards in the DOM hierarchy, assuming that {@code ancestorWidget} is a
* parent of {@code targetElement}.
*
 * @param targetElement
 *            the element whose containing widget should be found
 * @param ancestorWidget
 *            a widget known to be an ancestor of {@code targetElement}
* @return The widget whose root element is a parent of
* {@code targetElement}.
*/
private Widget findParentWidget(Element targetElement,
Widget ancestorWidget) {
/*
* As we cannot resolve Widgets from the element we start from the
* widget and move downwards to the correct child widget, as long as we
* find one.
*/
if (ancestorWidget instanceof HasWidgets) {
for (Widget w : ((HasWidgets) ancestorWidget)) {
if (w.getElement().isOrHasChild(targetElement)) {
return findParentWidget(targetElement, w);
}
}
}
// No children found, this is it
return ancestorWidget;
} | 3.68 |
hmily_HmilySQLUtil_isReadOnly | /**
* Determine whether SQL is read-only.
*
* @param sqlStatement SQL statement
* @return true if read-only, otherwise false
*/
public static boolean isReadOnly(final HmilyStatement sqlStatement) {
if (sqlStatement instanceof HmilyDMLStatement) {
return isReadOnly((HmilyDMLStatement) sqlStatement);
}
throw new UnsupportedOperationException(String.format("Unsupported SQL Type `%s`", sqlStatement.getClass().getSimpleName()));
} | 3.68 |
dubbo_ConfigurationUtils_getGlobalConfiguration | /**
 * Kept for compatibility with the single default application instance.
*
* @deprecated Replaced to {@link ConfigurationUtils#getGlobalConfiguration(ScopeModel)}
*/
@Deprecated
public static Configuration getGlobalConfiguration() {
return ApplicationModel.defaultModel().modelEnvironment().getConfiguration();
} | 3.68 |
zxing_CameraManager_closeDriver | /**
* Closes the camera driver if still in use.
*/
public synchronized void closeDriver() {
if (camera != null) {
camera.getCamera().release();
camera = null;
// Make sure to clear these each time we close the camera, so that any scanning rect
// requested by intent is forgotten.
framingRect = null;
framingRectInPreview = null;
}
} | 3.68 |
AreaShop_RentRegion_extend | /**
* Try to extend the rent for the current owner, respecting all restrictions.
* @return true if successful, otherwise false
*/
public boolean extend() {
if(!isRented()) {
return false;
}
OfflinePlayer offlinePlayer = Bukkit.getOfflinePlayer(getRenter());
return offlinePlayer != null && rent(offlinePlayer);
} | 3.68 |
morf_ConnectionResourcesBean_getHostName | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#getHostName()
*/
@Override
public String getHostName() {
return hostName;
} | 3.68 |
hudi_HoodieTable_isPartitioned | /**
 * @return true if the table is physically partitioned, based on the partition fields stored in the table config, false otherwise.
*/
public boolean isPartitioned() {
return getMetaClient().getTableConfig().isTablePartitioned();
} | 3.68 |
flink_FromClasspathEntryClassInformationProvider_createFromSystemClasspath | /**
* Creates a {@code FromClasspathEntryClassInformationProvider} looking for the entry class
* providing the main method on the system classpath.
*
* @return The {@code FromClasspathEntryClassInformationProvider} providing the job class found
* on the system classpath.
 * @throws IOException If a JAR listed on the system classpath was not accessible.
 * @throws FlinkException If either no main method or more than one main method was found on the system classpath.
*/
public static FromClasspathEntryClassInformationProvider createFromSystemClasspath()
throws IOException, FlinkException {
return new FromClasspathEntryClassInformationProvider(extractJobClassFromSystemClasspath());
} | 3.68 |
morf_OracleMetaDataProvider_expensiveReadTableKeys | /**
 * Reads the tables and their primary key columns from the database.
*
* @return A map of table name to primary key(s).
*/
private Map<String, List<String>> expensiveReadTableKeys() {
log.info("Starting read of key definitions");
long start = System.currentTimeMillis();
final StringBuilder primaryKeysWithWrongIndex = new StringBuilder();
final String getConstraintSql = "SELECT A.TABLE_NAME, A.COLUMN_NAME, C.INDEX_NAME FROM ALL_CONS_COLUMNS A "
+ "JOIN ALL_CONSTRAINTS C ON A.CONSTRAINT_NAME = C.CONSTRAINT_NAME AND A.OWNER = C.OWNER and A.TABLE_NAME = C.TABLE_NAME "
+ "WHERE C.TABLE_NAME not like 'BIN$%' AND C.OWNER=? AND C.CONSTRAINT_TYPE = 'P' ORDER BY A.TABLE_NAME, A.POSITION";
runSQL(getConstraintSql, new ResultSetHandler() {
@Override public void handle(ResultSet resultSet) throws SQLException {
while (resultSet.next()) {
String tableName = resultSet.getString(1);
String columnName = resultSet.getString(2);
String pkIndexName = resultSet.getString(3);
if (pkIndexName == null || !pkIndexName.endsWith("_PK")) {
primaryKeysWithWrongIndex.append("Primary Key on table [").append(tableName)
.append("] column [").append(columnName).append("] backed with an index whose name does not end in _PK [")
.append(pkIndexName).append("]").append(System.lineSeparator());
}
List<String> columns = keyMap.get(tableName);
if (columns == null) {
columns = new ArrayList<>();
keyMap.put(tableName, columns);
}
columns.add(columnName);
}
}
});
if (primaryKeysWithWrongIndex.length() > 0) {
throw new RuntimeException(primaryKeysWithWrongIndex.toString());
}
long end = System.currentTimeMillis();
log.info(String.format("Read key metadata in %dms; %d tables", end - start, keyMap.size()));
return keyMap;
} | 3.68 |
flink_OpaqueMemoryResource_getResourceHandle | /** Gets the handle to the resource. */
public T getResourceHandle() {
return resourceHandle;
} | 3.68 |
hbase_AccessController_getUserPermissions | /**
* @deprecated since 2.2.0 and will be removed in 4.0.0. Use
* {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
* @see Admin#getUserPermissions(GetUserPermissionsRequest)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-21911">HBASE-21911</a>
*/
@Deprecated
@Override
public void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done) {
AccessControlProtos.GetUserPermissionsResponse response = null;
try {
// only allowed to be called on _acl_ region
if (aclRegion) {
if (!initialized) {
throw new CoprocessorException("AccessController not yet initialized");
}
User caller = RpcServer.getRequestUser().orElse(null);
final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
final String namespace =
request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
final TableName table =
request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
final byte[] cf =
request.hasColumnFamily() ? request.getColumnFamily().toByteArray() : null;
final byte[] cq =
request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null;
preGetUserPermissions(caller, userName, namespace, table, cf, cq);
GetUserPermissionsRequest getUserPermissionsRequest = null;
if (request.getType() == AccessControlProtos.Permission.Type.Table) {
getUserPermissionsRequest = GetUserPermissionsRequest.newBuilder(table).withFamily(cf)
.withQualifier(cq).withUserName(userName).build();
} else if (request.getType() == AccessControlProtos.Permission.Type.Namespace) {
getUserPermissionsRequest =
GetUserPermissionsRequest.newBuilder(namespace).withUserName(userName).build();
} else {
getUserPermissionsRequest =
GetUserPermissionsRequest.newBuilder().withUserName(userName).build();
}
List<UserPermission> perms =
regionEnv.getConnection().getAdmin().getUserPermissions(getUserPermissionsRequest);
response = AccessControlUtil.buildGetUserPermissionsResponse(perms);
} else {
throw new CoprocessorException(AccessController.class,
"This method " + "can only execute at " + PermissionStorage.ACL_TABLE_NAME + " table.");
}
} catch (IOException ioe) {
// pass exception back up
CoprocessorRpcUtils.setControllerException(controller, ioe);
}
done.run(response);
} | 3.68 |
flink_AbstractID_toHexString | /**
 * Returns a pure String representation of the ID in hexadecimal. This method should be used to
 * construct things like paths that require a stable representation, and is therefore
 * final.
*/
public final String toHexString() {
if (this.hexString == null) {
final byte[] ba = new byte[SIZE];
longToByteArray(this.lowerPart, ba, 0);
longToByteArray(this.upperPart, ba, SIZE_OF_LONG);
this.hexString = StringUtils.byteToHexString(ba);
}
return this.hexString;
} | 3.68 |
hudi_DFSDeltaInputReader_analyzeSingleFile | /**
 * Hook for implementations of {@link DeltaInputReader} to read a single file on DFS and provide an
 * average number of records across N files.
*/
protected long analyzeSingleFile(String filePath) {
throw new UnsupportedOperationException("No implementation found");
} | 3.68 |
hadoop_AddMountAttributes_updateCommonAttributes | /**
 * Common attributes like read-only, fault-tolerant, dest order, owner, group, mode, etc. are
* updated for the given mount table object.
*
* @param existingEntry Mount table object.
*/
private void updateCommonAttributes(MountTable existingEntry) {
if (this.isReadonly()) {
existingEntry.setReadOnly(true);
}
if (this.isFaultTolerant()) {
existingEntry.setFaultTolerant(true);
}
if (this.getOrder() != null) {
existingEntry.setDestOrder(this.getOrder());
}
RouterAdmin.ACLEntity mountAclInfo = this.getAclInfo();
// Update ACL info of mount table entry
if (mountAclInfo.getOwner() != null) {
existingEntry.setOwnerName(mountAclInfo.getOwner());
}
if (mountAclInfo.getGroup() != null) {
existingEntry.setGroupName(mountAclInfo.getGroup());
}
if (mountAclInfo.getMode() != null) {
existingEntry.setMode(mountAclInfo.getMode());
}
existingEntry.validate();
} | 3.68 |
starts_AnnotationVisitor_visitEnum | /**
* Visits an enumeration value of the annotation.
*
* @param name
* the value name.
* @param desc
* the class descriptor of the enumeration class.
* @param value
* the actual enumeration value.
*/
public void visitEnum(String name, String desc, String value) {
if (av != null) {
av.visitEnum(name, desc, value);
}
} | 3.68 |
framework_SimpleStringFilter_getFilterString | /**
* Returns the filter string.
*
* Note: this method is intended only for implementations of lazy string
* filters and may change in the future.
*
* @return filter string given to the constructor
*/
public String getFilterString() {
return filterString;
} | 3.68 |
flink_StateBackendLoader_loadStateBackendFromKeyedStateHandles | /**
* Load state backend which may wrap the original state backend for recovery.
*
* @param originalStateBackend StateBackend loaded from application or config.
* @param classLoader User code classloader.
* @param keyedStateHandles The state handles for restore.
* @return Wrapped state backend for recovery.
 * @throws DynamicCodeLoadingException Thrown if keyed state handles of a wrapped state backend
 *     are found but the wrapping class was not found or could not be instantiated.
*/
public static StateBackend loadStateBackendFromKeyedStateHandles(
StateBackend originalStateBackend,
ClassLoader classLoader,
Collection<KeyedStateHandle> keyedStateHandles)
throws DynamicCodeLoadingException {
// Wrapping ChangelogStateBackend or ChangelogStateBackendHandle is not supported currently.
if (!isChangelogStateBackend(originalStateBackend)
&& keyedStateHandles.stream()
.anyMatch(
stateHandle ->
stateHandle instanceof ChangelogStateBackendHandle)) {
return wrapStateBackend(
originalStateBackend, classLoader, DEACTIVATED_CHANGELOG_STATE_BACKEND);
}
return originalStateBackend;
} | 3.68 |
framework_BigDecimalTextField_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 9997;
} | 3.68 |
framework_ServerRpcHandler_parseInvocations | /**
* Parse JSON from the client into a list of MethodInvocation instances.
*
* @param connectorTracker
* The ConnectorTracker used to lookup connectors
* @param invocationsJson
* JSON containing all information needed to execute all
* requested RPC calls.
* @return list of MethodInvocation to perform
*/
private List<MethodInvocation> parseInvocations(
ConnectorTracker connectorTracker, JsonArray invocationsJson) {
int invocationCount = invocationsJson.length();
List<MethodInvocation> invocations = new ArrayList<>(invocationCount);
MethodInvocation previousInvocation = null;
// parse JSON to MethodInvocations
for (int i = 0; i < invocationCount; ++i) {
JsonArray invocationJson = invocationsJson.getArray(i);
MethodInvocation invocation = parseInvocation(invocationJson,
previousInvocation, connectorTracker);
if (invocation != null) {
// Can be null if the invocation was a legacy invocation and it
// was merged with the previous one or if the invocation was
// rejected because of an error.
invocations.add(invocation);
previousInvocation = invocation;
}
}
return invocations;
} | 3.68 |
hbase_RSGroupAdminClient_addRSGroup | /**
* Creates a new RegionServer group with the given name.
*/
public void addRSGroup(String groupName) throws IOException {
AddRSGroupRequest request = AddRSGroupRequest.newBuilder().setRSGroupName(groupName).build();
try {
stub.addRSGroup(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.68 |
flink_ModuleManager_unloadModule | /**
* Unload a module with given name.
*
* @param name name of the module
* @throws ValidationException when there is no module with the given name
*/
public void unloadModule(String name) {
if (loadedModules.containsKey(name)) {
loadedModules.remove(name);
boolean used = usedModules.remove(name);
LOG.info("Unloaded an {} module '{}'", used ? "used" : "unused", name);
} else {
throw new ValidationException(String.format("No module with name '%s' exists", name));
}
} | 3.68 |
hadoop_TimelineEntities_addEntities | /**
 * Add a list of entities to the existing entity list.
*
* @param entities
* a list of entities
*/
public void addEntities(List<TimelineEntity> entities) {
this.entities.addAll(entities);
} | 3.68 |
hadoop_SaslParticipant_wrap | /**
* Wraps a byte array.
*
* @param bytes The array containing the bytes to wrap.
 * @param off The starting position in the array
 * @param len The number of bytes to wrap
* @return byte[] wrapped bytes
* @throws SaslException if the bytes cannot be successfully wrapped
*/
public byte[] wrap(byte[] bytes, int off, int len) throws SaslException {
if (saslClient != null) {
return saslClient.wrap(bytes, off, len);
} else {
return saslServer.wrap(bytes, off, len);
}
} | 3.68 |
hudi_IncrSourceHelper_getHollowCommitHandleMode | /**
* When hollow commits are found while using incremental source with {@link HoodieDeltaStreamer},
* unlike batch incremental query, we do not use {@link HollowCommitHandling#FAIL} by default,
* instead we use {@link HollowCommitHandling#BLOCK} to block processing data from going beyond the
* hollow commits to avoid unintentional skip.
* <p>
* Users can set {@link DataSourceReadOptions#INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT} to
* {@link HollowCommitHandling#USE_TRANSITION_TIME} to avoid the blocking behavior.
*/
public static HollowCommitHandling getHollowCommitHandleMode(TypedProperties props) {
return HollowCommitHandling.valueOf(
props.getString(INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT().key(), HollowCommitHandling.BLOCK.name()));
} | 3.68 |
hadoop_HdfsDataInputStream_getWrappedStream | /**
 * Get a reference to the wrapped input stream. We always want to return the
 * actual underlying InputStream, even when we're using a CryptoStream, e.g.
 * in the delegated methods below.
 *
 * @return the underlying input stream
*/
public InputStream getWrappedStream() {
return in;
} | 3.68 |
framework_DDEventHandleStrategy_handleTouchEnd | /**
* Called to handle {@link Event#ONTOUCHEND} event.
*
* @param target
* target element over which DnD event has happened
* @param event
* ONTOUCHEND GWT event for active DnD operation
* @param mediator
* VDragAndDropManager data accessor
*/
protected void handleTouchEnd(Element target, NativePreviewEvent event,
DDManagerMediator mediator) {
/* Avoid simulated event on drag end */
event.getNativeEvent().preventDefault();
handleMouseUp(target, event, mediator);
} | 3.68 |
hadoop_OBSCommonUtils_initMultipartUploads | /**
 * Initialize multipart uploads, purging any that are older than the value of
 * PURGE_EXISTING_MULTIPART_AGE.
*
* @param owner the owner OBSFileSystem instance
* @param conf the configuration to use for the FS
* @throws IOException on any failure to initialize multipart upload
*/
static void initMultipartUploads(final OBSFileSystem owner,
final Configuration conf)
throws IOException {
boolean purgeExistingMultipart =
conf.getBoolean(OBSConstants.PURGE_EXISTING_MULTIPART,
OBSConstants.DEFAULT_PURGE_EXISTING_MULTIPART);
long purgeExistingMultipartAge =
longOption(conf, OBSConstants.PURGE_EXISTING_MULTIPART_AGE,
OBSConstants.DEFAULT_PURGE_EXISTING_MULTIPART_AGE, 0);
if (!purgeExistingMultipart) {
return;
}
final Date purgeBefore = new Date(
new Date().getTime() - purgeExistingMultipartAge * 1000);
try {
ListMultipartUploadsRequest request
= new ListMultipartUploadsRequest(owner.getBucket());
while (true) {
// List + purge
MultipartUploadListing uploadListing = owner.getObsClient()
.listMultipartUploads(request);
for (MultipartUpload upload
: uploadListing.getMultipartTaskList()) {
if (upload.getInitiatedDate().compareTo(purgeBefore) < 0) {
owner.getObsClient().abortMultipartUpload(
new AbortMultipartUploadRequest(
owner.getBucket(), upload.getObjectKey(),
upload.getUploadId()));
}
}
if (!uploadListing.isTruncated()) {
break;
}
request.setUploadIdMarker(
uploadListing.getNextUploadIdMarker());
request.setKeyMarker(uploadListing.getNextKeyMarker());
}
} catch (ObsException e) {
if (e.getResponseCode() == FORBIDDEN_CODE) {
LOG.debug("Failed to purging multipart uploads against {},"
+ " FS may be read only", owner.getBucket(),
e);
} else {
throw translateException("purging multipart uploads",
owner.getBucket(), e);
}
}
} | 3.68 |
hbase_ZKProcedureUtil_isAcquiredPathNode | /**
* Is this in the procedure barrier acquired znode path
*/
boolean isAcquiredPathNode(String path) {
return path.startsWith(this.acquiredZnode) && !path.equals(acquiredZnode)
&& isMemberNode(path, acquiredZnode);
} | 3.68 |
hadoop_SampleQuantiles_snapshot | /**
* Get a snapshot of the current values of all the tracked quantiles.
*
* @return snapshot of the tracked quantiles. If no items are added
* to the estimator, returns null.
*/
synchronized public Map<Quantile, Long> snapshot() {
// flush the buffer first for best results
insertBatch();
if (samples.isEmpty()) {
return null;
}
Map<Quantile, Long> values = new TreeMap<Quantile, Long>();
for (int i = 0; i < quantiles.length; i++) {
values.put(quantiles[i], query(quantiles[i].quantile));
}
return values;
} | 3.68 |
hbase_CatalogFamilyFormat_getRegionStateColumn | /**
* Returns the column qualifier for serialized region state
* @param replicaId the replicaId of the region
* @return a byte[] for state qualifier
*/
public static byte[] getRegionStateColumn(int replicaId) {
return replicaId == 0
? HConstants.STATE_QUALIFIER
: Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
+ String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
} | 3.68 |
hadoop_DatanodeAdminProperties_setAdminState | /**
* Set the admin state of the datanode.
* @param adminState the admin state of the datanode.
*/
public void setAdminState(final AdminStates adminState) {
this.adminState = adminState;
} | 3.68 |
framework_CssLayoutConnector_onConnectorHierarchyChange | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.AbstractComponentContainerConnector#
* onConnectorHierarchyChange
* (com.vaadin.client.ConnectorHierarchyChangeEvent)
*/
@Override
public void onConnectorHierarchyChange(
ConnectorHierarchyChangeEvent event) {
Profiler.enter("CssLayoutConnector.onConnectorHierarchyChange");
// Detach old child widgets and possibly their caption
Profiler.enter(
"CssLayoutConnector.onConnectorHierarchyChange remove old children");
for (ComponentConnector child : event.getOldChildren()) {
if (child.getParent() == this) {
// Skip current children
continue;
}
getWidget().remove(child.getWidget());
VCaption vCaption = childIdToCaption.get(child.getConnectorId());
if (vCaption != null) {
childIdToCaption.remove(child.getConnectorId());
getWidget().remove(vCaption);
}
}
Profiler.leave(
"CssLayoutConnector.onConnectorHierarchyChange remove old children");
Profiler.enter(
"CssLayoutConnector.onConnectorHierarchyChange add children");
int index = 0;
for (ComponentConnector child : getChildComponents()) {
VCaption childCaption = childIdToCaption
.get(child.getConnectorId());
if (childCaption != null) {
getWidget().addOrMove(childCaption, index++);
}
getWidget().addOrMove(child.getWidget(), index++);
}
Profiler.leave(
"CssLayoutConnector.onConnectorHierarchyChange add children");
Profiler.leave("CssLayoutConnector.onConnectorHierarchyChange");
} | 3.68 |
pulsar_AbstractDispatcherSingleActiveConsumer_pickAndScheduleActiveConsumer | /**
 * Pick the active consumer of a topic for a {@link SubType#Failover} subscription.
 * For a non-partitioned topic, the consumer is picked based on the order in which consumers subscribed to the topic.
 * For a partitioned topic, consumers are first sorted by priority level and consumer name, then
 * partitions are distributed evenly across the consumers with the highest priority level.
 *
 * @return true if the active consumer changed, otherwise false.
*/
protected boolean pickAndScheduleActiveConsumer() {
checkArgument(!consumers.isEmpty());
AtomicBoolean hasPriorityConsumer = new AtomicBoolean(false);
consumers.sort((c1, c2) -> {
int priority = c1.getPriorityLevel() - c2.getPriorityLevel();
if (priority != 0) {
hasPriorityConsumer.set(true);
return priority;
}
return c1.consumerName().compareTo(c2.consumerName());
});
int consumersSize = consumers.size();
// find number of consumers which are having the highest priorities. so partitioned-topic assignment happens
// evenly across highest priority consumers
if (hasPriorityConsumer.get()) {
int highestPriorityLevel = consumers.get(0).getPriorityLevel();
for (int i = 0; i < consumers.size(); i++) {
if (highestPriorityLevel != consumers.get(i).getPriorityLevel()) {
consumersSize = i;
break;
}
}
}
int index = partitionIndex >= 0
? partitionIndex % consumersSize
: peekConsumerIndexFromHashRing(makeHashRing(consumersSize));
Consumer prevConsumer = ACTIVE_CONSUMER_UPDATER.getAndSet(this, consumers.get(index));
Consumer activeConsumer = ACTIVE_CONSUMER_UPDATER.get(this);
if (prevConsumer == activeConsumer) {
// Active consumer did not change. Do nothing at this point
return false;
} else {
// If the active consumer is changed, send notification.
scheduleReadOnActiveConsumer();
return true;
}
} | 3.68 |
incubator-hugegraph-toolchain_PropertyKeyController_delete | /**
* Should request "check_using" before delete
*/
@DeleteMapping
public void delete(@PathVariable("connId") int connId,
@RequestParam List<String> names,
@RequestParam(name = "skip_using",
defaultValue = "false")
boolean skipUsing) {
for (String name : names) {
this.service.checkExist(name, connId);
if (this.service.checkUsing(name, connId)) {
if (skipUsing) {
continue;
} else {
throw new ExternalException("schema.propertykey.in-using",
name);
}
}
this.service.remove(name, connId);
}
} | 3.68 |
hbase_RpcServer_getService | /**
* @param serviceName Some arbitrary string that represents a 'service'.
* @param services Available services and their service interfaces.
* @return BlockingService that goes with the passed <code>serviceName</code>
*/
protected static BlockingService getService(final List<BlockingServiceAndInterface> services,
final String serviceName) {
BlockingServiceAndInterface bsasi = getServiceAndInterface(services, serviceName);
return bsasi == null ? null : bsasi.getBlockingService();
} | 3.68 |
hadoop_FSDataOutputStreamBuilder_bufferSize | /**
* Set the size of the buffer to be used.
*
* @param bufSize buffer size.
* @return Generics Type B.
*/
public B bufferSize(int bufSize) {
bufferSize = bufSize;
return getThisBuilder();
} | 3.68 |
shardingsphere-elasticjob_JobFacade_afterJobExecuted | /**
* Call after job executed.
*
* @param shardingContexts sharding contexts
*/
public void afterJobExecuted(final ShardingContexts shardingContexts) {
for (ElasticJobListener each : elasticJobListeners) {
each.afterJobExecuted(shardingContexts);
}
} | 3.68 |
flink_BlobServerConnection_put | /**
* Handles an incoming PUT request from a BLOB client.
*
* @param inputStream The input stream to read incoming data from
* @param outputStream The output stream to send data back to the client
* @param buf An auxiliary buffer for data serialization/deserialization
* @throws IOException thrown if an I/O error occurs while reading/writing data from/to the
* respective streams
*/
private void put(InputStream inputStream, OutputStream outputStream, byte[] buf)
throws IOException {
File incomingFile = null;
try {
// read HEADER contents: job ID, HA mode/permanent or transient BLOB
final int mode = inputStream.read();
if (mode < 0) {
throw new EOFException("Premature end of PUT request");
}
final JobID jobId;
if (mode == JOB_UNRELATED_CONTENT) {
jobId = null;
} else if (mode == JOB_RELATED_CONTENT) {
byte[] jidBytes = new byte[JobID.SIZE];
readFully(inputStream, jidBytes, 0, JobID.SIZE, "JobID");
jobId = JobID.fromByteArray(jidBytes);
} else {
throw new IOException("Unknown type of BLOB addressing.");
}
final BlobKey.BlobType blobType;
{
final int read = inputStream.read();
if (read < 0) {
throw new EOFException("Read an incomplete BLOB type");
} else if (read == TRANSIENT_BLOB.ordinal()) {
blobType = TRANSIENT_BLOB;
} else if (read == PERMANENT_BLOB.ordinal()) {
blobType = PERMANENT_BLOB;
checkArgument(jobId != null, "Invalid BLOB addressing for permanent BLOBs");
} else {
throw new IOException("Invalid data received for the BLOB type: " + read);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(
"Received PUT request for BLOB of job {} with from {}.",
jobId,
clientSocket.getInetAddress());
}
incomingFile = blobServer.createTemporaryFilename();
byte[] digest = readFileFully(inputStream, incomingFile, buf);
BlobKey blobKey = blobServer.moveTempFileToStore(incomingFile, jobId, digest, blobType);
// Return computed key to client for validation
outputStream.write(RETURN_OKAY);
blobKey.writeToOutputStream(outputStream);
} catch (SocketException e) {
// happens when the other side disconnects
LOG.debug("Socket connection closed", e);
} catch (Throwable t) {
LOG.error("PUT operation failed", t);
try {
writeErrorToStream(outputStream, t);
} catch (IOException e) {
// since we are in an exception case, it means not much that we could not send the
// error
// ignore this
}
clientSocket.close();
} finally {
if (incomingFile != null) {
if (!incomingFile.delete() && incomingFile.exists()) {
LOG.warn(
"Cannot delete BLOB server staging file "
+ incomingFile.getAbsolutePath());
}
}
}
} | 3.68 |
framework_Action_setCaption | /**
* Sets the caption.
*
* @param caption
* the caption to set.
*/
public void setCaption(String caption) {
this.caption = caption;
} | 3.68 |
hbase_MetricsAssignmentManager_updateRITOldestAge | /**
* update the timestamp for oldest region in transition metrics.
*/
public void updateRITOldestAge(final long timestamp) {
assignmentManagerSource.setRITOldestAge(timestamp);
} | 3.68 |
pulsar_LoadManagerShared_applyNamespacePolicies | // Determines the brokers available for the given service unit according to the given policies.
// The brokers are put into brokerCandidateCache.
public static void applyNamespacePolicies(final ServiceUnitId serviceUnit,
final SimpleResourceAllocationPolicies policies, final Set<String> brokerCandidateCache,
final Set<String> availableBrokers, final BrokerTopicLoadingPredicate brokerTopicLoadingPredicate) {
Set<String> primariesCache = localPrimariesCache.get();
primariesCache.clear();
Set<String> secondaryCache = localSecondaryCache.get();
secondaryCache.clear();
NamespaceName namespace = serviceUnit.getNamespaceObject();
boolean isIsolationPoliciesPresent = policies.areIsolationPoliciesPresent(namespace);
boolean isNonPersistentTopic = (serviceUnit instanceof NamespaceBundle)
? ((NamespaceBundle) serviceUnit).hasNonPersistentTopic() : false;
if (isIsolationPoliciesPresent) {
LOG.debug("Isolation Policies Present for namespace - [{}]", namespace.toString());
}
for (final String broker : availableBrokers) {
final String brokerUrlString = String.format("http://%s", broker);
URL brokerUrl;
try {
brokerUrl = new URL(brokerUrlString);
} catch (MalformedURLException e) {
LOG.error("Unable to parse brokerUrl from ResourceUnitId", e);
continue;
}
// todo: in future check if the resource unit has resources to take the namespace
if (isIsolationPoliciesPresent) {
// note: serviceUnitID is namespace name and ResourceID is brokerName
if (policies.isPrimaryBroker(namespace, brokerUrl.getHost())) {
primariesCache.add(broker);
if (LOG.isDebugEnabled()) {
LOG.debug("Added Primary Broker - [{}] as possible Candidates for"
+ " namespace - [{}] with policies", brokerUrl.getHost(), namespace.toString());
}
} else if (policies.isSecondaryBroker(namespace, brokerUrl.getHost())) {
secondaryCache.add(broker);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Added Shared Broker - [{}] as possible "
+ "Candidates for namespace - [{}] with policies",
brokerUrl.getHost(), namespace.toString());
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping Broker - [{}] not primary broker and not shared" + " for namespace - [{}] ",
brokerUrl.getHost(), namespace.toString());
}
}
} else {
// non-persistent topic can be assigned to only those brokers that enabled for non-persistent topic
if (isNonPersistentTopic
&& !brokerTopicLoadingPredicate.isEnableNonPersistentTopics(brokerUrlString)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Filter broker- [{}] because it doesn't support non-persistent namespace - [{}]",
brokerUrl.getHost(), namespace.toString());
}
} else if (!isNonPersistentTopic
&& !brokerTopicLoadingPredicate.isEnablePersistentTopics(brokerUrlString)) {
// persistent topic can be assigned to only brokers that enabled for persistent-topic
if (LOG.isDebugEnabled()) {
LOG.debug("Filter broker- [{}] because broker only supports non-persistent namespace - [{}]",
brokerUrl.getHost(), namespace.toString());
}
} else if (policies.isSharedBroker(brokerUrl.getHost())) {
secondaryCache.add(broker);
if (LOG.isDebugEnabled()) {
LOG.debug("Added Shared Broker - [{}] as possible Candidates for namespace - [{}]",
brokerUrl.getHost(), namespace.toString());
}
}
}
}
if (isIsolationPoliciesPresent) {
brokerCandidateCache.addAll(primariesCache);
if (policies.shouldFailoverToSecondaries(namespace, primariesCache.size())) {
LOG.debug(
"Not enough of primaries [{}] available for namespace - [{}], "
+ "adding shared [{}] as possible candidate owners",
primariesCache.size(), namespace.toString(), secondaryCache.size());
brokerCandidateCache.addAll(secondaryCache);
}
} else {
LOG.debug(
"Policies not present for namespace - [{}] so only "
+ "considering shared [{}] brokers for possible owner",
namespace.toString(), secondaryCache.size());
brokerCandidateCache.addAll(secondaryCache);
}
} | 3.68 |
hbase_SequenceIdAccounting_findLower | /**
* Iterates over the given Map and compares sequence ids with corresponding entries in
* {@link #lowestUnflushedSequenceIds}. If a region in {@link #lowestUnflushedSequenceIds} has a
* sequence id less than that passed in <code>sequenceids</code> then return it.
* @param sequenceids Sequenceids keyed by encoded region name.
* @return stores of regions found in this instance with sequence ids less than those passed in.
*/
Map<byte[], List<byte[]>> findLower(Map<byte[], Long> sequenceids) {
Map<byte[], List<byte[]>> toFlush = null;
// Keeping the old behavior of iterating unflushedSeqNums under oldestSeqNumsLock.
synchronized (tieLock) {
for (Map.Entry<byte[], Long> e : sequenceids.entrySet()) {
Map<ImmutableByteArray, Long> m = this.lowestUnflushedSequenceIds.get(e.getKey());
if (m == null) {
continue;
}
for (Map.Entry<ImmutableByteArray, Long> me : m.entrySet()) {
if (me.getValue() <= e.getValue()) {
if (toFlush == null) {
              toFlush = new TreeMap<>(Bytes.BYTES_COMPARATOR);
}
toFlush.computeIfAbsent(e.getKey(), k -> new ArrayList<>())
.add(Bytes.toBytes(me.getKey().toString()));
}
}
}
}
return toFlush;
} | 3.68 |
flink_UnresolvedIdentifier_asSummaryString | /** Returns a string that summarizes this instance for printing to a console or log. */
public String asSummaryString() {
return Stream.of(catalogName, databaseName, objectName)
.filter(Objects::nonNull)
.map(EncodingUtils::escapeIdentifier)
.collect(Collectors.joining("."));
} | 3.68 |
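A minimal JDK-only sketch of the same joining pattern shown in `asSummaryString()`; Flink's `EncodingUtils.escapeIdentifier` is omitted here and the class and method names below are invented for illustration.

```java
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SummaryStringSketch {
    // Same joining pattern as asSummaryString(), minus identifier escaping:
    // null parts (e.g. a missing catalog or database) are simply skipped.
    static String summary(String catalogName, String databaseName, String objectName) {
        return Stream.of(catalogName, databaseName, objectName)
                .filter(Objects::nonNull)
                .collect(Collectors.joining("."));
    }

    public static void main(String[] args) {
        System.out.println(summary("cat", "db", "tbl")); // cat.db.tbl
        System.out.println(summary(null, null, "tbl"));  // tbl
    }
}
```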
flink_UpsertTestSinkBuilder_setKeySerializationSchema | /**
* Sets the key {@link SerializationSchema} that transforms incoming records to byte[].
*
 * @param keySerializationSchema the serialization schema applied to record keys
* @return {@link UpsertTestSinkBuilder}
*/
public UpsertTestSinkBuilder<IN> setKeySerializationSchema(
SerializationSchema<IN> keySerializationSchema) {
this.keySerializationSchema = checkNotNull(keySerializationSchema);
return this;
} | 3.68 |
hbase_LoadBalancerFactory_getDefaultLoadBalancerClass | /**
* The default {@link LoadBalancer} class.
* @return The Class for the default {@link LoadBalancer}.
*/
public static Class<? extends LoadBalancer> getDefaultLoadBalancerClass() {
return StochasticLoadBalancer.class;
} | 3.68 |
morf_XmlDataSetConsumer_close | /**
* Fired when a dataset has ended.
*
* @see org.alfasoftware.morf.dataset.DataSetConsumer#close(org.alfasoftware.morf.dataset.DataSetConsumer.CloseState)
*/
@Override
public void close(CloseState closeState) {
xmlStreamProvider.close();
} | 3.68 |
flink_AbstractPagedOutputView_clear | /**
* Clears the internal state. Any successive write calls will fail until either {@link
* #advance()} or {@link #seekOutput(MemorySegment, int)} is called.
*
* @see #advance()
* @see #seekOutput(MemorySegment, int)
*/
protected void clear() {
this.currentSegment = null;
this.positionInSegment = this.headerLength;
} | 3.68 |
Activiti_CollectionUtil_mapOfClass | /**
* Helper method to easily create a map with keys of type String and values of a given Class. Null values are allowed.
*
* @param clazz the target Value class
* @param objects varargs containing the key1, value1, key2, value2, etc. Note: although an Object, we will cast the key to String internally
* @throws ActivitiIllegalArgumentException when objects are not even or key/value are not expected types
*/
public static <T> Map<String, T> mapOfClass(Class<T> clazz, Object... objects) {
if (objects.length % 2 != 0) {
throw new ActivitiIllegalArgumentException("The input should always be even since we expect a list of key-value pairs!");
}
    Map<String, T> map = new HashMap<>();
for (int i = 0; i < objects.length; i += 2) {
int keyIndex = i;
int valueIndex = i + 1;
Object key = objects[keyIndex];
Object value = objects[valueIndex];
if (!String.class.isInstance(key)) {
throw new ActivitiIllegalArgumentException("key at index " + keyIndex + " should be a String but is a " + key.getClass());
}
if (value != null && !clazz.isInstance(value)) {
throw new ActivitiIllegalArgumentException("value at index " + valueIndex + " should be a " + clazz + " but is a " + value.getClass());
}
map.put((String) key, (T) value);
}
return map;
} | 3.68 |
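A self-contained sketch of the key/value varargs pattern used by `mapOfClass`; the class name is invented and a plain `IllegalArgumentException` stands in for `ActivitiIllegalArgumentException` so the example has no Activiti dependency.

```java
import java.util.HashMap;
import java.util.Map;

public class MapOfClassSketch {
    // Mirrors the mapOfClass shape: even-length varargs of (String key, T value) pairs.
    public static <T> Map<String, T> mapOfClass(Class<T> clazz, Object... objects) {
        if (objects.length % 2 != 0) {
            throw new IllegalArgumentException("Expected an even number of key-value arguments");
        }
        Map<String, T> map = new HashMap<>();
        for (int i = 0; i < objects.length; i += 2) {
            Object key = objects[i];
            Object value = objects[i + 1];
            if (!(key instanceof String)) {
                throw new IllegalArgumentException("key at index " + i + " must be a String");
            }
            if (value != null && !clazz.isInstance(value)) {
                throw new IllegalArgumentException("value at index " + (i + 1) + " must be a " + clazz);
            }
            map.put((String) key, clazz.cast(value));
        }
        return map;
    }

    public static void main(String[] args) {
        Map<String, Integer> retries = mapOfClass(Integer.class, "orderService", 3, "paymentService", 5);
        System.out.println(retries); // e.g. {orderService=3, paymentService=5}
        // mapOfClass(Integer.class, "orderService", "three"); // would throw: wrong value type
    }
}
```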
querydsl_BeanMap_put | /**
* Sets the bean property with the given name to the given value.
*
* @param name the name of the property to set
* @param value the value to set that property to
* @return the previous value of that property
*/
@Override
public Object put(String name, Object value) {
if (bean != null) {
Object oldValue = get(name);
Method method = getWriteMethod(name);
if (method == null) {
throw new IllegalArgumentException("The bean of type: " + bean.getClass().getName() + " has no property called: " + name);
}
try {
Object[] arguments = createWriteMethodArguments(method, value);
method.invoke(bean, arguments);
Object newValue = get(name);
firePropertyChange(name, oldValue, newValue);
} catch (InvocationTargetException | IllegalAccessException e) {
throw new IllegalArgumentException(e.getMessage());
}
return oldValue;
}
return null;
} | 3.68 |
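A standalone sketch of the same reflective property-set pattern, using only `java.beans`; `BeanMap`'s property-change events and argument conversion are omitted, and the `Person` bean is a made-up example.

```java
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.lang.reflect.Method;

public class BeanPutSketch {
    public static class Person {
        private String name;
        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
    }

    // Find the JavaBeans write method for the property, invoke it, and return the old value,
    // mirroring the shape of BeanMap.put without the PropertyChange support.
    static Object put(Object bean, String property, Object value) throws Exception {
        for (PropertyDescriptor pd : Introspector.getBeanInfo(bean.getClass()).getPropertyDescriptors()) {
            if (pd.getName().equals(property) && pd.getWriteMethod() != null) {
                Method read = pd.getReadMethod();
                Object old = (read != null) ? read.invoke(bean) : null;
                pd.getWriteMethod().invoke(bean, value);
                return old;
            }
        }
        throw new IllegalArgumentException(
                "The bean of type: " + bean.getClass().getName() + " has no property called: " + property);
    }

    public static void main(String[] args) throws Exception {
        Person p = new Person();
        System.out.println(put(p, "name", "Ada")); // null (no previous value)
        System.out.println(p.getName());           // Ada
    }
}
```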
hadoop_AllocateResponse_getUpdateErrors | /**
* Get the list of container update errors to inform the
* Application Master about the container updates that could not be
* satisfied due to error.
*
* @return List of Update Container Errors.
*/
@Public
@Unstable
public List<UpdateContainerError> getUpdateErrors() {
return new ArrayList<>();
} | 3.68 |
framework_AbstractComponentConnector_sendContextClickEvent | /**
* This method sends the context menu event to the server-side. Can be
* overridden to provide extra information through an alternative RPC
* interface.
*
* @since 7.6
* @param details
* the mouse event details
* @param eventTarget
* the target of the event
*/
protected void sendContextClickEvent(MouseEventDetails details,
EventTarget eventTarget) {
// The default context click implementation only provides the mouse
// coordinates relative to root element of widget.
getRpcProxy(ContextClickRpc.class).contextClick(details);
WidgetUtil.clearTextSelection();
} | 3.68 |
framework_Potus_getFirstName | /**
* @return the firstName
*/
public String getFirstName() {
return firstName;
} | 3.68 |
hbase_MetaTableAccessor_getRegion | /**
* Gets the region info and assignment for the specified region.
* @param connection connection we're using
* @param regionName Region to lookup.
* @return Location and RegionInfo for <code>regionName</code>
* @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
*/
@Deprecated
public static Pair<RegionInfo, ServerName> getRegion(Connection connection, byte[] regionName)
throws IOException {
HRegionLocation location = getRegionLocation(connection, regionName);
return location == null ? null : new Pair<>(location.getRegion(), location.getServerName());
} | 3.68 |
morf_NamedParameterPreparedStatement_setFetchSize | /**
* @param rows the number of rows to fetch
* @see PreparedStatement#setFetchSize(int)
* @exception SQLException if a database access error occurs,
* this method is called on a closed <code>Statement</code> or the
* condition {@code rows >= 0} is not satisfied.
*/
public void setFetchSize(int rows) throws SQLException {
statement.setFetchSize(rows);
} | 3.68 |
hadoop_Anonymizer_main | /**
* The main driver program to use the anonymization utility.
 * @param args the command-line arguments for the anonymization tool
*/
public static void main(String[] args) {
Anonymizer instance = new Anonymizer();
int result = 0;
try {
result = ToolRunner.run(instance, args);
} catch (Exception e) {
e.printStackTrace(System.err);
System.exit(-1);
}
if (result != 0) {
System.exit(result);
}
return;
} | 3.68 |
flink_Execution_sendUpdatePartitionInfoRpcCall | /**
* Update the partition infos on the assigned resource.
*
* @param partitionInfos for the remote task
*/
private void sendUpdatePartitionInfoRpcCall(final Iterable<PartitionInfo> partitionInfos) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final TaskManagerLocation taskManagerLocation = slot.getTaskManagerLocation();
CompletableFuture<Acknowledge> updatePartitionsResultFuture =
taskManagerGateway.updatePartitions(attemptId, partitionInfos, rpcTimeout);
updatePartitionsResultFuture.whenCompleteAsync(
(ack, failure) -> {
// fail if there was a failure
if (failure != null) {
fail(
new IllegalStateException(
"Update to task ["
+ getVertexWithAttempt()
+ "] on TaskManager "
+ taskManagerLocation
+ " failed",
failure));
}
},
getVertex().getExecutionGraphAccessor().getJobMasterMainThreadExecutor());
}
} | 3.68 |
framework_Table_getColumnIcons | /**
* Gets the icons of the columns.
*
* <p>
* The icons in headers match the property id:s given by the set visible
* column headers. The table must be set in either
* {@link #COLUMN_HEADER_MODE_EXPLICIT} or
* {@link #COLUMN_HEADER_MODE_EXPLICIT_DEFAULTS_ID} mode to show the headers
* with icons.
* </p>
*
* @return the Array of icons that match the {@link #getVisibleColumns()}.
*/
public Resource[] getColumnIcons() {
if (columnIcons == null) {
return null;
}
final Resource[] icons = new Resource[visibleColumns.size()];
int i = 0;
for (final Object column : visibleColumns) {
icons[i++] = columnIcons.get(column);
}
return icons;
} | 3.68 |
hbase_HBackupFileSystem_getManifestPath | // TODO we do not keep WAL files anymore
// Move manifest file to other place
private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId)
throws IOException {
FileSystem fs = backupRootPath.getFileSystem(conf);
Path manifestPath = new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
+ BackupManifest.MANIFEST_FILE_NAME);
if (!fs.exists(manifestPath)) {
String errorMsg = "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME
+ " for " + backupId + ". File " + manifestPath + " does not exists. Did " + backupId
+ " correspond to previously taken backup ?";
throw new IOException(errorMsg);
}
return manifestPath;
} | 3.68 |
querydsl_Expressions_listPath | /**
* Create a new Path expression
*
* @param type element type
* @param queryType element expression type
* @param metadata path metadata
* @param <E> element type
* @param <Q> element expression type
* @return path expression
*/
public static <E, Q extends SimpleExpression<? super E>> ListPath<E, Q> listPath(Class<E> type, Class<Q> queryType, PathMetadata metadata) {
return new ListPath<E, Q>(type, queryType, metadata);
} | 3.68 |
hbase_AdaptiveLifoCoDelCallQueue_offer | // Generic BlockingQueue methods we support
@Override
public boolean offer(CallRunner callRunner) {
return queue.offer(callRunner);
} | 3.68 |
hbase_TableRecordReader_getCurrentValue | /**
* Returns the current value.
* @return The current value.
* @throws IOException When the value is faulty.
* @throws InterruptedException When the job is aborted.
* @see org.apache.hadoop.mapreduce.RecordReader#getCurrentValue()
*/
@Override
public Result getCurrentValue() throws IOException, InterruptedException {
return this.recordReaderImpl.getCurrentValue();
} | 3.68 |
pulsar_ManagedLedgerStorage_create | /**
* Initialize the {@link ManagedLedgerStorage} from the provided resources.
*
* @param conf service config
* @param bkProvider bookkeeper client provider
* @return the initialized managed ledger storage.
*/
static ManagedLedgerStorage create(ServiceConfiguration conf,
MetadataStoreExtended metadataStore,
BookKeeperClientFactory bkProvider,
EventLoopGroup eventLoopGroup) throws Exception {
ManagedLedgerStorage storage =
Reflections.createInstance(conf.getManagedLedgerStorageClassName(), ManagedLedgerStorage.class,
Thread.currentThread().getContextClassLoader());
storage.initialize(conf, metadataStore, bkProvider, eventLoopGroup);
return storage;
} | 3.68 |
hadoop_ValidateRenamedFilesStage_getFilesCommitted | /**
* Get the list of files committed.
* @return a possibly empty list.
*/
private synchronized List<FileEntry> getFilesCommitted() {
return filesCommitted;
} | 3.68 |
graphhopper_ArrayUtil_removeConsecutiveDuplicates | /**
 * Removes all consecutive duplicate elements of the given array in the range [0, end[ in place
*
* @return the size of the new range that contains no duplicates (smaller or equal to end).
*/
public static int removeConsecutiveDuplicates(int[] arr, int end) {
int curr = 0;
for (int i = 1; i < end; ++i) {
if (arr[i] != arr[curr])
arr[++curr] = arr[i];
}
return curr + 1;
} | 3.68 |
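A standalone sketch that re-declares the method body from the row so it can run without the GraphHopper dependency; it shows that only consecutive duplicates are collapsed.

```java
import java.util.Arrays;

public class DedupSketch {
    // Same in-place pattern as ArrayUtil.removeConsecutiveDuplicates: keep a write cursor and
    // only advance it when the next value differs from the last kept one.
    static int removeConsecutiveDuplicates(int[] arr, int end) {
        int curr = 0;
        for (int i = 1; i < end; ++i) {
            if (arr[i] != arr[curr])
                arr[++curr] = arr[i];
        }
        return curr + 1;
    }

    public static void main(String[] args) {
        int[] values = {1, 1, 2, 2, 2, 3, 1, 1};
        int newEnd = removeConsecutiveDuplicates(values, values.length);
        // prints [1, 2, 3, 1] -- only *consecutive* duplicates are collapsed
        System.out.println(Arrays.toString(Arrays.copyOf(values, newEnd)));
    }
}
```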
hudi_AbstractTableFileSystemView_convertFileStatusesToLogFiles | /**
* Helper to convert file-status to log-files.
*
* @param statuses List of File-Status
*/
private Stream<HoodieLogFile> convertFileStatusesToLogFiles(FileStatus[] statuses) {
Predicate<FileStatus> rtFilePredicate = fileStatus -> {
String fileName = fileStatus.getPath().getName();
Matcher matcher = FSUtils.LOG_FILE_PATTERN.matcher(fileName);
return matcher.find() && fileName.contains(metaClient.getTableConfig().getLogFileFormat().getFileExtension());
};
return Arrays.stream(statuses).filter(rtFilePredicate).map(HoodieLogFile::new);
} | 3.68 |
framework_VCalendar_setReadOnly | /**
 * Sets whether the component is read-only.
*
* @param readOnly
* True if component is readonly
*/
public void setReadOnly(boolean readOnly) {
this.readOnly = readOnly;
} | 3.68 |
hbase_HDFSBlocksDistribution_getBlockLocalityIndexForSsd | /**
 * Get the SSD block locality index for a given host
 * @param host the host name
 * @return the SSD locality index of the given host
*/
public float getBlockLocalityIndexForSsd(String host) {
if (uniqueBlocksTotalWeight == 0) {
return 0.0f;
} else {
return (float) getBlocksLocalityWeightInternal(host, HostAndWeight::getWeightForSsd)
/ (float) uniqueBlocksTotalWeight;
}
} | 3.68 |
framework_VComboBox_startWaitingForFilteringResponse | /**
* Set a flag that filtering of options is pending a response from the
* server.
*/
private void startWaitingForFilteringResponse() {
waitingForFilteringResponse = true;
} | 3.68 |
graphhopper_ReaderRelation_getRef | /**
* member reference which is an OSM ID
*/
public long getRef() {
return ref;
} | 3.68 |
framework_LegacyCommunicationManager_cache | /**
*
 * @param object the object to add to the cache
 * @return true if the given object was added to the cache
*/
public boolean cache(Object object) {
return res.add(object);
} | 3.68 |
flink_DoubleParser_parseField | /**
* Static utility to parse a field of type double from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
 * @throws IllegalArgumentException Thrown when the value cannot be parsed because the text
 *     does not represent a correct number.
*/
public static final double parseField(byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if (limitedLen > 0
&& (Character.isWhitespace(bytes[startPos])
|| Character.isWhitespace(bytes[startPos + limitedLen - 1]))) {
throw new NumberFormatException(
"There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return Double.parseDouble(str);
} | 3.68 |
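A JDK-only sketch of the same field-parsing behavior; `nextStringLength` is approximated by a simple scan to the delimiter, and UTF-8 stands in for `ConfigConstants.DEFAULT_CHARSET`, so this is an illustration rather than the Flink implementation.

```java
import java.nio.charset.StandardCharsets;

public class DelimitedDoubleSketch {
    // Minimal stand-in for DoubleParser.parseField: scan up to the delimiter, reject
    // leading/trailing whitespace, then delegate to Double.parseDouble.
    static double parseField(byte[] bytes, int startPos, int length, char delimiter) {
        int limitedLen = 0;
        while (limitedLen < length && bytes[startPos + limitedLen] != delimiter) {
            limitedLen++;
        }
        if (limitedLen > 0
                && (Character.isWhitespace(bytes[startPos])
                        || Character.isWhitespace(bytes[startPos + limitedLen - 1]))) {
            throw new NumberFormatException("There is leading or trailing whitespace in the numeric field.");
        }
        String str = new String(bytes, startPos, limitedLen, StandardCharsets.UTF_8);
        return Double.parseDouble(str);
    }

    public static void main(String[] args) {
        byte[] record = "3.14|42".getBytes(StandardCharsets.UTF_8);
        System.out.println(parseField(record, 0, record.length, '|')); // 3.14
    }
}
```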
rocketmq-connect_ParsedSchema_validate | /**
* validate data
*/
default void validate() {
} | 3.68 |
hadoop_RequestFactoryImpl_getContentEncoding | /**
* Get the content encoding (e.g. gzip) or return null if none.
* @return content encoding
*/
@Override
public String getContentEncoding() {
return contentEncoding;
} | 3.68 |
flink_SourceCoordinatorContext_onCheckpointComplete | /**
* Invoked when a successful checkpoint has been taken.
*
* @param checkpointId the id of the successful checkpoint.
*/
void onCheckpointComplete(long checkpointId) {
assignmentTracker.onCheckpointComplete(checkpointId);
} | 3.68 |
framework_VDateField_setDefaultDate | /**
* Set the default date using a map with date values.
*
* @see #setCurrentDate(Map)
* @param defaultValues
* a map from resolutions to date values
* @since 8.1.2
*/
public void setDefaultDate(Map<R, Integer> defaultValues) {
setDefaultDate(getDate(defaultValues));
} | 3.68 |
zxing_RSSExpandedReader_isValidSequence | // Whether the pairs form a valid finder pattern sequence, either complete or a prefix
private static boolean isValidSequence(List<ExpandedPair> pairs, boolean complete) {
for (int[] sequence : FINDER_PATTERN_SEQUENCES) {
boolean sizeOk = (complete ? pairs.size() == sequence.length : pairs.size() <= sequence.length);
if (sizeOk) {
boolean stop = true;
for (int j = 0; j < pairs.size(); j++) {
if (pairs.get(j).getFinderPattern().getValue() != sequence[j]) {
stop = false;
break;
}
}
if (stop) {
return true;
}
}
}
return false;
} | 3.68 |
graphhopper_ViaRouting_lookup | /**
* @throws MultiplePointsNotFoundException in case one or more points could not be resolved
*/
public static List<Snap> lookup(EncodedValueLookup lookup, List<GHPoint> points, EdgeFilter snapFilter,
LocationIndex locationIndex, List<String> snapPreventions, List<String> pointHints,
DirectedEdgeFilter directedSnapFilter, List<Double> headings) {
if (points.size() < 2)
throw new IllegalArgumentException("At least 2 points have to be specified, but was:" + points.size());
final EnumEncodedValue<RoadClass> roadClassEnc = lookup.getEnumEncodedValue(RoadClass.KEY, RoadClass.class);
final EnumEncodedValue<RoadEnvironment> roadEnvEnc = lookup.getEnumEncodedValue(RoadEnvironment.KEY, RoadEnvironment.class);
EdgeFilter strictEdgeFilter = snapPreventions.isEmpty()
? snapFilter
: new SnapPreventionEdgeFilter(snapFilter, roadClassEnc, roadEnvEnc, snapPreventions);
List<Snap> snaps = new ArrayList<>(points.size());
IntArrayList pointsNotFound = new IntArrayList();
for (int placeIndex = 0; placeIndex < points.size(); placeIndex++) {
GHPoint point = points.get(placeIndex);
Snap snap = null;
if (placeIndex < headings.size() && !Double.isNaN(headings.get(placeIndex))) {
if (!pointHints.isEmpty() && !Helper.isEmpty(pointHints.get(placeIndex)))
throw new IllegalArgumentException("Cannot specify heading and point_hint at the same time. " +
"Make sure you specify either an empty point_hint (String) or a NaN heading (double) for point " + placeIndex);
snap = locationIndex.findClosest(point.lat, point.lon, new HeadingEdgeFilter(directedSnapFilter, headings.get(placeIndex), point));
} else if (!pointHints.isEmpty()) {
snap = locationIndex.findClosest(point.lat, point.lon, new NameSimilarityEdgeFilter(strictEdgeFilter,
pointHints.get(placeIndex), point, 170));
} else if (!snapPreventions.isEmpty()) {
snap = locationIndex.findClosest(point.lat, point.lon, strictEdgeFilter);
}
if (snap == null || !snap.isValid())
snap = locationIndex.findClosest(point.lat, point.lon, snapFilter);
if (!snap.isValid())
pointsNotFound.add(placeIndex);
snaps.add(snap);
}
if (!pointsNotFound.isEmpty())
throw new MultiplePointsNotFoundException(pointsNotFound);
return snaps;
} | 3.68 |
flink_Costs_addHeuristicNetworkCost | /**
* Adds the heuristic costs for network to the current heuristic network costs for this Costs
* object.
*
* @param cost The heuristic network cost to add.
*/
public void addHeuristicNetworkCost(double cost) {
if (cost <= 0) {
throw new IllegalArgumentException("Heuristic costs must be positive.");
}
this.heuristicNetworkCost += cost;
// check for overflow
if (this.heuristicNetworkCost < 0) {
this.heuristicNetworkCost = Double.MAX_VALUE;
}
} | 3.68 |
flink_LimitedConnectionsFileSystem_getMaxNumOpenStreamsTotal | /** Gets the maximum number of concurrently open streams (input + output). */
public int getMaxNumOpenStreamsTotal() {
return maxNumOpenStreamsTotal;
} | 3.68 |
dubbo_RegistryDirectory_toInvokers | /**
 * Turn urls into invokers; if a url has already been referred, it will not be re-referenced.
 * The items that are put into newUrlInvokerMap are removed from oldUrlInvokerMap.
*
* @param oldUrlInvokerMap it might be modified during the process.
 * @param urls the provider URLs to convert
* @return invokers
*/
private Map<URL, Invoker<T>> toInvokers(Map<URL, Invoker<T>> oldUrlInvokerMap, List<URL> urls) {
Map<URL, Invoker<T>> newUrlInvokerMap =
new ConcurrentHashMap<>(urls == null ? 1 : (int) (urls.size() / 0.75f + 1));
if (urls == null || urls.isEmpty()) {
return newUrlInvokerMap;
}
String queryProtocols = this.queryMap.get(PROTOCOL_KEY);
for (URL providerUrl : urls) {
if (!checkProtocolValid(queryProtocols, providerUrl)) {
continue;
}
URL url = mergeUrl(providerUrl);
// Cache key is url that does not merge with consumer side parameters,
// regardless of how the consumer combines parameters,
// if the server url changes, then refer again
Invoker<T> invoker = oldUrlInvokerMap == null ? null : oldUrlInvokerMap.remove(url);
if (invoker == null) { // Not in the cache, refer again
try {
boolean enabled = true;
if (url.hasParameter(DISABLED_KEY)) {
enabled = !url.getParameter(DISABLED_KEY, false);
} else {
enabled = url.getParameter(ENABLED_KEY, true);
}
if (enabled) {
invoker = protocol.refer(serviceType, url);
}
} catch (Throwable t) {
// Thrown by AbstractProtocol.optimizeSerialization()
if (t instanceof RpcException && t.getMessage().contains("serialization optimizer")) {
// 4-2 - serialization optimizer class initialization failed.
logger.error(
PROTOCOL_FAILED_INIT_SERIALIZATION_OPTIMIZER,
"typo in optimizer class",
"",
"Failed to refer invoker for interface:" + serviceType + ",url:(" + url + ")"
+ t.getMessage(),
t);
} else {
// 4-3 - Failed to refer invoker by other reason.
logger.error(
PROTOCOL_FAILED_REFER_INVOKER,
"",
"",
"Failed to refer invoker for interface:" + serviceType + ",url:(" + url + ")"
+ t.getMessage(),
t);
}
}
if (invoker != null) { // Put new invoker in cache
newUrlInvokerMap.put(url, invoker);
}
} else {
newUrlInvokerMap.put(url, invoker);
}
}
return newUrlInvokerMap;
} | 3.68 |
flink_SqlFunctionUtils_byteArrayCompare | /**
* Compares two byte arrays in lexicographical order.
*
 * <p>The result is positive if {@code array1} is greater than {@code array2}, negative if {@code
* array1} is less than {@code array2} and 0 if {@code array1} is equal to {@code array2}.
*
* <p>Note: Currently, this is used in {@code ScalarOperatorGens} for comparing two fields of
* binary or varbinary type.
*
* @param array1 byte array to compare.
* @param array2 byte array to compare.
* @return an Integer indicating which one is bigger
*/
public static int byteArrayCompare(byte[] array1, byte[] array2) {
for (int i = 0, j = 0; i < array1.length && j < array2.length; i++, j++) {
int a = (array1[i] & 0xff);
int b = (array2[j] & 0xff);
if (a != b) {
return a - b;
}
}
return array1.length - array2.length;
} | 3.68 |
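On Java 9+, `java.util.Arrays.compareUnsigned(byte[], byte[])` gives the same unsigned, lexicographical ordering; the sign of its result agrees with `byteArrayCompare` even though the magnitude may differ. A small sketch (sample arrays are made up):

```java
import java.util.Arrays;

public class ByteArrayCompareSketch {
    public static void main(String[] args) {
        byte[] a = {0x01, (byte) 0xFF}; // 0xFF is treated as 255, not -1
        byte[] b = {0x01, 0x02};

        // Unsigned lexicographical comparison; only the sign of the result matters here.
        System.out.println(Integer.signum(Arrays.compareUnsigned(a, b))); // 1  -> a > b
        System.out.println(Integer.signum(Arrays.compareUnsigned(b, a))); // -1 -> b < a
        System.out.println(Integer.signum(Arrays.compareUnsigned(a, a))); // 0  -> equal
    }
}
```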
morf_AddTable_apply | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#apply(org.alfasoftware.morf.metadata.Schema)
* @return {@link Schema} with new table added.
*/
@Override
public Schema apply(Schema schema) {
return new AugmentedSchema(schema, newTable);
} | 3.68 |
hudi_MetadataConversionUtils_convertCommitMetadata | /**
* Convert commit metadata from json to avro.
*/
public static <T extends SpecificRecordBase> T convertCommitMetadata(HoodieCommitMetadata hoodieCommitMetadata) {
if (hoodieCommitMetadata instanceof HoodieReplaceCommitMetadata) {
return (T) convertReplaceCommitMetadata((HoodieReplaceCommitMetadata) hoodieCommitMetadata);
}
hoodieCommitMetadata.getPartitionToWriteStats().remove(null);
org.apache.hudi.avro.model.HoodieCommitMetadata avroMetaData = JsonUtils.getObjectMapper().convertValue(hoodieCommitMetadata, org.apache.hudi.avro.model.HoodieCommitMetadata.class);
if (hoodieCommitMetadata.getCompacted()) {
avroMetaData.setOperationType(WriteOperationType.COMPACT.name());
}
return (T) avroMetaData;
} | 3.68 |
hbase_ProcedureExecutor_registerListener | // ==========================================================================
// Listeners helpers
// ==========================================================================
public void registerListener(ProcedureExecutorListener listener) {
this.listeners.add(listener);
} | 3.68 |
hadoop_AMRMProxyService_getRootInterceptor | /**
* Gets the root request interceptor.
*
* @return the root request interceptor
*/
public synchronized RequestInterceptor getRootInterceptor() {
return rootInterceptor;
} | 3.68 |
framework_Label_getDataSourceValue | /**
* Returns the current value of the data source converted using the current
* locale.
*
 * @return the data source value converted to a String using the current locale
*/
private String getDataSourceValue() {
return ConverterUtil.convertFromModel(
getPropertyDataSource().getValue(), String.class,
getConverter(), getLocale());
} | 3.68 |
hbase_RegionServerSnapshotManager_buildSubprocedure | /**
* If in a running state, creates the specified subprocedure for handling an online snapshot.
* Because this gets the local list of regions to snapshot and not the set the master had, there
 * is a possibility of a race where regions may be missed. This is detected by the master in the
* snapshot verification step.
* @return Subprocedure to submit to the ProcedureMember.
*/
public Subprocedure buildSubprocedure(SnapshotDescription snapshot) {
// don't run a snapshot if the parent is stop(ping)
if (rss.isStopping() || rss.isStopped()) {
throw new IllegalStateException(
"Can't start snapshot on RS: " + rss.getServerName() + ", because stopping/stopped!");
}
// check to see if this server is hosting any regions for the snapshots
// check to see if we have regions for the snapshot
List<HRegion> involvedRegions;
try {
involvedRegions = getRegionsToSnapshot(snapshot);
} catch (IOException e1) {
throw new IllegalStateException("Failed to figure out if we should handle a snapshot - "
+ "something has gone awry with the online regions.", e1);
}
// We need to run the subprocedure even if we have no relevant regions. The coordinator
// expects participation in the procedure and without sending message the snapshot attempt
// will hang and fail.
LOG.debug("Launching subprocedure for snapshot " + snapshot.getName() + " from table "
+ snapshot.getTable() + " type " + snapshot.getType());
ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(snapshot.getName());
Configuration conf = rss.getConfiguration();
long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);
long wakeMillis =
conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY, SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT);
switch (snapshot.getType()) {
case FLUSH:
SnapshotSubprocedurePool taskManager =
new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss);
return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis,
involvedRegions, snapshot, taskManager);
case SKIPFLUSH:
/*
         * This is to take an online snapshot without forcing a coordinated flush, to prevent pauses.
         * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure
         * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be
         * turned on/off based on the flush type. To minimize the code change, the class name is not
         * changed.
*/
SnapshotSubprocedurePool taskManager2 =
new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss);
return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis,
involvedRegions, snapshot, taskManager2);
default:
throw new UnsupportedOperationException("Unrecognized snapshot type:" + snapshot.getType());
}
} | 3.68 |
hbase_SimpleRegionNormalizer_getMergeMinRegionAge | /**
* Return this instance's configured value for {@value #MERGE_MIN_REGION_AGE_DAYS_KEY}.
*/
public Period getMergeMinRegionAge() {
return normalizerConfiguration.getMergeMinRegionAge();
} | 3.68 |
druid_Lexer_mark | // Calling reset() after mark() has been called multiple times can cause problems
@Deprecated
public SavePoint mark() {
return this.savePoint = markOut();
} | 3.68 |
hudi_SparkPreCommitValidator_publishRunStats | /**
* Publish pre-commit validator run stats for a given commit action.
*/
private void publishRunStats(String instantTime, long duration) {
// Record validator duration metrics.
if (getWriteConfig().isMetricsOn()) {
HoodieTableMetaClient metaClient = getHoodieTable().getMetaClient();
Option<HoodieInstant> currentInstant = metaClient.getActiveTimeline()
.findInstantsAfterOrEquals(instantTime, 1)
.firstInstant();
metrics.reportMetrics(currentInstant.get().getAction(), getClass().getSimpleName(), duration);
}
} | 3.68 |
hbase_MasterQuotaManager_removeTableFromNamespaceQuota | /**
* Remove table from namespace quota.
* @param tName - The table name to update quota usage.
* @throws IOException Signals that an I/O exception has occurred.
*/
public void removeTableFromNamespaceQuota(TableName tName) throws IOException {
if (initialized) {
namespaceQuotaManager.removeFromNamespaceUsage(tName);
}
} | 3.68 |
flink_Tuple19_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>
copy() {
return new Tuple19<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16,
this.f17, this.f18);
} | 3.68 |
hbase_RemoteProcedureException_serialize | /**
* Converts a RemoteProcedureException to an array of bytes.
* @param source the name of the external exception source
* @param t the "local" external exception (local)
* @return protobuf serialized version of RemoteProcedureException
*/
public static byte[] serialize(String source, Throwable t) {
return toProto(source, t).toByteArray();
} | 3.68 |
hudi_BaseAvroPayload_isDeleted | /**
* Defines whether this implementation of {@link HoodieRecordPayload} is deleted.
* We will not do deserialization in this method.
*/
public boolean isDeleted(Schema schema, Properties props) {
return isDeletedRecord;
} | 3.68 |
hadoop_GlobPattern_hasWildcard | /**
* @return true if this is a wildcard pattern (with special chars)
*/
public boolean hasWildcard() {
return hasWildcard;
} | 3.68 |
framework_Color_getCSS | /**
* Returns CSS representation of the Color, e.g. #000000.
*/
public String getCSS() {
String redString = Integer.toHexString(red);
redString = redString.length() < 2 ? "0" + redString : redString;
String greenString = Integer.toHexString(green);
greenString = greenString.length() < 2 ? "0" + greenString
: greenString;
String blueString = Integer.toHexString(blue);
blueString = blueString.length() < 2 ? "0" + blueString : blueString;
return "#" + redString + greenString + blueString;
} | 3.68 |
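The manual zero-padding above is equivalent to a single format call; a tiny sketch with made-up sample values:

```java
public class ColorCssSketch {
    public static void main(String[] args) {
        int red = 7, green = 128, blue = 255;

        // Equivalent zero-padded lowercase hex formatting to Color.getCSS(),
        // done with String.format instead of manual Integer.toHexString padding.
        String css = String.format("#%02x%02x%02x", red, green, blue);
        System.out.println(css); // #0780ff
    }
}
```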
framework_VTree_getNavigationPageDownKey | /**
 * Get the key that moves the selection one page down in the table. By
 * default this is the Page Down key, but by overriding this you can change
 * the key to whatever you want.
 *
 * @return the key code for moving the selection one page down
*/
protected int getNavigationPageDownKey() {
return KeyCodes.KEY_PAGEDOWN;
} | 3.68 |
pulsar_ClientCnxIdleState_isUsing | /**
* @return Whether this connection is in use.
*/
public boolean isUsing() {
return getIdleStat() == State.USING;
} | 3.68 |