name | code_snippet | score
---|---|---|
hbase_OrderedBytes_putUint32 | /**
* Write a 32-bit unsigned integer to {@code dst} as 4 big-endian bytes.
* @return number of bytes written.
*/
private static int putUint32(PositionedByteRange dst, int val) {
dst.put((byte) (val >>> 24)).put((byte) (val >>> 16)).put((byte) (val >>> 8)).put((byte) val);
return 4;
} | 3.68 |
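For reference, the same big-endian, shift-and-mask layout can be reproduced with a plain byte array; the sketch below is illustrative only and does not use HBase's PositionedByteRange (class and method names here are made up).

```java
// Illustrative only: encodes an int as 4 big-endian bytes into a plain
// byte array, mirroring the shift-and-mask pattern used by putUint32.
public class BigEndianSketch {
  static int putUint32(byte[] dst, int offset, int val) {
    dst[offset]     = (byte) (val >>> 24); // most significant byte first
    dst[offset + 1] = (byte) (val >>> 16);
    dst[offset + 2] = (byte) (val >>> 8);
    dst[offset + 3] = (byte) val;          // least significant byte last
    return 4;
  }

  public static void main(String[] args) {
    byte[] buf = new byte[4];
    putUint32(buf, 0, 0x12345678);
    // Prints "12 34 56 78" -- big-endian ordering.
    for (byte b : buf) {
      System.out.printf("%02x ", b);
    }
  }
}
```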
flink_FileCatalogStore_getCatalog | /**
* Returns the catalog descriptor for the specified catalog, if it exists in the catalog store.
*
* @param catalogName the name of the catalog to retrieve
* @return an {@link Optional} containing the catalog descriptor, or an empty {@link Optional}
* if the catalog does not exist in the catalog store
* @throws CatalogException if the catalog store is not open or if there is an error retrieving
* the catalog
*/
@Override
public Optional<CatalogDescriptor> getCatalog(String catalogName) throws CatalogException {
checkOpenState();
Path catalogPath = getCatalogPath(catalogName);
try {
FileSystem fs = catalogPath.getFileSystem();
if (!fs.exists(catalogPath)) {
return Optional.empty();
}
try (FSDataInputStream is = fs.open(catalogPath)) {
Map<String, String> configMap =
YAML_MAPPER.readValue(is, new TypeReference<Map<String, String>>() {});
CatalogDescriptor catalog =
CatalogDescriptor.of(catalogName, Configuration.fromMap(configMap));
return Optional.of(catalog);
}
} catch (Exception e) {
throw new CatalogException(
String.format(
"Failed to load catalog %s's configuration from file.", catalogName),
e);
}
} | 3.68 |
morf_DataValueLookupBuilderImpl_grow | /**
* Grow the array to accommodate new values. Based on {@link ArrayList}, but
* without the overflow protection. We've got bigger problems if records get
* that big.
*/
private void grow() {
int size = metadata.getColumnNames().size();
data = Arrays.copyOf(data, size + (size >> 1));
} | 3.68 |
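The expression size + (size >> 1) grows capacity by roughly 50%, the same idiom ArrayList uses. A minimal standalone sketch of that arithmetic (names are illustrative, not part of morf):

```java
// Minimal sketch (not the morf class) showing how repeated ~1.5x growth
// via "size + (size >> 1)" expands capacity.
public class GrowthSketch {
  public static void main(String[] args) {
    int capacity = 10;
    for (int i = 0; i < 5; i++) {
      capacity = capacity + (capacity >> 1); // grow by ~50%
      System.out.println(capacity);          // 15, 22, 33, 49, 73
    }
  }
}
```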
framework_ConnectorTracker_removeUnregisteredConnectors | /**
* Removes all references and information about connectors marked as
* unregistered.
*
*/
private void removeUnregisteredConnectors() {
GlobalResourceHandler globalResourceHandler = uI.getSession()
.getGlobalResourceHandler(false);
for (ClientConnector connector : unregisteredConnectors) {
removeUnregisteredConnector(connector, globalResourceHandler);
}
unregisteredConnectors.clear();
} | 3.68 |
flink_PythonEnvUtils_resetCallbackClient | /**
 * Resets the callback client of the gateway server to the given callbackServerListeningAddress
 * and callbackServerListeningPort after the callback server has started.
 *
 * @param callbackServerListeningAddress the listening address of the callback server.
 * @param callbackServerListeningPort the listening port of the callback server.
 */
public static void resetCallbackClient(
GatewayServer gatewayServer,
String callbackServerListeningAddress,
int callbackServerListeningPort)
throws UnknownHostException, InvocationTargetException, NoSuchMethodException,
IllegalAccessException, NoSuchFieldException {
gatewayServer.resetCallbackClient(
InetAddress.getByName(callbackServerListeningAddress), callbackServerListeningPort);
resetCallbackClientExecutorService(gatewayServer);
} | 3.68 |
framework_ElementResizeEvent_getLayoutManager | /**
* Returns the current layout manager.
*
* @return the layout manager
*/
public LayoutManager getLayoutManager() {
return layoutManager;
} | 3.68 |
AreaShop_GeneralRegion_getDepth | /**
* Get the depth of the region (z-axis).
* @return The depth of the region (z-axis)
*/
@Override
public int getDepth() {
if(getRegion() == null) {
return 0;
}
return getMaximumPoint().getBlockZ() - getMinimumPoint().getBlockZ() + 1;
} | 3.68 |
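The + 1 makes the count inclusive of both end blocks. A tiny illustrative sketch of the arithmetic (not the AreaShop class itself):

```java
// Illustrative arithmetic only: an inclusive block span needs "+ 1",
// e.g. a region spanning z = 10 through z = 14 is 5 blocks deep.
public class DepthSketch {
  static int depth(int minBlockZ, int maxBlockZ) {
    return maxBlockZ - minBlockZ + 1;
  }

  public static void main(String[] args) {
    System.out.println(depth(10, 14)); // prints 5
  }
}
```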
druid_DruidDataSourceStatLoggerImpl_configFromProperties | /**
* @since 0.2.21
*/
@Override
public void configFromProperties(Properties properties) {
if (properties == null) {
return;
}
String property = properties.getProperty("druid.stat.loggerName");
if (property != null && property.length() > 0) {
setLoggerName(property);
}
} | 3.68 |
framework_SelectorPath_getNameFromPredicates | /**
 * Gets a variable name based on the given predicates, falling back to the element type.
*
* @param predicates
* Predicates related to element
* @param elementType
* Element type
* @return name for Variable
*/
private String getNameFromPredicates(List<SelectorPredicate> predicates,
String elementType) {
String name = elementType;
for (SelectorPredicate p : predicates) {
if ("caption".equals(p.getName())) {
// Caption + elementType is a suitable name
name = p.getValue() + elementType;
} else if ("id".equals(p.getName())) {
// Just id. This is unique, use it.
return p.getValue();
}
}
return name;
} | 3.68 |
framework_Calendar_expandEndDate | /**
* Finds the last day of the week and returns a day representing the end of
* that day.
*
* @param end
* The actual date
* @param expandToFullWeek
* Should the returned date be moved to the end of the week
* @return If expandToFullWeek is set then it returns the last day of the
* week, else it returns a clone of the actual date with the time
* set to the end of the day
*/
protected Date expandEndDate(Date end, boolean expandToFullWeek) {
// If the duration is more than week, use monthly view and get startweek
// and endweek. Example if views daterange is from tuesday to next weeks
// wednesday->expand to monday to nextweeks sunday. If firstdayofweek =
// monday
if (expandToFullWeek) {
end = getLastDateForWeek(end);
} else {
end = (Date) end.clone();
}
// Always expand to the start of the first day to the end of the last
// day
end = getEndOfDay(currentCalendar, end);
return end;
} | 3.68 |
streampipes_AbstractProcessingElementBuilder_unaryMappingPropertyWithoutRequirement | /**
 * Adds a new {@link org.apache.streampipes.model.staticproperty.MappingPropertyUnary}
 * to the pipeline element definition which is not linked to a specific input property.
 * Use this method if you want to present users a single-value selection of all available
 * input event properties.
 *
 * @param label         A human-readable label that is displayed to users in the StreamPipes UI.
 * @param propertyScope Only input event properties that match the
 *                      {@link org.apache.streampipes.model.schema.PropertyScope} are displayed.
 * @return this
 */
public K unaryMappingPropertyWithoutRequirement(Label label, PropertyScope propertyScope) {
MappingPropertyUnary mp = new MappingPropertyUnary(label.getInternalId(), label.getLabel(), label.getDescription());
mp.setPropertyScope(propertyScope.name());
this.staticProperties.add(mp);
return me();
} | 3.68 |
hbase_HRegion_setTimeoutForWriteLock | /**
 * {@link HRegion#doClose} will block forever if someone tries to prove the deadlock via a unit
 * test. Instead of blocking, {@link HRegion#doClose} will throw an exception if you set this
 * timeout.
 * @param timeoutForWriteLock the time, in seconds, to wait for the write lock in
 *          {@link HRegion#doClose}
 */
public void setTimeoutForWriteLock(long timeoutForWriteLock) {
assert timeoutForWriteLock >= 0;
this.timeoutForWriteLock = timeoutForWriteLock;
} | 3.68 |
hbase_MetricRegistriesLoader_load | /**
* Creates a {@link MetricRegistries} instance using the corresponding {@link MetricRegistries}
 * available to {@link ServiceLoader} on the classpath. If no instance is found, the default
 * implementation is loaded.
* @return A {@link MetricRegistries} implementation.
*/
static MetricRegistries load(List<MetricRegistries> availableImplementations) {
if (availableImplementations.size() == 1) {
// One and only one instance -- what we want/expect
MetricRegistries impl = availableImplementations.get(0);
LOG.info("Loaded MetricRegistries " + impl.getClass());
return impl;
} else if (availableImplementations.isEmpty()) {
try {
return ReflectionUtils.newInstance((Class<MetricRegistries>) Class.forName(defaultClass));
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
} else {
// Tell the user they're doing something wrong, and choose the first impl.
StringBuilder sb = new StringBuilder();
for (MetricRegistries factory : availableImplementations) {
if (sb.length() > 0) {
sb.append(", ");
}
sb.append(factory.getClass());
}
LOG.warn("Found multiple MetricRegistries implementations: " + sb
+ ". Using first found implementation: " + availableImplementations.get(0));
return availableImplementations.get(0);
}
} | 3.68 |
hbase_MetricsConnection_getMetricScope | /** scope of the metrics object */
public String getMetricScope() {
return scope;
} | 3.68 |
hadoop_ParsedTaskAttempt_putDiagnosticInfo | /** Set the task attempt diagnostic-info */
public void putDiagnosticInfo(String msg) {
diagnosticInfo = msg;
} | 3.68 |
framework_AbstractClientConnector_getErrorHandler | /*
* (non-Javadoc)
*
* @see com.vaadin.server.ClientConnector#getErrorHandler()
*/
@Override
public ErrorHandler getErrorHandler() {
return errorHandler;
} | 3.68 |
framework_CheckBoxGroupElement_setValue | /**
* Sets the selected options for this checkbox group.
*
* @param options
* the list of options to select
*
* @see #getValue()
* @see #setValue(String...)
*/
public void setValue(List<String> options) {
// Deselect everything that is not going to be selected again.
getValue().stream().filter(option -> !options.contains(option))
.forEach(this::selectByText);
// Select everything that still needs selecting.
List<String> selection = getValue();
options.stream().filter(option -> !selection.contains(option))
.forEach(this::selectByText);
} | 3.68 |
hbase_ShadedAccessControlUtil_toPermissionAction | /**
* Convert a Permission.Action shaded proto to a client Permission.Action object.
*/
public static Permission.Action toPermissionAction(AccessControlProtos.Permission.Action action) {
switch (action) {
case READ:
return Permission.Action.READ;
case WRITE:
return Permission.Action.WRITE;
case EXEC:
return Permission.Action.EXEC;
case CREATE:
return Permission.Action.CREATE;
case ADMIN:
return Permission.Action.ADMIN;
}
throw new IllegalArgumentException("Unknown action value " + action.name());
} | 3.68 |
dubbo_ReflectUtils_makeAccessible | /**
* Copy from org.springframework.util.ReflectionUtils.
* Make the given method accessible, explicitly setting it accessible if
* necessary. The {@code setAccessible(true)} method is only called
* when actually necessary, to avoid unnecessary conflicts with a JVM
* SecurityManager (if active).
* @param method the method to make accessible
* @see java.lang.reflect.Method#setAccessible
*/
@SuppressWarnings("deprecation") // on JDK 9
public static void makeAccessible(Method method) {
if ((!Modifier.isPublic(method.getModifiers())
|| !Modifier.isPublic(method.getDeclaringClass().getModifiers()))
&& !method.isAccessible()) {
method.setAccessible(true);
}
} | 3.68 |
framework_AbstractJunctionFilter_getFilters | /**
* Returns an unmodifiable collection of the sub-filters of this composite
* filter.
*
 * @return an unmodifiable collection of the sub-filters
*/
public Collection<Filter> getFilters() {
return filters;
} | 3.68 |
dubbo_ServiceBeanExportedEvent_getServiceBean | /**
* Get {@link ServiceBean} instance
*
* @return non-null
*/
public ServiceBean getServiceBean() {
return (ServiceBean) super.getSource();
} | 3.68 |
zxing_CameraConfigurationManager_initFromCameraParameters | /**
* Reads, one time, values from the camera that are needed by the app.
*/
void initFromCameraParameters(OpenCamera camera) {
Camera.Parameters parameters = camera.getCamera().getParameters();
WindowManager manager = (WindowManager) context.getSystemService(Context.WINDOW_SERVICE);
Display display = manager.getDefaultDisplay();
int displayRotation = display.getRotation();
int cwRotationFromNaturalToDisplay;
switch (displayRotation) {
case Surface.ROTATION_0:
cwRotationFromNaturalToDisplay = 0;
break;
case Surface.ROTATION_90:
cwRotationFromNaturalToDisplay = 90;
break;
case Surface.ROTATION_180:
cwRotationFromNaturalToDisplay = 180;
break;
case Surface.ROTATION_270:
cwRotationFromNaturalToDisplay = 270;
break;
default:
// Have seen this return incorrect values like -90
if (displayRotation % 90 == 0) {
cwRotationFromNaturalToDisplay = (360 + displayRotation) % 360;
} else {
throw new IllegalArgumentException("Bad rotation: " + displayRotation);
}
}
Log.i(TAG, "Display at: " + cwRotationFromNaturalToDisplay);
int cwRotationFromNaturalToCamera = camera.getOrientation();
Log.i(TAG, "Camera at: " + cwRotationFromNaturalToCamera);
// Still not 100% sure about this. But acts like we need to flip this:
if (camera.getFacing() == CameraFacing.FRONT) {
cwRotationFromNaturalToCamera = (360 - cwRotationFromNaturalToCamera) % 360;
Log.i(TAG, "Front camera overriden to: " + cwRotationFromNaturalToCamera);
}
cwRotationFromDisplayToCamera =
(360 + cwRotationFromNaturalToCamera - cwRotationFromNaturalToDisplay) % 360;
Log.i(TAG, "Final display orientation: " + cwRotationFromDisplayToCamera);
if (camera.getFacing() == CameraFacing.FRONT) {
Log.i(TAG, "Compensating rotation for front camera");
cwNeededRotation = (360 - cwRotationFromDisplayToCamera) % 360;
} else {
cwNeededRotation = cwRotationFromDisplayToCamera;
}
Log.i(TAG, "Clockwise rotation from display to camera: " + cwNeededRotation);
Point theScreenResolution = new Point();
display.getSize(theScreenResolution);
screenResolution = theScreenResolution;
Log.i(TAG, "Screen resolution in current orientation: " + screenResolution);
cameraResolution = CameraConfigurationUtils.findBestPreviewSizeValue(parameters, screenResolution);
Log.i(TAG, "Camera resolution: " + cameraResolution);
bestPreviewSize = CameraConfigurationUtils.findBestPreviewSizeValue(parameters, screenResolution);
Log.i(TAG, "Best available preview size: " + bestPreviewSize);
boolean isScreenPortrait = screenResolution.x < screenResolution.y;
boolean isPreviewSizePortrait = bestPreviewSize.x < bestPreviewSize.y;
if (isScreenPortrait == isPreviewSizePortrait) {
previewSizeOnScreen = bestPreviewSize;
} else {
previewSizeOnScreen = new Point(bestPreviewSize.y, bestPreviewSize.x);
}
Log.i(TAG, "Preview size on screen: " + previewSizeOnScreen);
} | 3.68 |
framework_AbstractComponent_getActionManager | /**
* Gets the {@link ActionManager} used to manage the
* {@link ShortcutListener}s added to this component.
*
* @return the ActionManager in use
*/
protected ActionManager getActionManager() {
if (actionManager == null) {
actionManager = new ConnectorActionManager(this);
setActionManagerViewer();
}
return actionManager;
} | 3.68 |
framework_VTabsheet_clearPaintables | /**
* {@inheritDoc}
*
* @deprecated This method is not called by the framework code anymore.
*/
@Deprecated
@Override
protected void clearPaintables() {
int i = tb.getTabCount();
while (i > 0) {
tb.removeTab(--i);
}
tabPanel.clear();
} | 3.68 |
morf_ColumnTypeBean_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return String.format("ColumnType-%s-%d-%d-%s", type, width, scale, nullable);
} | 3.68 |
hudi_FileIOUtils_closeQuietly | /**
* Closes {@code Closeable} quietly.
*
* @param closeable {@code Closeable} to close
*/
public static void closeQuietly(Closeable closeable) {
if (closeable == null) {
return;
}
try {
closeable.close();
} catch (IOException e) {
LOG.warn("IOException during close", e);
}
} | 3.68 |
hadoop_CommitContext_getSinglePendingFileSerializer | /**
* Get a serializer for .pending files.
* @return a serializer.
*/
public JsonSerialization<SinglePendingCommit> getSinglePendingFileSerializer() {
return singleCommitSerializer.getForCurrentThread();
} | 3.68 |
pulsar_BrokerService_checkUnAckMessageDispatching | /**
 * Adds the given dispatcher's unacked-message count to the broker-wide unacked-message count and, if it reaches
 * {@link #maxUnackedMessages}, blocks all dispatchers whose unacked messages exceed
 * {@link #maxUnackedMsgsPerDispatcher}. It unblocks all dispatchers once the broker-wide unacked-message count
 * drops below ({@link #maxUnackedMessages}/2).
 */
public void checkUnAckMessageDispatching() {
// don't block dispatchers if maxUnackedMessages = 0
if (maxUnackedMessages <= 0) {
return;
}
long unAckedMessages = totalUnackedMessages.sum();
if (unAckedMessages >= maxUnackedMessages && blockedDispatcherOnHighUnackedMsgs.compareAndSet(false, true)) {
// block dispatcher with higher unack-msg when it reaches broker-unack msg limit
log.info("Starting blocking dispatchers with unacked msgs {} due to reached max broker limit {}",
maxUnackedMessages, maxUnackedMsgsPerDispatcher);
executor().execute(() -> blockDispatchersWithLargeUnAckMessages());
} else if (blockedDispatcherOnHighUnackedMsgs.get() && unAckedMessages < maxUnackedMessages / 2) {
// unblock broker-dispatching if received enough acked messages back
if (blockedDispatcherOnHighUnackedMsgs.compareAndSet(true, false)) {
unblockDispatchersOnUnAckMessages(blockedDispatchers.values());
}
}
} | 3.68 |
hadoop_BoundedResourcePool_toString | // For debugging purposes.
@Override
public synchronized String toString() {
return String.format(
"size = %d, #created = %d, #in-queue = %d, #available = %d",
size, numCreated(), items.size(), numAvailable());
} | 3.68 |
framework_Table_setSortContainerPropertyId | /**
* Internal method to set currently sorted column property id. With doSort
* flag actual sorting may be bypassed.
*
* @param propertyId
* @param doSort
*/
private void setSortContainerPropertyId(Object propertyId, boolean doSort) {
if ((sortContainerPropertyId != null
&& !sortContainerPropertyId.equals(propertyId))
|| (sortContainerPropertyId == null && propertyId != null)) {
sortContainerPropertyId = propertyId;
if (doSort) {
sort();
// Assures the visual refresh. This should not be necessary as
// sort() calls refreshRowCache
refreshRenderedCells();
}
}
} | 3.68 |
hbase_CellFlatMap_subMap | // ---------------- Sub-Maps ----------------
@Override
public NavigableMap<Cell, Cell> subMap(Cell fromKey, boolean fromInclusive, Cell toKey,
boolean toInclusive) {
final int lessCellIndex = getValidIndex(fromKey, fromInclusive, true);
final int greaterCellIndex = getValidIndex(toKey, toInclusive, false);
if (descending) {
return createSubCellFlatMap(greaterCellIndex, lessCellIndex, descending);
} else {
return createSubCellFlatMap(lessCellIndex, greaterCellIndex, descending);
}
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithLessThanWhereClauses | /**
 * Tests a select with a less-than where clause.
*/
@Test
public void testSelectWithLessThanWhereClauses() {
SelectStatement stmt = new SelectStatement()
.from(new TableReference(TEST_TABLE))
.where(lessThan(new FieldReference(INT_FIELD), 20090101));
String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE (intField < 20090101)";
assertEquals("Select with less than where clause", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_RestAPIVersion_getLatestVersion | /**
 * Accepts a collection of versions and returns the latest one.
 *
 * @return the latest version that implements the RestAPIVersion interface
 */
static <E extends RestAPIVersion<E>> E getLatestVersion(Collection<E> versions) {
return Collections.max(versions);
} | 3.68 |
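Collections.max relies on the versions being mutually comparable, so the "latest" version is simply the greatest element. A hedged sketch using a hypothetical version enum (not Flink's actual RestAPIVersion implementations):

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Hypothetical version enum: enums compare by declaration order, so if the
// declaration order follows release order, Collections.max picks the latest.
public class LatestVersionSketch {
  enum ApiVersion { V0, V1, V2 }

  public static void main(String[] args) {
    List<ApiVersion> versions = Arrays.asList(ApiVersion.V1, ApiVersion.V0, ApiVersion.V2);
    System.out.println(Collections.max(versions)); // prints V2
  }
}
```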
hbase_RegionCoprocessorHost_prePut | /**
* Supports Coprocessor 'bypass'.
* @param put The Put object
* @param edit The WALEdit object.
* @return true if default processing should be bypassed
* @exception IOException Exception
*/
public boolean prePut(final Put put, final WALEdit edit) throws IOException {
if (coprocEnvironments.isEmpty()) {
return false;
}
boolean bypassable = true;
return execOperation(new RegionObserverOperationWithoutResult(bypassable) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.prePut(this, put, edit);
}
});
} | 3.68 |
dubbo_MetricsEventBus_post | /**
 * Full-lifecycle post; success and failure conditions can be customized.
*
* @param event event to post.
* @param targetSupplier original processing result supplier
* @param trFunction Custom event success criteria, judged according to the returned boolean type
* @param <T> Biz result type
* @return Biz result
*/
public static <T> T post(MetricsEvent event, Supplier<T> targetSupplier, Function<T, Boolean> trFunction) {
T result;
tryInvoke(() -> before(event));
if (trFunction == null) {
try {
result = targetSupplier.get();
} catch (Throwable e) {
tryInvoke(() -> error(event));
throw e;
}
tryInvoke(() -> after(event, result));
} else {
// Custom failure status
result = targetSupplier.get();
if (trFunction.apply(result)) {
tryInvoke(() -> after(event, result));
} else {
tryInvoke(() -> error(event));
}
}
return result;
} | 3.68 |
querydsl_BeanMap_containsKey | /**
 * Returns true if the bean defines a readable property with the given name.
 * <p>
 * Write-only properties will not be matched, as the test operates against
 * property read methods.
 *
 * @param name the name of the property to check
 * @return true if the bean defines a readable property with that name,
 *         false otherwise (including when the name is null)
 */
public boolean containsKey(String name) {
Method method = getReadMethod(name);
return method != null;
} | 3.68 |
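getReadMethod(name) is not shown here; one plausible way to resolve a property's read method is standard java.beans introspection. The sketch below is an assumption about how such a lookup could work, not querydsl's actual implementation:

```java
import java.beans.BeanInfo;
import java.beans.IntrospectionException;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.lang.reflect.Method;

// Assumption: resolve a property's getter via java.beans introspection.
// The real querydsl BeanMap may do this differently.
public class ReadMethodSketch {
  static Method findReadMethod(Class<?> beanClass, String propertyName) throws IntrospectionException {
    BeanInfo info = Introspector.getBeanInfo(beanClass);
    for (PropertyDescriptor pd : info.getPropertyDescriptors()) {
      if (pd.getName().equals(propertyName)) {
        return pd.getReadMethod(); // null for write-only properties
      }
    }
    return null;
  }

  public static class Person {
    private String name;
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
  }

  public static void main(String[] args) throws IntrospectionException {
    System.out.println(findReadMethod(Person.class, "name") != null);    // true
    System.out.println(findReadMethod(Person.class, "missing") != null); // false
  }
}
```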
hudi_HoodieRowCreateHandle_createMarkerFile | /**
* Creates an empty marker file corresponding to storage writer path.
*
* @param partitionPath Partition path
*/
private static void createMarkerFile(String partitionPath,
String dataFileName,
String instantTime,
HoodieTable<?, ?, ?, ?> table,
HoodieWriteConfig writeConfig) {
WriteMarkersFactory.get(writeConfig.getMarkersType(), table, instantTime)
.create(partitionPath, dataFileName, IOType.CREATE);
} | 3.68 |
framework_Range_startsAfter | /**
* Checks whether this range starts after the end of another range.
*
* @param other
* the other range to compare against
* @return <code>true</code> if this range starts after the
* <code>other</code>
*/
public boolean startsAfter(final Range other) {
return getStart() >= other.getEnd();
} | 3.68 |
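Because ranges here are half-open (the end index is exclusive), a range that begins exactly where another ends is already "after" it, hence the >= comparison. A minimal sketch with plain ints rather than Vaadin's Range class:

```java
// Minimal sketch: for half-open ranges [start, end), a range starting
// exactly at another's end is considered "after" it.
public class StartsAfterSketch {
  static boolean startsAfter(int start, int otherEnd) {
    return start >= otherEnd;
  }

  public static void main(String[] args) {
    System.out.println(startsAfter(5, 5)); // true: [5, 8) starts after [2, 5)
    System.out.println(startsAfter(4, 5)); // false: [4, 8) overlaps [2, 5)
  }
}
```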
hadoop_OBSCommonUtils_objectRepresentsDirectory | /**
 * Predicate: does the object represent a directory?
 *
 * @param name object name
 * @param size object size
 * @return true if it meets the criteria for being an object that represents a directory
 */
public static boolean objectRepresentsDirectory(final String name,
final long size) {
return !name.isEmpty() && name.charAt(name.length() - 1) == '/'
&& size == 0L;
} | 3.68 |
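Only a non-empty key that ends with '/' and has zero size counts as a directory marker. An illustrative restatement of the predicate (not the Hadoop OBS class):

```java
// Illustrative restatement of the predicate: a non-empty key ending in '/'
// with size 0 is treated as a directory marker object.
public class DirectoryMarkerSketch {
  static boolean representsDirectory(String name, long size) {
    return !name.isEmpty() && name.charAt(name.length() - 1) == '/' && size == 0L;
  }

  public static void main(String[] args) {
    System.out.println(representsDirectory("data/", 0L));     // true
    System.out.println(representsDirectory("data/file", 0L)); // false: no trailing slash
    System.out.println(representsDirectory("data/", 42L));    // false: non-zero size
  }
}
```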
hbase_Procedure_setLastUpdate | /**
* Called on store load to initialize the Procedure internals after the creation/deserialization.
*/
protected void setLastUpdate(long lastUpdate) {
this.lastUpdate = lastUpdate;
} | 3.68 |
hbase_HBaseCommonTestingUtility_randomFreePort | /**
* Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
 * called from single-threaded test setup code.
*/
public int randomFreePort() {
int port = 0;
do {
port = randomPort();
if (takenRandomPorts.contains(port)) {
port = 0;
continue;
}
takenRandomPorts.add(port);
if (!portChecker.available(port)) {
port = 0;
}
} while (port == 0);
return port;
} | 3.68 |
framework_AbstractComponent_hasEqualHeight | /**
 * Tests whether the given component has the same height as this instance.
*
* @param component
* the component for the height comparison
* @return true if the heights are equal
*/
private boolean hasEqualHeight(Component component) {
return getHeight() == component.getHeight()
&& getHeightUnits().equals(component.getHeightUnits());
} | 3.68 |
hbase_DirectMemoryUtils_getNettyDirectMemoryUsage | /** Returns the current amount of direct memory used by netty module. */
public static long getNettyDirectMemoryUsage() {
ByteBufAllocatorMetric metric =
((ByteBufAllocatorMetricProvider) PooledByteBufAllocator.DEFAULT).metric();
return metric.usedDirectMemory();
} | 3.68 |
hadoop_OBSDataBlocks_getOutstandingBufferCount | /**
* Get count of outstanding buffers.
*
* @return the current buffer count
*/
public int getOutstandingBufferCount() {
return BUFFERS_OUTSTANDING.get();
} | 3.68 |
hmily_MetricsReporter_recordTime | /**
* Record time by duration.
*
* @param name name
* @param duration duration
*/
public static void recordTime(final String name, final long duration) {
recordTime(name, null, duration);
} | 3.68 |
flink_JobVertex_setResources | /**
* Sets the minimum and preferred resources for the task.
*
* @param minResources The minimum resource for the task.
* @param preferredResources The preferred resource for the task.
*/
public void setResources(ResourceSpec minResources, ResourceSpec preferredResources) {
this.minResources = checkNotNull(minResources);
this.preferredResources = checkNotNull(preferredResources);
} | 3.68 |
hudi_HoodieLazyInsertIterable_getTransformer | /**
* Transformer function to help transform a HoodieRecord. This transformer is used by BufferedIterator to offload some
* expensive operations of transformation to the reader thread.
*/
public <T> Function<HoodieRecord<T>, HoodieInsertValueGenResult<HoodieRecord>> getTransformer(Schema schema,
HoodieWriteConfig writeConfig) {
return getTransformerInternal(schema, writeConfig);
} | 3.68 |
framework_Form_getLayout | /**
* Gets the layout of the form.
*
* <p>
* By default form uses <code>OrderedLayout</code> with <code>form</code>
* -style.
* </p>
*
* @return the Layout of the form.
*/
public Layout getLayout() {
return (Layout) getState(false).layout;
} | 3.68 |
hbase_ConnectionCache_getTable | /**
* Caller closes the table afterwards.
*/
public Table getTable(String tableName) throws IOException {
ConnectionInfo connInfo = getCurrentConnection();
return connInfo.connection.getTable(TableName.valueOf(tableName));
} | 3.68 |
framework_AbstractComponentConnector_unregisterTouchHandlers | /**
* The new default behavior is for long taps to fire a contextclick event if
* there's a contextclick listener attached to the component.
*
* If you do not want this in your component, override this with a blank
* method to get rid of said behavior.
*
* @since 7.6
*/
protected void unregisterTouchHandlers() {
if (touchStartHandler != null) {
touchStartHandler.removeHandler();
touchStartHandler = null;
}
if (touchMoveHandler != null) {
touchMoveHandler.removeHandler();
touchMoveHandler = null;
}
if (touchEndHandler != null) {
touchEndHandler.removeHandler();
touchEndHandler = null;
}
} | 3.68 |
flink_SingleOutputStreamOperator_returns | /**
* Adds a type information hint about the return type of this operator. This method can be used
* in cases where Flink cannot determine automatically what the produced type of a function is.
* That can be the case if the function uses generic type variables in the return type that
* cannot be inferred from the input type.
*
* <p>In most cases, the methods {@link #returns(Class)} and {@link #returns(TypeHint)} are
* preferable.
*
* @param typeInfo type information as a return type hint
* @return This operator with a given return type hint.
*/
public SingleOutputStreamOperator<T> returns(TypeInformation<T> typeInfo) {
requireNonNull(typeInfo, "TypeInformation must not be null");
transformation.setOutputType(typeInfo);
return this;
} | 3.68 |
flink_MessageParameter_getValueAsString | /**
* Returns the resolved value of this parameter as a string, or {@code null} if it isn't
* resolved yet.
*
* @return resolved value, or null if it wasn't resolved yet
*/
final String getValueAsString() {
return value == null ? null : convertToString(value);
} | 3.68 |
flink_UpdatingTopCityExample_createTemporaryDirectory | /** Creates an empty temporary directory for CSV files and returns the absolute path. */
private static String createTemporaryDirectory() throws IOException {
final Path tempDirectory = Files.createTempDirectory("population");
return tempDirectory.toString();
} | 3.68 |
hadoop_BaseRecord_validate | /**
* Validates the record. Called when the record is created, populated from the
 * state store, and before committing to the state store. If validation fails,
 * an exception is thrown.
*/
public void validate() {
if (getDateCreated() <= 0) {
throw new IllegalArgumentException(ERROR_MSG_CREATION_TIME_NEGATIVE);
} else if (getDateModified() <= 0) {
throw new IllegalArgumentException(ERROR_MSG_MODIFICATION_TIME_NEGATIVE);
}
} | 3.68 |
flink_TwoInputUdfOperator_withForwardedFieldsSecond | /**
* Adds semantic information about forwarded fields of the second input of the user-defined
* function. The forwarded fields information declares fields which are never modified by the
* function and which are forwarded at the same position to the output or unchanged copied to
* another position in the output.
*
* <p>Fields that are forwarded at the same position are specified by their position. The
* specified position must be valid for the input and output data type and have the same type.
* For example <code>withForwardedFieldsSecond("f2")</code> declares that the third field of a
* Java input tuple from the second input is copied to the third field of an output tuple.
*
* <p>Fields which are unchanged copied from the second input to another position in the output
* are declared by specifying the source field reference in the second input and the target
* field reference in the output. {@code withForwardedFieldsSecond("f0->f2")} denotes that the
* first field of the second input Java tuple is unchanged copied to the third field of the Java
* output tuple. When using a wildcard ("*") ensure that the number of declared fields and their
* types in second input and output type match.
*
* <p>Multiple forwarded fields can be annotated in one ({@code withForwardedFieldsSecond("f2;
* f3->f0; f4")}) or separate Strings ({@code withForwardedFieldsSecond("f2", "f3->f0", "f4")}).
* Please refer to the JavaDoc of {@link org.apache.flink.api.common.functions.Function} or
* Flink's documentation for details on field references such as nested fields and wildcard.
*
* <p>It is not possible to override existing semantic information about forwarded fields of the
* second input which was for example added by a {@link
* org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsSecond} class
* annotation.
*
* <p><b>NOTE: Adding semantic information for functions is optional! If used correctly,
* semantic information can help the Flink optimizer to generate more efficient execution plans.
* However, incorrect semantic information can cause the optimizer to generate incorrect
* execution plans which compute wrong results! So be careful when adding semantic information.
* </b>
*
* @param forwardedFieldsSecond A list of forwarded field expressions for the second input of
* the function.
* @return This operator with annotated forwarded field information.
* @see org.apache.flink.api.java.functions.FunctionAnnotation
* @see org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsSecond
*/
@SuppressWarnings("unchecked")
public O withForwardedFieldsSecond(String... forwardedFieldsSecond) {
if (this.udfSemantics == null || this.analyzedUdfSemantics) {
// extract semantic properties from function annotations
setSemanticProperties(extractSemanticAnnotationsFromUdf(getFunction().getClass()));
}
if (this.udfSemantics == null || this.analyzedUdfSemantics) {
setSemanticProperties(new DualInputSemanticProperties());
SemanticPropUtil.getSemanticPropsDualFromString(
this.udfSemantics,
null,
forwardedFieldsSecond,
null,
null,
null,
null,
getInput1Type(),
getInput2Type(),
getResultType());
} else {
if (udfWithForwardedFieldsSecondAnnotation(getFunction().getClass())) {
// refuse semantic information as it would override the function annotation
throw new SemanticProperties.InvalidSemanticAnnotationException(
"Forwarded field information "
+ "has already been added by a function annotation for the second input of this operator. "
+ "Cannot overwrite function annotations.");
} else {
SemanticPropUtil.getSemanticPropsDualFromString(
this.udfSemantics,
null,
forwardedFieldsSecond,
null,
null,
null,
null,
getInput1Type(),
getInput2Type(),
getResultType());
}
}
O returnType = (O) this;
return returnType;
} | 3.68 |
hadoop_WorkloadMapper_configureJob | /**
* Setup input and output formats and optional reducer.
*/
public void configureJob(Job job) {
job.setInputFormatClass(VirtualInputFormat.class);
job.setNumReduceTasks(0);
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(NullWritable.class);
job.setOutputFormatClass(NullOutputFormat.class);
} | 3.68 |
hadoop_SequenceFileAsTextRecordReader_nextKeyValue | /** Read key/value pair in a line. */
public synchronized boolean nextKeyValue()
throws IOException, InterruptedException {
if (!sequenceFileRecordReader.nextKeyValue()) {
return false;
}
if (key == null) {
key = new Text();
}
if (value == null) {
value = new Text();
}
key.set(sequenceFileRecordReader.getCurrentKey().toString());
value.set(sequenceFileRecordReader.getCurrentValue().toString());
return true;
} | 3.68 |
hadoop_EntryStatus_toEntryStatus | /**
* Go from the result of a getFileStatus call or
* listing entry to a status.
* A null argument is mapped to {@link #not_found}
* @param st file status
* @return the status enum.
*/
public static EntryStatus toEntryStatus(@Nullable FileStatus st) {
if (st == null) {
return not_found;
}
if (st.isDirectory()) {
return dir;
}
if (st.isFile()) {
return file;
}
return unknown;
} | 3.68 |
pulsar_DataBlockHeaderImpl_fromStream | // Construct DataBlockHeader from InputStream, which contains `HEADER_MAX_SIZE` bytes readable.
public static DataBlockHeader fromStream(InputStream stream) throws IOException {
CountingInputStream countingStream = new CountingInputStream(stream);
DataInputStream dis = new DataInputStream(countingStream);
int magic = dis.readInt();
if (magic != MAGIC_WORD) {
throw new IOException("Data block header magic word not match. read: " + magic
+ " expected: " + MAGIC_WORD);
}
long headerLen = dis.readLong();
long blockLen = dis.readLong();
long firstEntryId = dis.readLong();
long toSkip = headerLen - countingStream.getCount();
if (dis.skip(toSkip) != toSkip) {
throw new EOFException("Header was too small");
}
return new DataBlockHeaderImpl(headerLen, blockLen, firstEntryId);
} | 3.68 |
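The reader above assumes a fixed layout: a 4-byte magic word, three longs (header length, block length, first entry id), then padding up to the declared header length. The writer sketch below illustrates that layout; the magic value and header size are made-up placeholders, not Pulsar's constants:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of the on-wire layout the reader expects: int magic, long headerLen,
// long blockLen, long firstEntryId, then padding up to headerLen bytes.
public class HeaderLayoutSketch {
  static final int MAGIC_WORD = 0xCAFEBABE;   // placeholder, not Pulsar's value
  static final int HEADER_MAX_SIZE = 128;     // placeholder

  static byte[] writeHeader(long blockLen, long firstEntryId) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);
    dos.writeInt(MAGIC_WORD);
    dos.writeLong(HEADER_MAX_SIZE);  // headerLen
    dos.writeLong(blockLen);
    dos.writeLong(firstEntryId);
    // pad the remainder so the stream really contains headerLen bytes
    dos.write(new byte[HEADER_MAX_SIZE - dos.size()]);
    dos.flush();
    return bos.toByteArray();
  }

  public static void main(String[] args) throws IOException {
    System.out.println(writeHeader(4096L, 0L).length); // prints 128
  }
}
```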
hbase_ProcedureExecutor_execProcedure | /**
* Executes <code>procedure</code>
* <ul>
* <li>Calls the doExecute() of the procedure
* <li>If the procedure execution didn't fail (i.e. valid user input)
* <ul>
* <li>...and returned subprocedures
* <ul>
* <li>The subprocedures are initialized.
* <li>The subprocedures are added to the store
* <li>The subprocedures are added to the runnable queue
* <li>The procedure is now in a WAITING state, waiting for the subprocedures to complete
* </ul>
* </li>
* <li>...if there are no subprocedure
* <ul>
* <li>the procedure completed successfully
* <li>if there is a parent (WAITING)
* <li>the parent state will be set to RUNNABLE
* </ul>
* </li>
* </ul>
* </li>
* <li>In case of failure
* <ul>
* <li>The store is updated with the new state</li>
* <li>The executor (caller of this method) will start the rollback of the procedure</li>
* </ul>
* </li>
* </ul>
*/
private void execProcedure(RootProcedureState<TEnvironment> procStack,
Procedure<TEnvironment> procedure) {
Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE,
"NOT RUNNABLE! " + procedure.toString());
// Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
// The exception is caught below and then we hurry to the exit without disturbing state. The
// idea is that the processing of this procedure will be unsuspended later by an external event
    // such as the report of a region open.
boolean suspended = false;
// Whether to 're-' -execute; run through the loop again.
boolean reExecute = false;
Procedure<TEnvironment>[] subprocs = null;
do {
reExecute = false;
procedure.resetPersistence();
try {
subprocs = procedure.doExecute(getEnvironment());
if (subprocs != null && subprocs.length == 0) {
subprocs = null;
}
} catch (ProcedureSuspendedException e) {
LOG.trace("Suspend {}", procedure);
suspended = true;
} catch (ProcedureYieldException e) {
LOG.trace("Yield {}", procedure, e);
yieldProcedure(procedure);
return;
} catch (InterruptedException e) {
LOG.trace("Yield interrupt {}", procedure, e);
handleInterruptedException(procedure, e);
yieldProcedure(procedure);
return;
} catch (Throwable e) {
// Catch NullPointerExceptions or similar errors...
String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
LOG.error(msg, e);
procedure.setFailure(new RemoteProcedureException(msg, e));
}
if (!procedure.isFailed()) {
if (subprocs != null) {
if (subprocs.length == 1 && subprocs[0] == procedure) {
// Procedure returned itself. Quick-shortcut for a state machine-like procedure;
// i.e. we go around this loop again rather than go back out on the scheduler queue.
subprocs = null;
reExecute = true;
LOG.trace("Short-circuit to next step on pid={}", procedure.getProcId());
} else {
// Yield the current procedure, and make the subprocedure runnable
// subprocs may come back 'null'.
subprocs = initializeChildren(procStack, procedure, subprocs);
LOG.info("Initialized subprocedures=" + (subprocs == null
? null
: Stream.of(subprocs).map(e -> "{" + e.toString() + "}").collect(Collectors.toList())
.toString()));
}
} else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
LOG.trace("Added to timeoutExecutor {}", procedure);
timeoutExecutor.add(procedure);
} else if (!suspended) {
// No subtask, so we are done
procedure.setState(ProcedureState.SUCCESS);
}
}
// allows to kill the executor before something is stored to the wal.
// useful to test the procedure recovery.
if (
testing != null && testing.shouldKillBeforeStoreUpdate(suspended, procedure.hasParent())
) {
kill("TESTING: Kill BEFORE store update: " + procedure);
}
// TODO: The code here doesn't check if store is running before persisting to the store as
// it relies on the method call below to throw RuntimeException to wind up the stack and
// executor thread to stop. The statement following the method call below seems to check if
// store is not running, to prevent scheduling children procedures, re-execution or yield
// of this procedure. This may need more scrutiny and subsequent cleanup in future
//
// Commit the transaction even if a suspend (state may have changed). Note this append
// can take a bunch of time to complete.
if (procedure.needPersistence()) {
// Add the procedure to the stack
// See HBASE-28210 on why we need synchronized here
synchronized (procStack) {
procStack.addRollbackStep(procedure);
updateStoreOnExec(procStack, procedure, subprocs);
}
}
// if the store is not running we are aborting
if (!store.isRunning()) {
return;
}
// if the procedure is kind enough to pass the slot to someone else, yield
if (
procedure.isRunnable() && !suspended
&& procedure.isYieldAfterExecutionStep(getEnvironment())
) {
yieldProcedure(procedure);
return;
}
assert (reExecute && subprocs == null) || !reExecute;
} while (reExecute);
// Allows to kill the executor after something is stored to the WAL but before the below
// state settings are done -- in particular the one on the end where we make parent
// RUNNABLE again when its children are done; see countDownChildren.
if (testing != null && testing.shouldKillAfterStoreUpdate(suspended)) {
kill("TESTING: Kill AFTER store update: " + procedure);
}
// Submit the new subprocedures
if (subprocs != null && !procedure.isFailed()) {
submitChildrenProcedures(subprocs);
}
// we need to log the release lock operation before waking up the parent procedure, as there
// could be race that the parent procedure may call updateStoreOnExec ahead of us and remove all
// the sub procedures from store and cause problems...
releaseLock(procedure, false);
// if the procedure is complete and has a parent, count down the children latch.
// If 'suspended', do nothing to change state -- let other threads handle unsuspend event.
if (!suspended && procedure.isFinished() && procedure.hasParent()) {
countDownChildren(procStack, procedure);
}
} | 3.68 |
flink_RocksDBStateDownloader_downloadDataForStateHandle | /** Copies the file from a single state handle to the given path. */
private void downloadDataForStateHandle(
Path restoreFilePath,
StreamStateHandle remoteFileHandle,
CloseableRegistry closeableRegistry)
throws IOException {
if (closeableRegistry.isClosed()) {
return;
}
try {
FSDataInputStream inputStream = remoteFileHandle.openInputStream();
closeableRegistry.registerCloseable(inputStream);
Files.createDirectories(restoreFilePath.getParent());
OutputStream outputStream = Files.newOutputStream(restoreFilePath);
closeableRegistry.registerCloseable(outputStream);
byte[] buffer = new byte[8 * 1024];
while (true) {
int numBytes = inputStream.read(buffer);
if (numBytes == -1) {
break;
}
outputStream.write(buffer, 0, numBytes);
}
closeableRegistry.unregisterAndCloseAll(outputStream, inputStream);
} catch (Exception ex) {
// Quickly close all open streams. This also stops all concurrent downloads because they
// are registered with the same registry.
IOUtils.closeQuietly(closeableRegistry);
throw new IOException(ex);
}
} | 3.68 |
flink_FactoryUtil_createDynamicTableSource | /**
* @deprecated Use {@link #createDynamicTableSource(DynamicTableSourceFactory, ObjectIdentifier,
* ResolvedCatalogTable, Map, ReadableConfig, ClassLoader, boolean)}
*/
@Deprecated
public static DynamicTableSource createDynamicTableSource(
@Nullable DynamicTableSourceFactory preferredFactory,
ObjectIdentifier objectIdentifier,
ResolvedCatalogTable catalogTable,
ReadableConfig configuration,
ClassLoader classLoader,
boolean isTemporary) {
return createDynamicTableSource(
preferredFactory,
objectIdentifier,
catalogTable,
Collections.emptyMap(),
configuration,
classLoader,
isTemporary);
} | 3.68 |
flink_FloatValue_setValue | /**
* Sets the value of the encapsulated primitive float.
*
* @param value the new value of the encapsulated primitive float.
*/
public void setValue(float value) {
this.value = value;
} | 3.68 |
morf_InsertStatement_isParameterisedInsert | /**
* Identifies whether this insert is a parameterised insert
* with no source table or select.
*
* @return true if this is a parameterised insert statement, false otherwise
*/
public boolean isParameterisedInsert() {
return fromTable == null && selectStatement == null && values.isEmpty();
} | 3.68 |
querydsl_AbstractSQLInsertClause_setBatchToBulk | /**
* Set whether batches should be optimized into a single bulk operation.
* Will revert to batches, if bulk is not supported
*/
public void setBatchToBulk(boolean b) {
this.batchToBulk = b && configuration.getTemplates().isBatchToBulkSupported();
} | 3.68 |
hbase_RpcServer_getServiceInterface | /**
* @param serviceName Some arbitrary string that represents a 'service'.
* @param services Available services and their service interfaces.
* @return Service interface class for <code>serviceName</code>
*/
protected static Class<?> getServiceInterface(final List<BlockingServiceAndInterface> services,
final String serviceName) {
BlockingServiceAndInterface bsasi = getServiceAndInterface(services, serviceName);
return bsasi == null ? null : bsasi.getServiceInterface();
} | 3.68 |
hadoop_AbfsInputStreamStatisticsImpl_toString | /**
* String operator describes all the current statistics.
* <b>Important: there are no guarantees as to the stability
* of this value.</b>
*
* @return the current values of the stream statistics.
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(
"StreamStatistics{");
sb.append(ioStatisticsStore.toString());
sb.append('}');
return sb.toString();
} | 3.68 |
hadoop_TFile_getKeyLength | /**
* Get the length of the key.
*
* @return the length of the key.
*/
public int getKeyLength() {
return klen;
} | 3.68 |
dubbo_ReflectUtils_resolveTypes | /**
* Resolve the types of the specified values
*
* @param values the values
* @return If can't be resolved, return {@link ReflectUtils#EMPTY_CLASS_ARRAY empty class array}
* @since 2.7.6
*/
public static Class[] resolveTypes(Object... values) {
if (isEmpty(values)) {
return EMPTY_CLASS_ARRAY;
}
int size = values.length;
Class[] types = new Class[size];
for (int i = 0; i < size; i++) {
Object value = values[i];
types[i] = value == null ? null : value.getClass();
}
return types;
} | 3.68 |
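Null values resolve to null entries in the returned array rather than to a class. A standalone sketch of the same pattern (not Dubbo's ReflectUtils, which also relies on its own empty-array constant and helper methods):

```java
// Standalone sketch: each value maps to its runtime class, with nulls
// resolving to null entries instead of a class.
public class ResolveTypesSketch {
  static Class<?>[] resolveTypes(Object... values) {
    if (values == null || values.length == 0) {
      return new Class<?>[0];
    }
    Class<?>[] types = new Class<?>[values.length];
    for (int i = 0; i < values.length; i++) {
      types[i] = values[i] == null ? null : values[i].getClass();
    }
    return types;
  }

  public static void main(String[] args) {
    Class<?>[] types = resolveTypes("a", 1, null);
    // prints: class java.lang.String, class java.lang.Integer, null
    for (Class<?> t : types) {
      System.out.println(t);
    }
  }
}
```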
framework_LayoutDependencyTree_getScrollingBoundary | /**
* Returns the scrolling boundary for this component. If a cached value is
* available, the check isn't performed again. If no cached value exists,
* iterates through the component hierarchy until the closest parent that
* implements {@link MayScrollChildren} has been found.
*
* @param connector
* the connector to check, should not be {@code null}
* @return the closest scrolling parent or {@code null} if not found
*/
public ComponentConnector getScrollingBoundary(
ComponentConnector connector) {
LayoutDependency dependency = getDependency(connector.getConnectorId(),
HORIZONTAL);
if (!dependency.scrollingParentCached) {
ServerConnector parent = dependency.connector.getParent();
if (parent instanceof MayScrollChildren) {
dependency.scrollingBoundary = connector;
} else if (parent instanceof ComponentConnector) {
dependency.scrollingBoundary = getScrollingBoundary(
(ComponentConnector) parent);
} else {
// No scrolling parent
}
dependency.scrollingParentCached = true;
}
return dependency.scrollingBoundary;
} | 3.68 |
querydsl_AntMetaDataExporter_getCustomTypes | /**
* Gets a list of custom types
* @return a list of custom types
* @deprecated Use addCustomType instead
*/
public String[] getCustomTypes() {
String[] customTypes = new String[this.customTypes.size()];
for (int i = 0; i < this.customTypes.size(); i++) {
CustomType customType = this.customTypes.get(i);
customTypes[i] = customType.getClassName();
}
return customTypes;
} | 3.68 |
open-banking-gateway_BaseDatasafeDbStorageService_write | /**
* Open Datasafe object for writing.
* @param withCallback Absolute path of the object to write to, including callback hook. I.e. {@code db://storage/deadbeef}
* @return Stream to write data to.
*/
@Override
@SneakyThrows
@Transactional
public OutputStream write(WithCallback<AbsoluteLocation, ? extends ResourceWriteCallback> withCallback) {
return new SetAndSaveOnClose(
deduceId(withCallback.getWrapped()),
handlers.get(deduceTable(withCallback.getWrapped()))
);
} | 3.68 |
flink_ExceptionUtils_stripExecutionException | /**
* Unpacks an {@link ExecutionException} and returns its cause. Otherwise the given Throwable is
* returned.
*
* @param throwable to unpack if it is an ExecutionException
* @return Cause of ExecutionException or given Throwable
*/
public static Throwable stripExecutionException(Throwable throwable) {
return stripException(throwable, ExecutionException.class);
} | 3.68 |
querydsl_AbstractMySQLQuery_into | /**
* SELECT ... INTO var_list selects column values and stores them into variables.
*
* @param var variable name
* @return the current object
*/
public C into(String var) {
return addFlag(Position.END, "\ninto " + var);
} | 3.68 |
hadoop_RawErasureEncoder_allowChangeInputs | /**
 * Whether changes to the input buffers are allowed while performing encoding/decoding.
* @return true if it's allowed to change inputs, false otherwise
*/
public boolean allowChangeInputs() {
return coderOptions.allowChangeInputs();
} | 3.68 |
flink_SortUtil_maxNormalizedKey | /** Max unsigned byte is -1. */
public static void maxNormalizedKey(MemorySegment target, int offset, int numBytes) {
// write max value.
for (int i = 0; i < numBytes; i++) {
target.put(offset + i, (byte) -1);
}
} | 3.68 |
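In Java, (byte) -1 carries the bit pattern 0xFF, i.e. the maximum unsigned byte, which is why the comment calls -1 the max. An illustrative sketch on a plain byte[] instead of Flink's MemorySegment:

```java
// Illustrative only (plain byte[] instead of Flink's MemorySegment):
// (byte) -1 is the bit pattern 0xFF, the maximum unsigned byte value.
public class MaxKeySketch {
  static void fillMax(byte[] target, int offset, int numBytes) {
    for (int i = 0; i < numBytes; i++) {
      target[offset + i] = (byte) -1; // 0xFF
    }
  }

  public static void main(String[] args) {
    byte[] key = new byte[4];
    fillMax(key, 0, 4);
    System.out.println(Integer.toHexString(key[0] & 0xFF)); // prints ff
  }
}
```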
hadoop_BlockBlobInputStream_skip | /**
* Skips over and discards n bytes of data from this input stream.
* @param n the number of bytes to be skipped.
* @return the actual number of bytes skipped.
* @throws IOException IO failure
* @throws IndexOutOfBoundsException if n is negative or if the sum of n
* and the current value of getPos() is greater than the length of the stream.
*/
@Override
public synchronized long skip(long n) throws IOException {
checkState();
if (blobInputStream != null) {
// blobInput stream is open; delegate the work to it
long skipped = blobInputStream.skip(n);
// update position to the actual skip value
streamPosition += skipped;
return skipped;
}
// no blob stream; implement the skip logic directly
if (n < 0 || n > streamLength - getPos()) {
throw new IndexOutOfBoundsException("skip range");
}
if (streamBuffer != null) {
// there's a buffer, so seek with it
if (n < streamBufferLength - streamBufferPosition) {
// new range is in the buffer, so just update the buffer position
// skip within the buffer.
streamBufferPosition += (int) n;
} else {
      // skip is out of range, so move position to the new value and reset
// the buffer ready for the next read()
streamPosition = getPos() + n;
resetStreamBuffer();
}
} else {
// no stream buffer; increment the stream position ready for
// the next triggered connection & read
streamPosition += n;
}
return n;
} | 3.68 |
framework_VAbstractPopupCalendar_setFocus | /**
* Sets focus to Calendar panel.
*
* @param focus
* {@code true} for {@code focus}, {@code false} for {@code blur}
*/
public void setFocus(boolean focus) {
calendar.setFocus(focus);
} | 3.68 |
querydsl_ProjectableSQLQuery_unionAll | /**
* Creates an union expression for the given subqueries
*
* @param <RT>
* @param alias alias for union
* @param sq subqueries
* @return the current object
*/
@SuppressWarnings("unchecked")
public <RT> Q unionAll(Path<?> alias, SubQueryExpression<RT>... sq) {
return from((Expression) UnionUtils.union(Arrays.asList(sq), (Path) alias, true));
} | 3.68 |
graphhopper_PointList_copy | /**
* This method does a deep copy of this object for the specified range.
*
* @param from the copying of the old PointList starts at this index
* @param end the copying of the old PointList ends at the index before (i.e. end is exclusive)
*/
public PointList copy(int from, int end) {
if (from > end)
throw new IllegalArgumentException("from must be smaller or equal to end");
if (from < 0 || end > size())
throw new IllegalArgumentException("Illegal interval: " + from + ", " + end + ", size:" + size());
PointList thisPL = this;
if (this instanceof ShallowImmutablePointList) {
ShallowImmutablePointList spl = (ShallowImmutablePointList) this;
thisPL = spl.wrappedPointList;
from = spl.fromOffset + from;
end = spl.fromOffset + end;
}
int len = end - from;
PointList copyPL = new PointList(len, is3D());
copyPL.size = len;
copyPL.isImmutable = isImmutable();
System.arraycopy(thisPL.latitudes, from, copyPL.latitudes, 0, len);
System.arraycopy(thisPL.longitudes, from, copyPL.longitudes, 0, len);
if (is3D())
System.arraycopy(thisPL.elevations, from, copyPL.elevations, 0, len);
return copyPL;
} | 3.68 |
hadoop_PowerShellFencer_buildPSScript | /**
* Build a PowerShell script to kill a java.exe process in a remote machine.
*
* @param processName Name of the process to kill. This is an attribute in
* CommandLine.
* @param host Host where the process is.
* @return Path of the PowerShell script.
*/
private String buildPSScript(final String processName, final String host) {
LOG.info(
"Building PowerShell script to kill " + processName + " at " + host);
String ps1script = null;
BufferedWriter writer = null;
try {
File file = File.createTempFile("temp-fence-command", ".ps1");
file.deleteOnExit();
FileOutputStream fos = new FileOutputStream(file, false);
OutputStreamWriter osw =
new OutputStreamWriter(fos, StandardCharsets.UTF_8);
writer = new BufferedWriter(osw);
// Filter to identify the Namenode process
String filter = StringUtils.join(" and ", new String[] {
"Name LIKE '%java.exe%'",
"CommandLine LIKE '%" + processName+ "%'"});
// Identify the process
String cmd = "Get-WmiObject Win32_Process";
cmd += " -Filter \"" + filter + "\"";
// Remote location
cmd += " -Computer " + host;
// Kill it
cmd += " |% { $_.Terminate() }";
LOG.info("PowerShell command: " + cmd);
writer.write(cmd);
writer.flush();
ps1script = file.getAbsolutePath();
} catch (IOException ioe) {
LOG.error("Cannot create PowerShell script", ioe);
} finally {
if (writer != null) {
try {
writer.close();
} catch (IOException ioe) {
LOG.error("Cannot close PowerShell script", ioe);
}
}
}
return ps1script;
} | 3.68 |
flink_AndCondition_getRight | /** @return One of the {@link IterativeCondition conditions} combined in this condition. */
public IterativeCondition<T> getRight() {
return right;
} | 3.68 |
flink_TimestampData_isCompact | /**
* Returns whether the timestamp data is small enough to be stored in a long of milliseconds.
*/
public static boolean isCompact(int precision) {
return precision <= 3;
} | 3.68 |
framework_BeanValidator_getMessage | /**
* Returns the interpolated error message for the given constraint violation
* using the locale specified for this validator.
*
* @param violation
* the constraint violation
* @param locale
* the used locale
* @return the localized error message
*/
protected String getMessage(ConstraintViolation<?> violation,
Locale locale) {
return getJavaxBeanValidatorFactory().getMessageInterpolator()
.interpolate(violation.getMessageTemplate(),
createContext(violation), locale);
} | 3.68 |
pulsar_NonPersistentTopic_close | /**
* Close this topic - close all producers and subscriptions associated with this topic.
*
* @param disconnectClients disconnect clients
* @param closeWithoutWaitingClientDisconnect don't wait for client disconnect and forcefully close managed-ledger
* @return Completable future indicating completion of close operation
*/
@Override
public CompletableFuture<Void> close(
boolean disconnectClients, boolean closeWithoutWaitingClientDisconnect) {
CompletableFuture<Void> closeFuture = new CompletableFuture<>();
lock.writeLock().lock();
try {
if (!isFenced || closeWithoutWaitingClientDisconnect) {
isFenced = true;
} else {
log.warn("[{}] Topic is already being closed or deleted", topic);
closeFuture.completeExceptionally(new TopicFencedException("Topic is already fenced"));
return closeFuture;
}
} finally {
lock.writeLock().unlock();
}
List<CompletableFuture<Void>> futures = new ArrayList<>();
replicators.forEach((cluster, replicator) -> futures.add(replicator.disconnect()));
if (disconnectClients) {
futures.add(ExtensibleLoadManagerImpl.getAssignedBrokerLookupData(
brokerService.getPulsar(), topic).thenAccept(lookupData ->
producers.values().forEach(producer -> futures.add(producer.disconnect(lookupData)))
));
}
if (topicPublishRateLimiter != null) {
topicPublishRateLimiter.close();
}
subscriptions.forEach((s, sub) -> futures.add(sub.disconnect()));
if (this.resourceGroupPublishLimiter != null) {
this.resourceGroupPublishLimiter.unregisterRateLimitFunction(this.getName());
}
if (entryFilters != null) {
entryFilters.getRight().forEach(filter -> {
try {
filter.close();
} catch (Throwable e) {
log.warn("Error shutting down entry filter {}", filter, e);
}
});
}
CompletableFuture<Void> clientCloseFuture =
closeWithoutWaitingClientDisconnect ? CompletableFuture.completedFuture(null)
: FutureUtil.waitForAll(futures);
clientCloseFuture.thenRun(() -> {
log.info("[{}] Topic closed", topic);
// unload topic iterates over topics map and removing from the map with the same thread creates deadlock.
// so, execute it in different thread
brokerService.executor().execute(() -> {
if (disconnectClients) {
brokerService.removeTopicFromCache(NonPersistentTopic.this);
unregisterTopicPolicyListener();
}
closeFuture.complete(null);
});
}).exceptionally(exception -> {
log.error("[{}] Error closing topic", topic, exception);
isFenced = false;
closeFuture.completeExceptionally(exception);
return null;
});
return closeFuture;
} | 3.68 |
hbase_Get_hasFamilies | /**
* Method for checking if any families have been inserted into this Get
 * @return true if familyMap is non-empty, false otherwise
*/
public boolean hasFamilies() {
return !this.familyMap.isEmpty();
} | 3.68 |
hbase_NettyServerCall_sendResponseIfReady | /**
* If we have a response, and delay is not set, then respond immediately. Otherwise, do not
* respond to client. This is called by the RPC code in the context of the Handler thread.
*/
@Override
public synchronized void sendResponseIfReady() throws IOException {
// set param null to reduce memory pressure
this.param = null;
connection.doRespond(this);
} | 3.68 |
hadoop_ReadBufferManager_testMimicFullUseAndAddFailedBuffer | /**
 * Test method that can mimic a no-free-buffers scenario and also add a ReadBuffer
 * to completedReadList. This ReadBuffer will get picked up by tryEvict()
 * the next time a new queue request comes in.
 * @param buf the buffer that needs to be added to completedReadList
*/
@VisibleForTesting
void testMimicFullUseAndAddFailedBuffer(ReadBuffer buf) {
freeList.clear();
completedReadList.add(buf);
} | 3.68 |
flink_JoinOperator_projectTuple12 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>
ProjectJoin<I1, I2, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>
projectTuple12() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> tType =
new TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(
fTypes);
return new ProjectJoin<
I1, I2, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
hadoop_MountdBase_startTCPServer | /* Start TCP server */
private void startTCPServer() {
tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
rpcProgram, 1);
rpcProgram.startDaemons();
try {
tcpServer.run();
} catch (Throwable e) {
LOG.error("Failed to start the TCP server.", e);
if (tcpServer.getBoundPort() > 0) {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP,
tcpServer.getBoundPort());
}
tcpServer.shutdown();
terminate(1, e);
}
tcpBoundPort = tcpServer.getBoundPort();
} | 3.68 |
hadoop_SaslOutputStream_close | /**
* Closes this output stream and releases any system resources associated with
* this stream.
*
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void close() throws IOException {
disposeSasl();
outStream.close();
} | 3.68 |
hudi_HiveSchemaUtil_createHiveArray | /**
 * Creates a Hive ARRAY schema from the equivalent Parquet list type.
*/
private static String createHiveArray(Type elementType, String elementName, boolean supportTimestamp, boolean doFormat) {
StringBuilder array = new StringBuilder();
array.append(doFormat ? "ARRAY< " : "ARRAY<");
if (elementType.isPrimitive()) {
array.append(convertField(elementType, supportTimestamp, doFormat));
} else {
final GroupType groupType = elementType.asGroupType();
final List<Type> groupFields = groupType.getFields();
if (groupFields.size() > 1 || (groupFields.size() == 1
&& (elementType.getName().equals("array") || elementType.getName().equals(elementName + "_tuple")))) {
array.append(convertField(elementType, supportTimestamp, doFormat));
} else {
array.append(convertField(groupType.getFields().get(0), supportTimestamp, doFormat));
}
}
array.append(">");
return array.toString();
} | 3.68 |
zxing_BinaryBitmap_isRotateSupported | /**
* @return Whether this bitmap supports counter-clockwise rotation.
*/
public boolean isRotateSupported() {
return binarizer.getLuminanceSource().isRotateSupported();
} | 3.68 |
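A hedged usage sketch of the guard this method enables; RGBLuminanceSource and the pixel buffer are illustrative choices, and rotateCounterClockwise() is only called when rotation is supported.

import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.RGBLuminanceSource;
import com.google.zxing.common.HybridBinarizer;

class RotationGuardExample {
    static BinaryBitmap rotateIfSupported(int width, int height, int[] pixels) {
        LuminanceSource source = new RGBLuminanceSource(width, height, pixels);
        BinaryBitmap bitmap = new BinaryBitmap(new HybridBinarizer(source));
        // rotateCounterClockwise() would throw if the underlying source cannot rotate.
        return bitmap.isRotateSupported() ? bitmap.rotateCounterClockwise() : bitmap;
    }
}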
flink_TableResultImpl_resultKind | /**
 * Specifies the result kind of the execution result.
*
* @param resultKind a {@link ResultKind} for the execution result.
*/
public Builder resultKind(ResultKind resultKind) {
Preconditions.checkNotNull(resultKind, "resultKind should not be null");
this.resultKind = resultKind;
return this;
} | 3.68 |
morf_SqlDialect_getSqlForSubstring | /**
* Converts the substring function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForSubstring(Function function) {
return getSubstringFunctionName() + "("
+ getSqlFrom(function.getArguments().get(0)) + ", "
+ getSqlFrom(function.getArguments().get(1)) + ", "
+ getSqlFrom(function.getArguments().get(2)) + ")";
} | 3.68 |
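For illustration, a sketch of an expression this method would render, assuming Morf exposes a substring factory on Function and the usual SqlUtils field/literal helpers; the column name and the SUBSTRING output are made up for a dialect whose getSubstringFunctionName() returns "SUBSTRING".

import static org.alfasoftware.morf.sql.SqlUtils.field;
import static org.alfasoftware.morf.sql.SqlUtils.literal;

// Assumed factory method; names are illustrative only.
Function substr = Function.substring(field("surname"), literal(1), literal(3));
// getSqlForSubstring(substr) would then produce: SUBSTRING(surname, 1, 3)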
hudi_HoodieTableMetadataUtil_convertFilesToFilesPartitionRecords | /**
 * Converts per-partition maps of deleted and appended files (e.g. from rollback metadata) into files-partition records.
*/
protected static List<HoodieRecord> convertFilesToFilesPartitionRecords(Map<String, List<String>> partitionToDeletedFiles,
Map<String, Map<String, Long>> partitionToAppendedFiles,
String instantTime, String operation) {
List<HoodieRecord> records = new ArrayList<>(partitionToDeletedFiles.size() + partitionToAppendedFiles.size());
int[] fileChangeCount = {0, 0}; // deletes, appends
partitionToDeletedFiles.forEach((partitionName, deletedFiles) -> {
fileChangeCount[0] += deletedFiles.size();
final String partition = getPartitionIdentifier(partitionName);
Map<String, Long> filesAdded = Collections.emptyMap();
if (partitionToAppendedFiles.containsKey(partitionName)) {
filesAdded = partitionToAppendedFiles.remove(partitionName);
}
HoodieRecord record = HoodieMetadataPayload.createPartitionFilesRecord(partition, filesAdded,
deletedFiles);
records.add(record);
});
partitionToAppendedFiles.forEach((partitionName, appendedFileMap) -> {
final String partition = getPartitionIdentifier(partitionName);
fileChangeCount[1] += appendedFileMap.size();
// Validate that no appended file has been deleted
checkState(
!appendedFileMap.keySet().removeAll(partitionToDeletedFiles.getOrDefault(partition, Collections.emptyList())),
"Rollback file cannot both be appended and deleted");
// New files added to a partition
HoodieRecord record = HoodieMetadataPayload.createPartitionFilesRecord(partition, appendedFileMap,
Collections.emptyList());
records.add(record);
});
LOG.info("Found at " + instantTime + " from " + operation + ". #partitions_updated=" + records.size()
+ ", #files_deleted=" + fileChangeCount[0] + ", #files_appended=" + fileChangeCount[1]);
return records;
} | 3.68 |
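A hedged sketch of the expected input shape for the conversion above, as if called from within HoodieTableMetadataUtil; partition paths, file names, sizes, and the instant time are invented for illustration.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hudi.common.model.HoodieRecord;

Map<String, List<String>> partitionToDeletedFiles = new HashMap<>();
partitionToDeletedFiles.put("2023/01/01", Arrays.asList("file-1.parquet"));

Map<String, Map<String, Long>> partitionToAppendedFiles = new HashMap<>();
partitionToAppendedFiles.put("2023/01/02", Collections.singletonMap("file-2.parquet", 1024L));

// Yields one files-partition record per affected partition (two here).
List<HoodieRecord> records = convertFilesToFilesPartitionRecords(
    partitionToDeletedFiles, partitionToAppendedFiles, "20230101120000", "rollback");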
hadoop_HdfsDtFetcher_getServiceName | /**
* Returns the service name for HDFS, which is also a valid URL prefix.
*/
public Text getServiceName() {
return new Text(SERVICE_NAME);
} | 3.68 |
morf_UpgradeStatusTableServiceImpl_writeStatusFromStatus | /**
* @see org.alfasoftware.morf.upgrade.UpgradeStatusTableService#writeStatusFromStatus(org.alfasoftware.morf.upgrade.UpgradeStatus, org.alfasoftware.morf.upgrade.UpgradeStatus)
*/
@Override
public int writeStatusFromStatus(UpgradeStatus fromStatus, UpgradeStatus toStatus) {
List<String> script = updateTableScript(fromStatus, toStatus);
try {
return sqlScriptExecutorProvider.get().execute(script);
} catch (RuntimeSqlException e) {
UpgradeStatus currentStatus = getStatus(Optional.empty());
log.debug("Caught exception trying to move from [" + fromStatus + "] to [" + toStatus + "]; current status = [" + currentStatus + "]", e);
if (currentStatus.equals(toStatus)) {
return 0;
} else if (currentStatus.equals(fromStatus)) {
// This might throw an exception if it fails again
return sqlScriptExecutorProvider.get().execute(script);
} else {
// No point trying again, so throw the original exception
throw e;
}
}
} | 3.68 |
hbase_Procedure_setStackIndexes | /**
* Called on store load to initialize the Procedure internals after the creation/deserialization.
*/
protected synchronized void setStackIndexes(final List<Integer> stackIndexes) {
this.stackIndexes = new int[stackIndexes.size()];
for (int i = 0; i < this.stackIndexes.length; ++i) {
this.stackIndexes[i] = stackIndexes.get(i);
}
} | 3.68 |
morf_Function_averageDistinct | /**
* Helper method to create an instance of the "average(distinct)" SQL function.
*
* @param field the field to evaluate in the average function.
 * @return an instance of the average(distinct) function.
*/
public static Function averageDistinct(AliasedField field) {
return new Function(FunctionType.AVERAGE_DISTINCT, field);
} | 3.68 |
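A brief usage sketch, assuming Morf's SqlUtils select/tableRef/field helpers; the Payment table and amount column are hypothetical.

import static org.alfasoftware.morf.sql.SqlUtils.field;
import static org.alfasoftware.morf.sql.SqlUtils.select;
import static org.alfasoftware.morf.sql.SqlUtils.tableRef;

// AVG(DISTINCT amount) over a hypothetical Payment table.
SelectStatement statement =
    select(Function.averageDistinct(field("amount")).as("averageAmount"))
        .from(tableRef("Payment"))
        .build();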