name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_SignerManager_maybeRegisterSigner | /**
* Make sure the signer class is registered once with the AWS SDK.
* @param signerName signer name
* @param signerClassName classname
* @param conf source configuration
* @throws RuntimeException if the class is not found
*/
private static void maybeRegisterSigner(String signerName,
String signerClassName, Configuration conf) {
if (!SignerFactory.isSignerRegistered(signerName)) {
// Signer is not registered with the AWS SDK.
// Load the class and register the signer.
Class<? extends Signer> clazz;
try {
clazz = (Class<? extends Signer>) conf.getClassByName(signerClassName);
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException(String
.format("Signer class [%s] not found for signer [%s]",
signerClassName, signerName), cnfe);
}
LOG.debug("Registering Custom Signer - [{}->{}]", signerName,
clazz.getName());
synchronized (SignerManager.class) {
SignerFactory.registerSigner(signerName, clazz);
}
}
} | 3.68 |
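A standalone sketch of the same register-once idiom using only the JDK; the Signer marker interface and SIGNERS registry below are hypothetical stand-ins for the AWS SDK and Hadoop types, not real APIs:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RegisterOnceExample {
    interface Signer {}  // hypothetical marker type standing in for the SDK's Signer
    static final Map<String, Class<? extends Signer>> SIGNERS = new ConcurrentHashMap<>();

    // Load the class by name and register it only if the signer name is not yet known.
    static void maybeRegister(String signerName, String signerClassName) {
        SIGNERS.computeIfAbsent(signerName, name -> {
            try {
                return Class.forName(signerClassName).asSubclass(Signer.class);
            } catch (ClassNotFoundException cnfe) {
                throw new RuntimeException(
                    String.format("Signer class [%s] not found for signer [%s]", signerClassName, name), cnfe);
            }
        });
    }
}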
shardingsphere-elasticjob_JobItemExecutorFactory_getExecutor | /**
* Get executor.
*
* @param elasticJobClass elastic job class
* @return job item executor
*/
@SuppressWarnings("unchecked")
public static JobItemExecutor getExecutor(final Class<? extends ElasticJob> elasticJobClass) {
for (ClassedJobItemExecutor each : ShardingSphereServiceLoader.getServiceInstances(ClassedJobItemExecutor.class)) {
if (each.getElasticJobClass().isAssignableFrom(elasticJobClass)) {
return each;
}
}
throw new JobConfigurationException("Can not find executor for elastic job class `%s`", elasticJobClass.getName());
} | 3.68 |
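The lookup above is the usual service-loader scan keyed by isAssignableFrom; a plain-JDK sketch of the same shape (Handler and TypedHandler are hypothetical interfaces, not ElasticJob types):

import java.util.ServiceLoader;

public class HandlerLookupExample {
    interface Handler {}  // hypothetical job-style interface
    interface TypedHandler { Class<? extends Handler> handledType(); }  // hypothetical SPI

    // Return the first provider whose declared type accepts the given class, else fail.
    static TypedHandler find(Class<? extends Handler> clazz) {
        for (TypedHandler handler : ServiceLoader.load(TypedHandler.class)) {
            if (handler.handledType().isAssignableFrom(clazz)) {
                return handler;
            }
        }
        throw new IllegalStateException("Cannot find handler for class " + clazz.getName());
    }
}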
hadoop_TaskPool_revertWith | /**
* Task to revert with after another task failed.
* @param task task to execute
* @return the builder
*/
public Builder<I> revertWith(Task<I, ?> task) {
this.revertTask = task;
return this;
} | 3.68 |
graphhopper_OSMNodeData_addCoordinatesIfMapped | /**
* Stores the given coordinates for the given OSM node ID, but only if a non-empty node type was set for this
* OSM node ID previously.
*
* @return the node type this OSM node was associated with before this method was called
*/
public long addCoordinatesIfMapped(long osmNodeId, double lat, double lon, DoubleSupplier getEle) {
long nodeType = idsByOsmNodeIds.get(osmNodeId);
if (nodeType == EMPTY_NODE)
return nodeType;
else if (nodeType == JUNCTION_NODE || nodeType == CONNECTION_NODE)
addTowerNode(osmNodeId, lat, lon, getEle.getAsDouble());
else if (nodeType == INTERMEDIATE_NODE || nodeType == END_NODE)
addPillarNode(osmNodeId, lat, lon, getEle.getAsDouble());
else
throw new IllegalStateException("Unknown node type: " + nodeType + ", or coordinates already set. Possibly duplicate OSM node ID: " + osmNodeId);
return nodeType;
} | 3.68 |
hbase_BucketCache_cacheBlock | /**
* Cache the block with the specified name and buffer.
* @param cacheKey block's cache key
* @param cachedItem block buffer
* @param inMemory if block is in-memory
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
boolean waitWhenCache) {
cacheBlockWithWait(cacheKey, cachedItem, inMemory, waitWhenCache && queueAdditionWaitTime > 0);
} | 3.68 |
flink_ResettableExternalBuffer_close | /** Delete all files and release the memory. */
@Override
public void close() {
clearChannels();
inMemoryBuffer.close();
pool.close();
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlForMathOperations13 | /**
* @return expected SQL for math operation 13
*/
protected String expectedSqlForMathOperations13() {
return "a + b + c / 2";
} | 3.68 |
flink_CompositeTypeSerializerUtil_setNestedSerializersSnapshots | /**
* Overrides the existing nested serializer's snapshots with the provided {@code
* nestedSnapshots}.
*
* @param compositeSnapshot the composite snapshot to overwrite its nested serializers.
* @param nestedSnapshots the nested snapshots to overwrite with.
*/
public static void setNestedSerializersSnapshots(
CompositeTypeSerializerSnapshot<?, ?> compositeSnapshot,
TypeSerializerSnapshot<?>... nestedSnapshots) {
NestedSerializersSnapshotDelegate delegate =
new NestedSerializersSnapshotDelegate(nestedSnapshots);
compositeSnapshot.setNestedSerializersSnapshotDelegate(delegate);
} | 3.68 |
morf_ConnectionResourcesBean_setSchemaName | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setSchemaName(java.lang.String)
*/
@Override
public void setSchemaName(String schemaName) {
this.schemaName = schemaName;
} | 3.68 |
framework_ColorUtil_getHSLPatternColor | /**
* Parses {@link Color} from matched HSL {@link Matcher}.
*
* @param matcher
* {@link Matcher} matching HSL pattern with named regex groups
* {@code hue}, {@code saturation}, and {@code light}
* @return {@link Color} parsed from {@link Matcher}
*/
public static Color getHSLPatternColor(Matcher matcher) {
int hue = Integer.parseInt(matcher.group("hue"));
int saturation = Integer.parseInt(matcher.group("saturation"));
int light = Integer.parseInt(matcher.group("light"));
int rgb = Color.HSLtoRGB(hue, saturation, light);
return new Color(rgb);
} | 3.68 |
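The matching pattern itself is not part of the snippet; a self-contained sketch of parsing HSL with the same named groups (the regex below is illustrative, not the framework's actual pattern):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HslPatternExample {
    // Named groups mirror the ones the method above expects: hue, saturation, light.
    static final Pattern HSL = Pattern.compile(
            "hsl\\((?<hue>\\d{1,3}),\\s*(?<saturation>\\d{1,3})%,\\s*(?<light>\\d{1,3})%\\)");

    public static void main(String[] args) {
        Matcher m = HSL.matcher("hsl(120, 50%, 50%)");
        if (m.matches()) {
            int hue = Integer.parseInt(m.group("hue"));
            int saturation = Integer.parseInt(m.group("saturation"));
            int light = Integer.parseInt(m.group("light"));
            System.out.println(hue + "/" + saturation + "/" + light);  // prints 120/50/50
        }
    }
}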
framework_LocaleService_createLocaleData | /**
* Creates a LocaleData instance for transportation to the client.
*
* @since 7.1
* @param locale
* The locale for which to create a LocaleData object
* @return A LocaleData object with information about the given locale
*/
protected LocaleData createLocaleData(Locale locale) {
LocaleData localeData = new LocaleData();
localeData.name = locale.toString();
Calendar c = Calendar.getInstance(locale);
c.set(2015, 0, 1);
SimpleDateFormat shortMonthFormat = new SimpleDateFormat("MMM", locale);
SimpleDateFormat longMonthFormat = new SimpleDateFormat("MMMM", locale);
int monthsInYear = c.getMaximum(Calendar.MONTH) + 1;
localeData.shortMonthNames = new String[monthsInYear];
localeData.monthNames = new String[monthsInYear];
for (int month = 0; month < monthsInYear; month++) {
c.set(Calendar.MONTH, month);
String shortMonth = shortMonthFormat.format(c.getTime());
String longMonth = longMonthFormat.format(c.getTime());
localeData.shortMonthNames[month] = shortMonth;
localeData.monthNames[month] = longMonth;
}
final DateFormatSymbols dfs = new DateFormatSymbols(locale);
// Client expects 0 based indexing, DateFormatSymbols use 1 based
localeData.shortDayNames = new String[7];
localeData.dayNames = new String[7];
String[] sDayNames = dfs.getShortWeekdays();
String[] lDayNames = dfs.getWeekdays();
for (int i = 0; i < 7; i++) {
localeData.shortDayNames[i] = sDayNames[i + 1];
localeData.dayNames[i] = lDayNames[i + 1];
}
/*
* First day of week (0 = sunday, 1 = monday)
*/
final Calendar cal = new GregorianCalendar(locale);
localeData.firstDayOfWeek = cal.getFirstDayOfWeek() - 1;
/*
* Date formatting (MM/DD/YYYY etc.)
*/
DateFormat dateFormat = DateFormat.getDateInstance(DateFormat.SHORT,
locale);
DateFormat timeFormat = DateFormat.getTimeInstance(DateFormat.SHORT,
locale);
if (!(dateFormat instanceof SimpleDateFormat)) {
getLogger().warning("Unable to get default date pattern for locale "
+ locale.toString());
dateFormat = new SimpleDateFormat();
}
if (!(timeFormat instanceof SimpleDateFormat)) {
getLogger().warning("Unable to get default time pattern for locale "
+ locale.toString());
timeFormat = new SimpleDateFormat();
}
final String datePattern = ((SimpleDateFormat) dateFormat).toPattern();
final String timePattern = ((SimpleDateFormat) timeFormat).toPattern();
localeData.dateFormat = datePattern.trim();
final boolean twelveHourClock = timePattern.indexOf("a") > -1;
// TODO there are other possibilities as well, like 'h' in french
// (ignore them, too complicated)
final String hourMinDelimiter = timePattern.indexOf(".") > -1 ? "."
: ":";
localeData.twelveHourClock = twelveHourClock;
localeData.hourMinuteDelimiter = hourMinDelimiter;
if (twelveHourClock) {
final String[] ampm = dfs.getAmPmStrings();
localeData.am = ampm[0];
localeData.pm = ampm[1];
}
return localeData;
} | 3.68 |
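The i + 1 shift above exists because DateFormatSymbols weekday arrays are 1-based (index Calendar.SUNDAY == 1 and index 0 is an empty string); a small standalone demonstration:

import java.text.DateFormatSymbols;
import java.util.Calendar;
import java.util.Locale;

public class WeekdayIndexExample {
    public static void main(String[] args) {
        DateFormatSymbols dfs = new DateFormatSymbols(Locale.ENGLISH);
        String[] days = dfs.getWeekdays();          // length 8; index 0 is an empty string
        System.out.println("[" + days[0] + "]");    // prints []
        System.out.println(days[Calendar.SUNDAY]);  // prints Sunday (Calendar.SUNDAY == 1)
        // Copying into a 0-based, 7-element array therefore reads days[i + 1], as above.
        String[] zeroBased = new String[7];
        for (int i = 0; i < 7; i++) {
            zeroBased[i] = days[i + 1];
        }
        System.out.println(zeroBased[0]);           // prints Sunday
    }
}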
querydsl_StringExpression_substring | /**
* Create a {@code this.substring(beginIndex, endIndex)} expression
*
* @param beginIndex inclusive start index
* @param endIndex exclusive end index
* @return this.substring(beginIndex, endIndex)
* @see java.lang.String#substring(int, int)
*/
public StringExpression substring(Expression<Integer> beginIndex, Expression<Integer> endIndex) {
return Expressions.stringOperation(Ops.SUBSTR_2ARGS, mixin, beginIndex, endIndex);
} | 3.68 |
hbase_HRegion_attachRegionReplicationInWALAppend | /**
* Attach {@link RegionReplicationSink#add} to the mvcc writeEntry for replicating to region
* replica.
*/
private void attachRegionReplicationInWALAppend(BatchOperation<?> batchOp,
MiniBatchOperationInProgress<Mutation> miniBatchOp, WALKeyImpl walKey, WALEdit walEdit,
WriteEntry writeEntry) {
if (!regionReplicationSink.isPresent()) {
return;
}
/**
* If {@link HRegion#regionReplicationSink} is present, only {@link MutationBatchOperation} is
* used and the {@link NonceKey} is the same for all {@link Mutation}s in the
* {@link MutationBatchOperation}. So, for HBASE-26993 case 1: if
* {@link MiniBatchOperationInProgress#getWalEditForReplicateSkipWAL} is not null and we could
* enter {@link HRegion#doWALAppend}, that means some {@link Mutation}s are
* {@link Durability#SKIP_WAL}, and we use
* {@link MiniBatchOperationInProgress#getWalEditForReplicateSkipWAL} to replicate to the region
* replica; but if {@link MiniBatchOperationInProgress#getWalEditForReplicateSkipWAL} is
* null, that means no {@link Mutation} is {@link Durability#SKIP_WAL}, so we just use walEdit
* to replicate.
*/
assert batchOp instanceof MutationBatchOperation;
WALEdit walEditToUse = miniBatchOp.getWalEditForReplicateIfExistsSkipWAL();
if (walEditToUse == null) {
walEditToUse = walEdit;
}
doAttachReplicateRegionReplicaAction(walKey, walEditToUse, writeEntry);
} | 3.68 |
pulsar_ManagedLedgerImpl_getPreviousPosition | /**
* Get the entry position that comes before the specified position in the message stream, using information from the
* ledger list and each ledger entries count.
*
* @param position
* the current position
* @return the previous position
*/
public PositionImpl getPreviousPosition(PositionImpl position) {
if (position.getEntryId() > 0) {
return PositionImpl.get(position.getLedgerId(), position.getEntryId() - 1);
}
// The previous position will be the last position of an earlier ledger
NavigableMap<Long, LedgerInfo> headMap = ledgers.headMap(position.getLedgerId(), false);
final Map.Entry<Long, LedgerInfo> firstEntry = headMap.firstEntry();
if (firstEntry == null) {
// There is no previous ledger, return an invalid position in the current ledger
return PositionImpl.get(position.getLedgerId(), -1);
}
// We need to find the most recent non-empty ledger
for (long ledgerId : headMap.descendingKeySet()) {
LedgerInfo li = headMap.get(ledgerId);
if (li != null && li.getEntries() > 0) {
return PositionImpl.get(li.getLedgerId(), li.getEntries() - 1);
}
}
// in case there are only empty ledgers, we return a position in the first one
return PositionImpl.get(firstEntry.getKey(), -1);
} | 3.68 |
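The ledger walk above is a standard NavigableMap pattern; a self-contained sketch with a plain TreeMap (the ledger IDs and entry counts below are made up for illustration):

import java.util.NavigableMap;
import java.util.TreeMap;

public class HeadMapExample {
    public static void main(String[] args) {
        NavigableMap<Long, Integer> entriesPerLedger = new TreeMap<>();  // ledgerId -> entry count
        entriesPerLedger.put(10L, 5);
        entriesPerLedger.put(20L, 0);   // an empty ledger that must be skipped
        entriesPerLedger.put(30L, 7);

        long current = 30L;
        // headMap(key, false) = all ledgers strictly before the current one.
        NavigableMap<Long, Integer> head = entriesPerLedger.headMap(current, false);
        // Walk newest-to-oldest until a non-empty ledger is found, as in the method above.
        for (long ledgerId : head.descendingKeySet()) {
            if (head.get(ledgerId) > 0) {
                System.out.println("previous position: " + ledgerId + ":" + (head.get(ledgerId) - 1));
                break;  // prints previous position: 10:4
            }
        }
    }
}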
rocketmq-connect_AbstractConfigManagementService_mergeConnectConfig | /**
* Merge newly received configs with the configs in memory.
*
* @param connectName connector name
* @param schemaAndValue received schema and value holding the connector config
* @return true if the received config is new or differs from the stored one, false otherwise
*/
private boolean mergeConnectConfig(String connectName, SchemaAndValue schemaAndValue) {
Struct value = (Struct) schemaAndValue.value();
Object targetState = value.get(FIELD_STATE);
if (!(targetState instanceof String)) {
// target state
log.error("Invalid data for target state for connector '{}': 'state' field should be a String but is {}",
connectName, className(targetState));
return false;
}
Object epoch = value.get(FIELD_EPOCH);
if (!(epoch instanceof Long)) {
// epoch
log.error("Invalid data for epoch for connector '{}': 'state' field should be a long but is {}",
connectName, className(epoch));
return false;
}
Object props = value.get(FIELD_PROPS);
if (!(props instanceof Map)) {
// properties
log.error("Invalid data for properties for connector '{}': 'state' field should be a Map but is {}",
connectName, className(props));
return false;
}
// new configs
ConnectKeyValue newConfig = new ConnectKeyValue();
newConfig.setEpoch((Long) epoch);
newConfig.setTargetState(TargetState.valueOf((String) targetState));
newConfig.setProperties((Map<String, String>) props);
// not exist
if (!connectorKeyValueStore.containsKey(connectName)) {
connectorKeyValueStore.put(connectName, newConfig);
recomputeTaskConfigs(connectName, newConfig);
return true;
}
// exist and update config
ConnectKeyValue oldConfig = connectorKeyValueStore.get(connectName);
if (!newConfig.equals(oldConfig)) {
// compare and swap
if (newConfig.getEpoch() > oldConfig.getEpoch()) {
connectorKeyValueStore.put(connectName, newConfig);
recomputeTaskConfigs(connectName, newConfig);
}
return true;
}
return false;
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_setAccountName | /**
* Sets the account name to tag all the metrics with.
* @param accountName The account name.
*/
public void setAccountName(String accountName) {
registry.tag("accountName",
"Name of the Azure Storage account that these metrics are going against",
accountName);
} | 3.68 |
hbase_StoreFileInfo_isReference | /**
* @param name file name to check.
* @return True if the path has the format of an HStoreFile reference.
*/
public static boolean isReference(final String name) {
Matcher m = REF_NAME_PATTERN.matcher(name);
return m.matches() && m.groupCount() > 1;
} | 3.68 |
framework_TooltipDelay_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 13695;
} | 3.68 |
hbase_MultiByteBuff_getLong | /**
* Returns the long value at the current position. Also advances the position by the size of long
* @return the long value at the current position
*/
@Override
public long getLong() {
checkRefCount();
int remaining = this.curItem.remaining();
if (remaining >= Bytes.SIZEOF_LONG) {
return this.curItem.getLong();
}
long l = 0;
for (int i = 0; i < Bytes.SIZEOF_LONG; i++) {
l <<= 8;
l ^= get() & 0xFF;
}
return l;
} | 3.68 |
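The fallback loop assembles a big-endian long one byte at a time; the same arithmetic in isolation (a generic sketch, not MultiByteBuff code):

public class BigEndianLongExample {
    // Assemble a long from 8 bytes, most significant byte first, mirroring the fallback loop above.
    static long toLong(byte[] bytes) {
        long l = 0;
        for (int i = 0; i < 8; i++) {
            l <<= 8;               // make room for the next byte
            l ^= bytes[i] & 0xFF;  // mask to avoid sign extension of negative bytes
        }
        return l;
    }

    public static void main(String[] args) {
        byte[] bytes = {0, 0, 0, 0, 0, 0, 1, (byte) 0xFF};
        System.out.println(toLong(bytes));  // prints 511 (1 * 256 + 255)
    }
}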
flink_DataStream_transform | /**
* Method for passing user defined operators created by the given factory along with the type
* information that will transform the DataStream.
*
* <p>This method uses the rather new operator factories and should only be used when custom
* factories are needed.
*
* @param operatorName name of the operator, for logging purposes
* @param outTypeInfo the output type of the operator
* @param operatorFactory the factory for the operator.
* @param <R> type of the return stream
* @return the data stream constructed.
*/
@PublicEvolving
public <R> SingleOutputStreamOperator<R> transform(
String operatorName,
TypeInformation<R> outTypeInfo,
OneInputStreamOperatorFactory<T, R> operatorFactory) {
return doTransform(operatorName, outTypeInfo, operatorFactory);
} | 3.68 |
hudi_HoodieLogFormatWriter_getOutputStream | /**
* Lazily opens the output stream if needed for writing.
* @return OutputStream for writing to current log file.
* @throws IOException
*/
private FSDataOutputStream getOutputStream() throws IOException {
if (this.output == null) {
boolean created = false;
while (!created) {
try {
// Block size does not matter as we will always manually auto-flush
createNewFile();
LOG.info("Created a new log file: {}", logFile);
created = true;
} catch (FileAlreadyExistsException ignored) {
LOG.info("File {} already exists, rolling over", logFile.getPath());
rollOver();
} catch (RemoteException re) {
if (re.getClassName().contentEquals(AlreadyBeingCreatedException.class.getName())) {
LOG.warn("Another task executor writing to the same log file(" + logFile + ", rolling over");
// Rollover the current log file (since cannot get a stream handle) and create new one
rollOver();
} else {
throw re;
}
}
}
}
return output;
} | 3.68 |
hbase_CloneSnapshotProcedure_postCloneSnapshot | /**
* Action after cloning from snapshot.
* @param env MasterProcedureEnv
*/
private void postCloneSnapshot(final MasterProcedureEnv env)
throws IOException, InterruptedException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
final RegionInfo[] regions =
(newRegions == null) ? null : newRegions.toArray(new RegionInfo[newRegions.size()]);
cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser());
}
} | 3.68 |
hbase_SpaceLimitSettings_validateProtoArguments | /**
* Validates that the provided protobuf SpaceQuota has the necessary information to construct a
* {@link SpaceLimitSettings}.
* @param proto The protobuf message to validate.
*/
static void validateProtoArguments(final QuotaProtos.SpaceQuota proto) {
if (!Objects.requireNonNull(proto).hasSoftLimit()) {
throw new IllegalArgumentException("Cannot handle SpaceQuota without a soft limit");
}
if (!proto.hasViolationPolicy()) {
throw new IllegalArgumentException("Cannot handle SpaceQuota without a violation policy");
}
} | 3.68 |
querydsl_JTSGeometryExpression_union | /**
* Returns a geometric object that represents the Point set
* union of this geometric object with another geometry.
*
* @param geometry other geometry
* @return union of this and the other geometry
*/
public JTSGeometryExpression<Geometry> union(Expression<? extends Geometry> geometry) {
return JTSGeometryExpressions.geometryOperation(SpatialOps.UNION, mixin, geometry);
} | 3.68 |
framework_Dependency_getType | /**
* Gets the type of the dependency.
*
* @return the type of the dependency
*/
public Type getType() {
return type;
} | 3.68 |
hadoop_S3AReadOpContext_getReadInvoker | /**
* Get invoker to use for read operations.
* @return invoker to use for read codepaths
*/
public Invoker getReadInvoker() {
return invoker;
} | 3.68 |
hbase_ColumnSchemaModel___getInMemory | /** Returns true if the IN_MEMORY attribute is present and true */
public boolean __getInMemory() {
Object o = attrs.get(IN_MEMORY);
return o != null
? Boolean.parseBoolean(o.toString())
: ColumnFamilyDescriptorBuilder.DEFAULT_IN_MEMORY;
} | 3.68 |
hbase_OperationStatus_getOperationStatusCode | /**
* Returns the operation status code.
*/
public OperationStatusCode getOperationStatusCode() {
return code;
} | 3.68 |
hadoop_DataNodeVolumeMetrics_getReadIoSampleCount | // Based on readIoRate
public long getReadIoSampleCount() {
return readIoRate.lastStat().numSamples();
} | 3.68 |
morf_UpgradeHelper_preSchemaUpgrade | /**
* preSchemaUpgrade - generates a collection of SQL statements to run before the schema upgrade.
* @param upgradeSchemas - Holds the source and target schema.
* @param viewChanges - Changes to be made to views.
* @param viewChangesDeploymentHelper - Deployment helper for the view changes.
* @return - Collection of SQL Statements.
*/
static Collection<String> preSchemaUpgrade(UpgradeSchemas upgradeSchemas,
ViewChanges viewChanges,
ViewChangesDeploymentHelper viewChangesDeploymentHelper) {
ImmutableList.Builder<String> statements = ImmutableList.builder();
final boolean deleteFromDeployedViews = upgradeSchemas.getSourceSchema().tableExists(DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME)
&& upgradeSchemas.getTargetSchema().tableExists(DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME);
for (View view : viewChanges.getViewsToDrop()) {
if (upgradeSchemas.getSourceSchema().viewExists(view.getName())) {
statements.addAll(viewChangesDeploymentHelper.dropViewIfExists(view, deleteFromDeployedViews, upgradeSchemas));
}
else {
statements.addAll(viewChangesDeploymentHelper.deregisterViewIfExists(view, deleteFromDeployedViews, upgradeSchemas));
}
}
return statements.build();
} | 3.68 |
flink_TransientBlobCache_getBlobExpiryTimes | /**
* Returns the blob expiry times - for testing purposes only!
*
* @return blob expiry times (internal state!)
*/
@VisibleForTesting
ConcurrentMap<Tuple2<JobID, TransientBlobKey>, Long> getBlobExpiryTimes() {
return blobExpiryTimes;
} | 3.68 |
querydsl_AbstractHibernateSQLQuery_setFetchSize | /**
* Set a fetch size for the underlying JDBC query.
* @param fetchSize the fetch size
*/
@SuppressWarnings("unchecked")
public Q setFetchSize(int fetchSize) {
this.fetchSize = fetchSize;
return (Q) this;
} | 3.68 |
hadoop_YarnClientUtils_getRmPrincipal | /**
* Perform the <code>_HOST</code> replacement in the {@code rmPrincipal},
* returning the result. Correctly handles HA resource manager configurations.
*
* @param rmPrincipal the principal string to prepare
* @param conf the configuration
* @return the prepared principal string
* @throws IOException thrown if there's an error replacing the host name
*/
public static String getRmPrincipal(String rmPrincipal, Configuration conf)
throws IOException {
if (rmPrincipal == null) {
throw new IllegalArgumentException("RM principal string is null");
}
if (HAUtil.isHAEnabled(conf)) {
conf = getYarnConfWithRmHaId(conf);
}
String hostname = conf.getSocketAddr(
YarnConfiguration.RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_PORT).getHostName();
return SecurityUtil.getServerPrincipal(rmPrincipal, hostname);
} | 3.68 |
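Conceptually, SecurityUtil.getServerPrincipal substitutes the _HOST token in a principal such as rm/_HOST@REALM with the resolved hostname; a simplified, Hadoop-free sketch of just that substitution (the real implementation also canonicalizes the host and handles special addresses):

public class HostSubstitutionExample {
    // Simplified stand-in for the _HOST replacement; not the real SecurityUtil logic.
    static String replaceHostPattern(String principal, String hostname) {
        String[] parts = principal.split("[/@]");
        if (parts.length >= 2 && "_HOST".equals(parts[1])) {
            return principal.replaceFirst("_HOST", hostname);
        }
        return principal;
    }

    public static void main(String[] args) {
        System.out.println(replaceHostPattern("rm/_HOST@EXAMPLE.COM", "rm1.example.com"));
        // prints rm/rm1.example.com@EXAMPLE.COM
    }
}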
morf_OracleDialect_rebuildTriggers | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#rebuildTriggers(org.alfasoftware.morf.metadata.Table)
*/
@Override
public Collection<String> rebuildTriggers(Table table) {
return rebuildSequenceAndTrigger(table, getAutoIncrementColumnForTable(table));
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateInvocationArgumentNullCheck | /**
* generate code to test whether an argument of type <code>Invocation</code> is null
*/
private String generateInvocationArgumentNullCheck(Method method) {
Class<?>[] pts = method.getParameterTypes();
return IntStream.range(0, pts.length)
.filter(i -> CLASS_NAME_INVOCATION.equals(pts[i].getName()))
.mapToObj(i -> String.format(CODE_INVOCATION_ARGUMENT_NULL_CHECK, i, i))
.findFirst()
.orElse("");
} | 3.68 |
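The stream pipeline above is a compact "index of the first parameter with a given type" search; the same shape with plain reflection (the template string and names below are illustrative, not Dubbo's generated code):

import java.lang.reflect.Method;
import java.util.stream.IntStream;

public class FirstParameterOfTypeExample {
    static String nullCheckFor(Method method, Class<?> wanted, String template) {
        Class<?>[] pts = method.getParameterTypes();
        // Find the first parameter whose type matches and format a snippet for it, else return "".
        return IntStream.range(0, pts.length)
                .filter(i -> wanted.equals(pts[i]))
                .mapToObj(i -> String.format(template, i, i))
                .findFirst()
                .orElse("");
    }

    public static void main(String[] args) throws NoSuchMethodException {
        Method m = String.class.getMethod("indexOf", String.class, int.class);
        System.out.println(nullCheckFor(m, String.class,
                "if (arg%d == null) throw new IllegalArgumentException(\"arg%d is null\");"));
        // prints if (arg0 == null) throw new IllegalArgumentException("arg0 is null");
    }
}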
hadoop_NMClientAsync_onRollbackLastReInitializationError | /**
* Error Callback for rollback of last re-initialization.
*
* @param containerId the Id of the container.
* @param t a Throwable.
*/
public void onRollbackLastReInitializationError(ContainerId containerId,
Throwable t) {} | 3.68 |
flink_LimitedConnectionsFileSystem_getStreamInactivityTimeout | /**
* Gets the milliseconds that a stream may spend not writing any bytes before it is closed as
* inactive.
*/
public long getStreamInactivityTimeout() {
return streamInactivityTimeoutNanos / 1_000_000;
} | 3.68 |
hbase_HRegion_getOpenSeqNum | /** Returns the latest sequence number that was read from storage when this region was opened */
public long getOpenSeqNum() {
return this.openSeqNum;
} | 3.68 |
hbase_HBaseServerBase_updateConfiguration | /**
* Reload the configuration from disk.
*/
public void updateConfiguration() {
LOG.info("Reloading the configuration from disk.");
// Reload the configuration from disk.
conf.reloadConfiguration();
configurationManager.notifyAllObservers(conf);
} | 3.68 |
flink_ExecNodeUtil_setManagedMemoryWeight | /**
* Sets {@link Transformation#declareManagedMemoryUseCaseAtOperatorScope(ManagedMemoryUseCase, int)}
* using the given bytes for {@link ManagedMemoryUseCase#OPERATOR}.
*/
public static <T> void setManagedMemoryWeight(
Transformation<T> transformation, long memoryBytes) {
if (memoryBytes > 0) {
final int weightInMebibyte = Math.max(1, (int) (memoryBytes >> 20));
final Optional<Integer> previousWeight =
transformation.declareManagedMemoryUseCaseAtOperatorScope(
ManagedMemoryUseCase.OPERATOR, weightInMebibyte);
if (previousWeight.isPresent()) {
throw new TableException(
"Managed memory weight has been set, this should not happen.");
}
}
} | 3.68 |
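The weight conversion is a right shift by 20 bits (bytes to MiB) clamped to a minimum of 1 so that small positive requests are not rounded down to zero; the arithmetic in isolation:

public class MebibyteWeightExample {
    // Convert a byte count to a mebibyte weight, never letting a positive request round down to 0.
    static int toWeightInMebibyte(long memoryBytes) {
        return Math.max(1, (int) (memoryBytes >> 20));  // >> 20 == divide by 1024 * 1024
    }

    public static void main(String[] args) {
        System.out.println(toWeightInMebibyte(512L * 1024));        // 0.5 MiB -> prints 1
        System.out.println(toWeightInMebibyte(64L * 1024 * 1024));  // 64 MiB  -> prints 64
    }
}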
framework_DragAndDropHandler_onDragStartOnDraggableElement | /**
* This method can be called to trigger drag and drop on any grid element
* that can be dragged and dropped.
*
* @param dragStartingEvent
* the drag triggering event, usually a {@link Event#ONMOUSEDOWN}
* or {@link Event#ONTOUCHSTART} event on the draggable element
*
* @param callback
* the callback that will handle actual drag and drop related
* operations
*/
public void onDragStartOnDraggableElement(
final NativeEvent dragStartingEvent,
final DragAndDropCallback callback) {
startPreviewHandler = Event
.addNativePreviewHandler(new NativePreviewHandler() {
private int startX = WidgetUtil
.getTouchOrMouseClientX(dragStartingEvent);
private int startY = WidgetUtil
.getTouchOrMouseClientY(dragStartingEvent);
@Override
public void onPreviewNativeEvent(NativePreviewEvent event) {
final int typeInt = event.getTypeInt();
if (typeInt == -1 && event.getNativeEvent().getType()
.toLowerCase(Locale.ROOT).contains("pointer")) {
/*
* Ignore PointerEvents since IE10 and IE11 send
* also MouseEvents for backwards compatibility.
*/
return;
}
switch (typeInt) {
case Event.ONMOUSEOVER:
case Event.ONMOUSEOUT:
// we don't care
break;
case Event.ONKEYDOWN:
case Event.ONKEYPRESS:
case Event.ONKEYUP:
case Event.ONBLUR:
case Event.ONFOCUS:
// don't cancel possible drag start
break;
case Event.ONMOUSEMOVE:
case Event.ONTOUCHMOVE:
int currentX = WidgetUtil.getTouchOrMouseClientX(
event.getNativeEvent());
int currentY = WidgetUtil.getTouchOrMouseClientY(
event.getNativeEvent());
if (Math.abs(startX - currentX) > 3
|| Math.abs(startY - currentY) > 3) {
removeStartPreviewHandler();
startDrag(dragStartingEvent, event, callback);
}
event.getNativeEvent().stopPropagation();
event.getNativeEvent().preventDefault();
event.cancel();
break;
default:
// on any other events, clean up this preview
// listener
removeStartPreviewHandler();
break;
}
}
});
} | 3.68 |
querydsl_ComparableExpression_max | /**
* Create a {@code max(this)} expression
*
* <p>Get the maximum value of this expression (aggregation)</p>
*
* @return max(this)
*/
@Override
public ComparableExpression<T> max() {
return Expressions.comparableOperation(getType(), Ops.AggOps.MAX_AGG, mixin);
} | 3.68 |
hudi_HoodieFlinkWriteClient_startAsyncCleaning | /**
* Starts async cleaning service for finished commits.
*
* <p>The Flink write client is designed to write data set as buckets
* but cleaning action should trigger after all the write actions within a
* checkpoint finish.
*/
public void startAsyncCleaning() {
tableServiceClient.startAsyncCleanerService(this);
} | 3.68 |
morf_OracleMetaDataProvider_expensiveReadTableNames | /**
* A table name reading method which is more efficient than the Oracle driver meta-data version.
*
* @see <a href="http://download.oracle.com/docs/cd/B19306_01/server.102/b14237/statviews_2094.htm">ALL_TAB_COLUMNS specification</a>
*/
private void expensiveReadTableNames() {
log.info("Starting read of table definitions");
long start = System.currentTimeMillis();
// -- Stage 1: identify tables & keys...
//
final Map<String, List<String>> primaryKeys = keyMap();
// -- Stage 2: get column data...
//
// Explicitly ignore the BIN$ tables as they are in the recycle bin (for flashback)
final String getColumnsSql = "select cols.table_name, tabcomments.comments as table_comment, cols.column_name, colcomments.COMMENTS, cols.data_type, cols.char_length, cols.data_length, cols.data_precision, cols.data_scale, cols.nullable, cols.DATA_DEFAULT "
+
"from ALL_TAB_COLUMNS cols JOIN ALL_TAB_COMMENTS tabcomments ON cols.OWNER = tabcomments.OWNER AND cols.table_name = tabcomments.table_name " +
"JOIN ALL_COL_COMMENTS colcomments ON cols.OWNER = colcomments.OWNER AND cols.table_name = colcomments.table_name AND cols.column_name = colcomments.column_name " +
"JOIN ALL_TABLES tables on cols.OWNER = tables.OWNER and cols.table_name = tables.table_name " +
"where cols.owner=? and cols.table_name not like 'BIN$%' AND tables.TEMPORARY = 'N' order by cols.table_name, cols.column_id";
runSQL(getColumnsSql, new ResultSetHandler() {
@Override
public void handle(ResultSet resultSet) throws SQLException {
while ( resultSet.next()) {
String tableName = resultSet.getString(1);
String tableComment = resultSet.getString(2);
String columnName = resultSet.getString(3);
String columnComment = resultSet.getString(4);
String dataTypeName = resultSet.getString(5);
if (isSystemTable(tableName))
continue;
try {
Integer dataLength;
if (dataTypeName.contains("CHAR")) {
dataLength = resultSet.getInt(6);
} else {
dataLength = resultSet.getInt(7);
}
Integer dataPrecision;
if (resultSet.getString(8) == null) {
dataPrecision = null;
} else {
dataPrecision = resultSet.getInt(8);
}
Integer dataScale = resultSet.getInt(9);
String nullableStr = resultSet.getString(10);
String defaultValue = determineDefaultValue(columnName);
String actualDefaultValue = determineActualDefaultValue(resultSet.getString(11));
if (!defaultValue.equals(actualDefaultValue)) {
log.warn("DEFAULT value for " + tableName + "." + columnName + " expected to be [" + defaultValue + "], but was [" + actualDefaultValue + "]");
}
handleTableColumnRow(primaryKeys, tableName, tableComment, columnName, columnComment, dataTypeName, dataLength,
dataPrecision, dataScale, nullableStr, defaultValue);
} catch (Exception e) {
throw new RuntimeException("Exception while reading metadata for table [" + tableName + "] column [" + columnName + "] datatype [" + dataTypeName + "]", e);
}
}
}
/**
* Handle the column read from the result set.
*/
private void handleTableColumnRow(final Map<String, List<String>> primaryKeys,
String tableName, String tableComment,
String columnName, String columnComment,
String dataTypeName, Integer dataLength,
Integer dataPrecision, Integer dataScale,
String nullableStr, String defaultValue) {
String commentType = null;
if (tableComment != null) {
Matcher matcher = realnameCommentMatcher.matcher(tableComment);
if (matcher.matches()) {
String tableNameFromComment = matcher.group(1);
if (tableNameFromComment.toUpperCase().equals(tableName)) {
tableName = tableNameFromComment;
} else {
throw new RuntimeException("Table name [" + tableNameFromComment + "] in comment does not match oracle table name [" + tableName + "]");
}
}
}
if (columnComment != null) {
Matcher matcher = realnameCommentMatcher.matcher(columnComment);
if (matcher.matches()) {
columnName = matcher.group(1);
commentType = matcher.group(3);
}
}
Table currentTable = tableMap.get(tableName);
if (currentTable == null) {
currentTable = table(tableName);
tableMap.put(tableName, currentTable);
}
boolean primaryKey = false;
List<String> primaryKeyColumns = primaryKeys.get(tableName.toUpperCase());
if (primaryKeyColumns != null) {
primaryKey = primaryKeyColumns.contains(columnName.toUpperCase());
}
int autoIncrementFrom = getAutoIncrementStartValue(columnComment);
boolean isAutoIncrement = autoIncrementFrom != -1;
autoIncrementFrom = autoIncrementFrom == -1 ? 0 : autoIncrementFrom;
// Deferred type column required as tables not yet excluded will be processed at this stage.
currentTable.columns().add(
new DeferredTypeColumn(
dataTypeName,
dataLength,
dataPrecision == null ? 0 : dataPrecision,
dataScale == null ? 0 : dataScale,
commentType,
columnName,
"Y".equals(nullableStr), // nullable
primaryKey, isAutoIncrement, autoIncrementFrom, defaultValue
)
);
}});
//
// -- Stage 2b: Re-order the columns as per the primary key order...
//
for( Entry<String, Table> entry : tableMap.entrySet()) {
final List<String> primaryKeysForTable = primaryKeys.get(entry.getKey().toUpperCase());
// Table which don't have a primary key return null here
if (primaryKeysForTable != null) {
sort(entry.getValue().columns(), new PrimaryKeyComparator(primaryKeysForTable));
}
}
long pointTwo = System.currentTimeMillis();
if (log.isDebugEnabled()) {
log.debug(String.format("Loaded table column list in %dms", pointTwo - start));
log.debug("Loading indexes: [" + tableMap.size() + "]");
}
Supplier<Map<String, Set<String>>> indexPartitions = Suppliers.memoize(() -> {
Map<String, Set<String>> result = new HashMap<>();
runSQL("select index_name, status from ALL_IND_PARTITIONS where index_owner=?",
resultSet -> {
while(resultSet.next()) {
result.computeIfAbsent(resultSet.getString(1), k -> new HashSet<>()).add(resultSet.getString(2));
}
});
return result;
}
);
// -- Stage 3: find the index names...
//
final String getIndexNamesSql = "select table_name, index_name, uniqueness, status from ALL_INDEXES where owner=? order by table_name, index_name";
runSQL(getIndexNamesSql, new ResultSetHandler() {
@Override
public void handle(ResultSet resultSet) throws SQLException {
int indexCount = 0;
while (resultSet.next()) {
String tableName = resultSet.getString(1);
String indexName = resultSet.getString(2);
String uniqueness = resultSet.getString(3);
String status = resultSet.getString(4);
Table currentTable = tableMap.get(tableName);
if (currentTable == null) {
log.warn(String.format("Table [%s] was not in the table map - ignoring index [%s]", tableName, indexName));
continue;
}
if (DatabaseMetaDataProviderUtils.shouldIgnoreIndex(indexName)) {
log.info("Ignoring index: [" + indexName + "]");
continue;
}
final boolean unique = "UNIQUE".equals(uniqueness);
boolean isValid = isValid(status, indexName, indexPartitions);
// don't output the primary key as an index
if(isPrimaryKeyIndex(indexName) && isValid) {
if (log.isDebugEnabled()) {
log.debug(String.format("Ignoring index [%s] on table [%s] as it is a primary key index", indexName, tableName));
}
if (!unique) {
log.warn("Primary Key on table [" + tableName + "] is backed by non-unique index [" + indexName + "]");
}
continue;
}
// Chop up the index name
if (indexName.toUpperCase().startsWith(currentTable.getName().toUpperCase())) {
indexName = currentTable.getName() + indexName.substring(currentTable.getName().length());
}
if (!isValid) {
log.fatal("Index [" + indexName + "] is not in a valid state");
indexName = indexName + "<UNUSABLE>"; // this will cause the schema checker to find a mismatch and also provide a good hint in the log messages what was wrong
}
final String indexNameFinal = indexName;
currentTable.indexes().add(new Index() {
private final List<String> columnNames = new ArrayList<>();
@Override
public boolean isUnique() {
return unique;
}
@Override
public String getName() {
return indexNameFinal;
}
@Override
public List<String> columnNames() {
return columnNames;
}
@Override
public String toString() {
return this.toStringHelper();
}
});
indexCount++;
}
if (log.isDebugEnabled()) {
log.debug(String.format("Loaded %d indexes", indexCount));
}
}
});
long pointThree = System.currentTimeMillis();
if (log.isDebugEnabled()) {
log.debug(String.format("Loaded index list in %dms", pointThree - pointTwo));
log.debug("Loading index columns");
}
// -- Stage 4: find the index columns...
//
final String getIndexColumnsSql = "select table_name, INDEX_NAME, COLUMN_NAME from ALL_IND_COLUMNS where INDEX_OWNER=? order by table_name, index_name, column_position";
runSQL(getIndexColumnsSql, new ResultSetHandler() {
@Override
public void handle(ResultSet resultSet) throws SQLException {
while (resultSet.next()) {
String tableName = resultSet.getString(1);
Table currentTable = tableMap.get(tableName);
if (currentTable == null) {
continue;
}
String indexName = resultSet.getString(2);
String columnName = resultSet.getString(3);
// Skip this column if the index is a primary key index
if (isPrimaryKeyIndex(indexName)) {
if (log.isDebugEnabled()) {
log.debug(String.format("Ignoring index [%s] on table [%s] as it is a primary key index", indexName, tableName));
}
continue;
}
if (DatabaseMetaDataProviderUtils.shouldIgnoreIndex(indexName)) {
continue;
}
Index lastIndex = null;
for (Index currentIndex : currentTable.indexes()) {
if (currentIndex.getName().equalsIgnoreCase(indexName)) {
lastIndex = currentIndex;
break;
}
}
if (lastIndex == null) {
log.warn(String.format("Ignoring index details for index [%s] on table [%s] as no index definition exists", indexName, tableName));
continue;
}
// Correct the case on the column name
for (Column currentColumn : currentTable.columns()) {
if (currentColumn.getName().equalsIgnoreCase(columnName)) {
columnName = currentColumn.getName();
break;
}
}
lastIndex.columnNames().add(columnName);
}
}
});
long end = System.currentTimeMillis();
if (log.isDebugEnabled()) log.debug(String.format("Loaded index column list in %dms", end - pointThree));
log.info(String.format("Read table metadata in %dms; %d tables", end - start, tableMap.size()));
} | 3.68 |
hbase_HRegionFileSystem_loadRegionInfoFileContent | /**
* Create a {@link RegionInfo} from the serialized version on-disk.
* @param fs {@link FileSystem} that contains the Region Info file
* @param regionDir {@link Path} to the Region Directory that contains the Info file
* @return An {@link RegionInfo} instance gotten from the Region Info file.
* @throws IOException if an error occurred during file open/read operation.
*/
public static RegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
throws IOException {
FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
try {
return RegionInfo.parseFrom(in);
} finally {
in.close();
}
} | 3.68 |
morf_WithMetaDataAdapter_close | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.dataset.DataSetProducer#close()
*/
@Override
public void close() {
super.close();
schemaProducer.close();
} | 3.68 |
hbase_SnapshotInfo_getSnapshotFilesMap | /**
* Gets the store files map for snapshot
* @param conf the {@link Configuration} to use
* @param snapshot {@link SnapshotDescription} to get stats from
* @param exec the {@link ExecutorService} to use
* @param filesMap {@link Map} the map to put the mapping entries
* @param uniqueHFilesArchiveSize {@link AtomicLong} the accumulated store file size in archive
* @param uniqueHFilesSize {@link AtomicLong} the accumulated store file size shared
* @param uniqueHFilesMobSize {@link AtomicLong} the accumulated mob store file size shared
*/
private static void getSnapshotFilesMap(final Configuration conf,
final SnapshotDescription snapshot, final ExecutorService exec,
final ConcurrentHashMap<Path, Integer> filesMap, final AtomicLong uniqueHFilesArchiveSize,
final AtomicLong uniqueHFilesSize, final AtomicLong uniqueHFilesMobSize) throws IOException {
SnapshotProtos.SnapshotDescription snapshotDesc =
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot);
Path rootDir = CommonFSUtils.getRootDir(conf);
final FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, exec,
new SnapshotReferenceUtil.SnapshotVisitor() {
@Override
public void storeFile(final RegionInfo regionInfo, final String family,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
if (!storeFile.hasReference()) {
HFileLink link = HFileLink.build(conf, snapshot.getTableName(),
regionInfo.getEncodedName(), family, storeFile.getName());
long size;
Integer count;
Path p;
AtomicLong al;
int c = 0;
if (fs.exists(link.getArchivePath())) {
p = link.getArchivePath();
al = uniqueHFilesArchiveSize;
size = fs.getFileStatus(p).getLen();
} else if (fs.exists(link.getMobPath())) {
p = link.getMobPath();
al = uniqueHFilesMobSize;
size = fs.getFileStatus(p).getLen();
} else {
p = link.getOriginPath();
al = uniqueHFilesSize;
size = link.getFileStatus(fs).getLen();
}
// If it has been counted, do not double count
count = filesMap.get(p);
if (count != null) {
c = count.intValue();
} else {
al.addAndGet(size);
}
filesMap.put(p, ++c);
}
}
});
} | 3.68 |
pulsar_TimeAverageMessageData_update | /**
* Update using a new bundle sample.
*
* @param newSample
* Most recently observed bundle stats.
*/
public void update(final NamespaceBundleStats newSample) {
update(newSample.msgThroughputIn, newSample.msgThroughputOut, newSample.msgRateIn, newSample.msgRateOut);
} | 3.68 |
graphhopper_LandmarkStorage_createLandmarksForSubnetwork | /**
* This method creates landmarks for the specified subnetwork (integer list)
*
* @return landmark mapping
*/
private boolean createLandmarksForSubnetwork(final int startNode, final byte[] subnetworks, EdgeFilter accessFilter) {
final int subnetworkId = landmarkIDs.size();
int[] tmpLandmarkNodeIds = new int[landmarks];
int logOffset = Math.max(1, landmarks / 2);
boolean pickedPrecalculatedLandmarks = false;
if (!landmarkSuggestions.isEmpty()) {
double lat = na.getLat(startNode), lon = na.getLon(startNode);
LandmarkSuggestion selectedSuggestion = null;
for (LandmarkSuggestion lmsugg : landmarkSuggestions) {
if (lmsugg.getBox().contains(lat, lon)) {
selectedSuggestion = lmsugg;
break;
}
}
if (selectedSuggestion != null) {
if (selectedSuggestion.getNodeIds().size() < tmpLandmarkNodeIds.length)
throw new IllegalArgumentException("landmark suggestions are too few " + selectedSuggestion.getNodeIds().size() + " for requested landmarks " + landmarks);
pickedPrecalculatedLandmarks = true;
for (int i = 0; i < tmpLandmarkNodeIds.length; i++) {
int lmNodeId = selectedSuggestion.getNodeIds().get(i);
tmpLandmarkNodeIds[i] = lmNodeId;
}
}
}
if (pickedPrecalculatedLandmarks) {
LOGGER.info("Picked " + tmpLandmarkNodeIds.length + " landmark suggestions, skip finding landmarks");
} else {
LandmarkExplorer explorer = findLandmarks(tmpLandmarkNodeIds, startNode, accessFilter, "create");
if (explorer.getFromCount() < minimumNodes) {
// too small subnetworks are initialized with special id==0
explorer.setSubnetworks(subnetworks, UNCLEAR_SUBNETWORK);
return false;
}
if (logDetails)
LOGGER.info("Finished searching landmarks for subnetwork " + subnetworkId + " of size " + explorer.getVisitedNodes());
}
// 2) calculate weights for all landmarks -> 'from' and 'to' weight
for (int lmIdx = 0; lmIdx < tmpLandmarkNodeIds.length; lmIdx++) {
if (Thread.currentThread().isInterrupted()) {
throw new RuntimeException("Thread was interrupted for landmark " + lmIdx);
}
int lmNodeId = tmpLandmarkNodeIds[lmIdx];
LandmarkExplorer explorer = new LandmarkExplorer(graph, this, weighting, traversalMode, accessFilter, false);
explorer.setStartNode(lmNodeId);
explorer.runAlgo();
explorer.initLandmarkWeights(lmIdx, lmNodeId, LM_ROW_LENGTH, FROM_OFFSET);
// set subnetwork id to all explored nodes, but do this only for the first landmark
if (lmIdx == 0) {
if (explorer.setSubnetworks(subnetworks, subnetworkId))
return false;
}
explorer = new LandmarkExplorer(graph, this, weighting, traversalMode, accessFilter, true);
explorer.setStartNode(lmNodeId);
explorer.runAlgo();
explorer.initLandmarkWeights(lmIdx, lmNodeId, LM_ROW_LENGTH, TO_OFFSET);
if (lmIdx == 0) {
if (explorer.setSubnetworks(subnetworks, subnetworkId))
return false;
}
if (logDetails && lmIdx % logOffset == 0)
LOGGER.info("Set landmarks weights [" + weighting + "]. "
+ "Progress " + (int) (100.0 * lmIdx / tmpLandmarkNodeIds.length) + "%");
}
// TODO set weight to SHORT_MAX if entry has either no 'from' or no 'to' entry
landmarkIDs.add(tmpLandmarkNodeIds);
return true;
} | 3.68 |
flink_PojoSerializerSnapshotData_createFrom | /**
* Creates a {@link PojoSerializerSnapshotData} from existing snapshotted configuration of a
* {@link PojoSerializer}.
*/
static <T> PojoSerializerSnapshotData<T> createFrom(
Class<T> pojoClass,
Field[] fields,
TypeSerializerSnapshot<?>[] existingFieldSerializerSnapshots,
LinkedHashMap<Class<?>, TypeSerializerSnapshot<?>>
existingRegisteredSubclassSerializerSnapshots,
Map<Class<?>, TypeSerializerSnapshot<?>>
existingNonRegisteredSubclassSerializerSnapshots) {
final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots =
new LinkedOptionalMap<>(fields.length);
for (int i = 0; i < fields.length; i++) {
Field field = fields[i];
String fieldName = (field == null) ? getDummyNameForMissingField(i) : field.getName();
fieldSerializerSnapshots.put(fieldName, field, existingFieldSerializerSnapshots[i]);
}
return new PojoSerializerSnapshotData<>(
pojoClass,
fieldSerializerSnapshots,
optionalMapOf(existingRegisteredSubclassSerializerSnapshots, Class::getName),
optionalMapOf(existingNonRegisteredSubclassSerializerSnapshots, Class::getName));
} | 3.68 |
hbase_HashTable_getCurrentKey | /**
* Get the current key
* @return the current key or null if there is no current key
*/
public ImmutableBytesWritable getCurrentKey() {
return key;
} | 3.68 |
hbase_ZKWatcher_registerListenerFirst | /**
* Register the specified listener to receive ZooKeeper events and add it as the first in the list
* of current listeners.
* @param listener the listener to register
*/
public void registerListenerFirst(ZKListener listener) {
listeners.add(0, listener);
} | 3.68 |
flink_StreamOperatorWrapper_endOperatorInput | /**
* Ends an input of the operator contained by this wrapper.
*
* @param inputId the input ID starts from 1 which indicates the first input.
*/
public void endOperatorInput(int inputId) throws Exception {
if (wrapped instanceof BoundedOneInput) {
((BoundedOneInput) wrapped).endInput();
} else if (wrapped instanceof BoundedMultiInput) {
((BoundedMultiInput) wrapped).endInput(inputId);
}
} | 3.68 |
zxing_MathUtils_sum | /**
* @param array values to sum
* @return sum of values in array
*/
public static int sum(int[] array) {
int count = 0;
for (int a : array) {
count += a;
}
return count;
} | 3.68 |
flink_GenericArrayData_isPrimitiveArray | /**
* Returns true if this is a primitive array.
*
* <p>A primitive array is an array whose elements are of primitive type.
*/
public boolean isPrimitiveArray() {
return isPrimitiveArray;
} | 3.68 |
morf_ResolvedTables_addModifiedTable | /**
* Store information about modification of given table.
*
* @param tableName modified table
*/
public void addModifiedTable(String tableName) {
modifiedTables.add(tableName.toUpperCase());
readTables.remove(tableName.toUpperCase());
} | 3.68 |
hadoop_AllocateResponse_nmTokens | /**
* Set the <code>nmTokens</code> of the response.
* @see AllocateResponse#setNMTokens(List)
* @param nmTokens <code>nmTokens</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder nmTokens(List<NMToken> nmTokens) {
allocateResponse.setNMTokens(nmTokens);
return this;
} | 3.68 |
framework_VDateField_getDate | /**
* Returns a copy of the current date. Modifying the returned date will not
* modify the value of this VDateField. Use {@link #setDate(Date)} to change
* the current date.
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @return A copy of the current date
*/
public Date getDate() {
Date current = getCurrentDate();
if (current == null) {
return null;
} else {
return (Date) getCurrentDate().clone();
}
} | 3.68 |
framework_VFilterSelect_setSelectedItemIcon | /**
* Sets the icon URI of the selected item. The icon is shown on the left
* side of the item caption text. Set the URI to null to remove the icon.
*
* @param iconUri
* The URI of the icon
*/
public void setSelectedItemIcon(String iconUri) {
if (iconUri == null || iconUri.isEmpty()) {
if (selectedItemIcon != null) {
panel.remove(selectedItemIcon);
selectedItemIcon = null;
afterSelectedItemIconChange();
}
} else {
IconWidget newIcon = new IconWidget(client.getIcon(iconUri));
if (iconEquals(newIcon, selectedItemIcon)) {
/*
* Do not update the icon if nothing has changed. Otherwise we
* can cause problems such as not being able to click in the
* icon to open the popup (blur might occur and call this
* method, icon is replaced and the click event is not delivered
* to the new icon)
*/
return;
}
if (selectedItemIcon != null) {
panel.remove(selectedItemIcon);
}
// Older IE versions don't scale icon correctly if DOM
// contains height and width attributes.
newIcon.getElement().removeAttribute("height");
newIcon.getElement().removeAttribute("width");
newIcon.addDomHandler(new LoadHandler() {
@Override
public void onLoad(LoadEvent event) {
afterSelectedItemIconChange();
}
}, LoadEvent.getType());
panel.insert(newIcon, 0);
selectedItemIcon = newIcon;
afterSelectedItemIconChange();
}
} | 3.68 |
hadoop_RollingFileSystemSink_rollLogDirIfNeeded | /**
* Check the current directory against the time stamp. If they're not
* the same, create a new directory and a new log file in that directory.
*
* @throws MetricsException thrown if an error occurs while creating the
* new directory or new log file
*/
private void rollLogDirIfNeeded() throws MetricsException {
// Because we're working relative to the clock, we use a Date instead
// of Time.monotonicNow().
Date now = new Date();
// We check whether currentOutStream is null instead of currentDirPath,
// because if currentDirPath is null, then currentOutStream is null, but
// currentOutStream can be null for other reasons. Same for nextFlush.
if ((currentOutStream == null) || now.after(nextFlush.getTime())) {
// If we're not yet connected to HDFS, create the connection
if (!initialized) {
initialized = initFs();
}
if (initialized) {
// Close the stream. This step could have been handled already by the
// flusher thread, but if it has, the PrintStream will just swallow the
// exception, which is fine.
if (currentOutStream != null) {
currentOutStream.close();
}
currentDirPath = findCurrentDirectory(now);
try {
rollLogDir();
} catch (IOException ex) {
throwMetricsException("Failed to create new log file", ex);
}
// Update the time of the next flush
updateFlushTime(now);
// Schedule the next flush at that time
scheduleFlush(nextFlush.getTime());
}
} else if (forceFlush) {
scheduleFlush(new Date());
}
} | 3.68 |
hadoop_MkdirOperation_probePathStatusOrNull | /**
* Get the status of a path, downgrading FNFE to null result.
* @param path path to probe.
* @param probes probes to exec
* @return the status or null
* @throws IOException failure other than FileNotFound
*/
private S3AFileStatus probePathStatusOrNull(final Path path,
final Set<StatusProbeEnum> probes) throws IOException {
try {
return callbacks.probePathStatus(path, probes);
} catch (FileNotFoundException fnfe) {
return null;
}
} | 3.68 |
flink_MemorySegment_getHeapMemory | /**
* Get the heap byte array object.
*
* @return Return non-null if the memory is on the heap, and return null if the memory is off
* the heap.
*/
public byte[] getHeapMemory() {
return heapMemory;
} | 3.68 |
hadoop_ExecutionSummarizer_getTraceSignature | // Generates a signature for the trace file based on
// - filename
// - modification time
// - file length
// - owner
protected static String getTraceSignature(String input) throws IOException {
Path inputPath = new Path(input);
FileSystem fs = inputPath.getFileSystem(new Configuration());
FileStatus status = fs.getFileStatus(inputPath);
Path qPath = fs.makeQualified(status.getPath());
String traceID = status.getModificationTime() + qPath.toString()
+ status.getOwner() + status.getLen();
return MD5Hash.digest(traceID).toString();
} | 3.68 |
hadoop_OBSInputStream_remainingInFile | /**
* Bytes left in stream.
*
* @return how many bytes are left to read
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public synchronized long remainingInFile() {
return this.contentLength - this.streamCurrentPos;
} | 3.68 |
morf_AbstractSqlDialectTest_stringLiteralPrefix | /**
* On some databases our string literals need prefixing with N to be
* correctly typed as a unicode string.
*
* @return prefix to insert before quoted string literal.
*/
protected String stringLiteralPrefix() {
return "";
} | 3.68 |
framework_Range_endsBefore | /**
* Checks whether this range ends before the start of another range.
*
* @param other
* the other range to compare against
* @return <code>true</code> if this range ends before the
* <code>other</code>
*/
public boolean endsBefore(final Range other) {
return getEnd() <= other.getStart();
} | 3.68 |
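endsBefore relies on the half-open [start, end) convention, so touching ranges count as non-overlapping; a minimal sketch (the Range class below is a stand-in, not Vaadin's):

public class HalfOpenRangeExample {
    // A half-open range [start, end): endsBefore is a plain <= on the boundaries.
    static class Range {
        final int start, end;
        Range(int start, int end) { this.start = start; this.end = end; }
        boolean endsBefore(Range other) { return end <= other.start; }
    }

    public static void main(String[] args) {
        Range a = new Range(0, 5);
        Range b = new Range(5, 10);
        System.out.println(a.endsBefore(b));  // true: [0,5) ends exactly where [5,10) starts
        System.out.println(b.endsBefore(a));  // false
    }
}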
hudi_BaseHoodieWriteClient_completeCompaction | /**
* Commit Compaction and track metrics.
*/
protected void completeCompaction(HoodieCommitMetadata metadata, HoodieTable table, String compactionCommitTime) {
tableServiceClient.completeCompaction(metadata, table, compactionCommitTime);
} | 3.68 |
flink_Tuple25_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>
of(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17,
T18 f18,
T19 f19,
T20 f20,
T21 f21,
T22 f22,
T23 f23,
T24 f24) {
return new Tuple25<>(
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18,
f19, f20, f21, f22, f23, f24);
} | 3.68 |
streampipes_StreamPipesClient_pipelineElementTemplates | /**
* Get API to work with pipeline element templates
*
* @return {@link org.apache.streampipes.client.api.PipelineElementTemplateApi}
*/
@Override
public IPipelineElementTemplateApi pipelineElementTemplates() {
return new PipelineElementTemplateApi(config);
} | 3.68 |
hmily_ConsulClient_pull | /**
* pull.
* @param consulConfig consul config
* @return InputStream
*/
public InputStream pull(final ConsulConfig consulConfig) {
if (consul == null) {
if (StringUtils.isNoneBlank(consulConfig.getHostAndPorts())) {
consul = Consul.builder().withMultipleHostAndPort(buildHostAndPortList(consulConfig.getHostAndPorts()), consulConfig.getBlacklistTimeInMillis()).build().newClient();
} else {
consul = Consul.builder().withHostAndPort(HostAndPort.fromString(consulConfig.getHostAndPort())).build().newClient();
}
}
Value value = consul.keyValueClient().getValue(consulConfig.getKey()).orElse(null);
if (value == null) {
return null;
}
String content = value.getValueAsString(Charset.forName("utf-8")).get();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("consul content {}", content);
}
if (StringUtils.isBlank(content)) {
return null;
}
return new ByteArrayInputStream(content.getBytes(Charset.forName("utf-8")));
} | 3.68 |
shardingsphere-elasticjob_JobNodePath_getConfigNodePath | /**
* Get configuration node path.
*
* @return configuration node path
*/
public String getConfigNodePath() {
return String.format("/%s/%s", jobName, CONFIG_NODE);
} | 3.68 |
flink_BufferManager_recycle | /**
* The exclusive buffer is recycled to this channel manager directly, and it may trigger the return
* of an extra floating buffer based on <tt>numRequiredBuffers</tt>.
*
* @param segment The exclusive segment of this channel.
*/
@Override
public void recycle(MemorySegment segment) {
@Nullable Buffer releasedFloatingBuffer = null;
synchronized (bufferQueue) {
try {
// Similar to notifyBufferAvailable(), make sure that we never add a buffer
// after channel released all buffers via releaseAllResources().
if (inputChannel.isReleased()) {
globalPool.recycleUnpooledMemorySegments(Collections.singletonList(segment));
return;
} else {
releasedFloatingBuffer =
bufferQueue.addExclusiveBuffer(
new NetworkBuffer(segment, this), numRequiredBuffers);
}
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
} finally {
bufferQueue.notifyAll();
}
}
if (releasedFloatingBuffer != null) {
releasedFloatingBuffer.recycleBuffer();
} else {
try {
inputChannel.notifyBufferAvailable(1);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
}
} | 3.68 |
framework_Tree_areChildrenAllowed | /**
* Tests if the Item with given ID can have any children.
*
* @see Container.Hierarchical#areChildrenAllowed(Object)
*/
@Override
public boolean areChildrenAllowed(Object itemId) {
return ((Container.Hierarchical) items).areChildrenAllowed(itemId);
} | 3.68 |
framework_ContainerEventProvider_getContainerDataSource | /**
* Returns the container used as data source.
*
*/
public Container.Indexed getContainerDataSource() {
return container;
} | 3.68 |
hadoop_SharedKeyCredentials_initializeMac | /**
* Initialize the HmacSha256 associated with the account key.
*/
private void initializeMac() {
// Initializes the HMAC-SHA256 Mac and SecretKey.
try {
hmacSha256 = Mac.getInstance(HMAC_SHA256);
hmacSha256.init(new SecretKeySpec(accountKey, HMAC_SHA256));
} catch (final Exception e) {
throw new IllegalArgumentException(e);
}
} | 3.68 |
hmily_OriginTrackedPropertiesLoader_isPropertyDelimiter | /**
* Returns whether the current character is an unescaped property delimiter ('=' or ':').
*
* @return the boolean
*/
public boolean isPropertyDelimiter() {
return !this.escaped && (this.character == '=' || this.character == ':');
} | 3.68 |
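The escaped flag matters because a backslash-escaped '=' or ':' must not terminate the key; a small standalone scanner illustrating the same rule (not the hmily loader itself):

public class DelimiterScanExample {
    // Return the index of the first '=' or ':' that is not preceded by a backslash escape.
    static int firstUnescapedDelimiter(String line) {
        boolean escaped = false;
        for (int i = 0; i < line.length(); i++) {
            char c = line.charAt(i);
            if (escaped) {
                escaped = false;            // the escaped character is ordinary content
            } else if (c == '\\') {
                escaped = true;             // next character is escaped
            } else if (c == '=' || c == ':') {
                return i;                   // unescaped delimiter, as in isPropertyDelimiter()
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        System.out.println(firstUnescapedDelimiter("a\\=b=c"));  // prints 4, not 1
    }
}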
flink_TypeSerializerSnapshotSerializationUtil_deserializeV2 | /** Deserialization path for Flink versions 1.7+. */
@VisibleForTesting
static <T> TypeSerializerSnapshot<T> deserializeV2(DataInputView in, ClassLoader cl)
throws IOException {
return TypeSerializerSnapshot.readVersionedSnapshot(in, cl);
} | 3.68 |
hadoop_ActiveUsersManager_getNumActiveUsers | /**
* Get number of active users i.e. users with applications which have pending
* resource requests.
* @return number of active users
*/
@Lock({Queue.class, SchedulerApplicationAttempt.class})
@Override
synchronized public int getNumActiveUsers() {
return activeUsers;
} | 3.68 |
dubbo_JValidator_generateMethodParameterClass | /**
 * Tries to generate the method parameter class.
*
* @param clazz interface class
* @param method invoke method
* @param parameterClassName generated parameterClassName
* @return Class<?> generated methodParameterClass
*/
private static Class<?> generateMethodParameterClass(Class<?> clazz, Method method, String parameterClassName)
throws Exception {
ClassPool pool = ClassGenerator.getClassPool(clazz.getClassLoader());
synchronized (parameterClassName.intern()) {
CtClass ctClass = null;
try {
ctClass = pool.getCtClass(parameterClassName);
} catch (NotFoundException ignore) {
}
if (null == ctClass) {
ctClass = pool.makeClass(parameterClassName);
ClassFile classFile = ctClass.getClassFile();
ctClass.addConstructor(CtNewConstructor.defaultConstructor(pool.getCtClass(parameterClassName)));
// parameter fields
Parameter[] parameters = method.getParameters();
Annotation[][] parameterAnnotations = method.getParameterAnnotations();
for (int i = 0; i < parameters.length; i++) {
Annotation[] annotations = parameterAnnotations[i];
AnnotationsAttribute attribute =
new AnnotationsAttribute(classFile.getConstPool(), AnnotationsAttribute.visibleTag);
for (Annotation annotation : annotations) {
if (annotation.annotationType().isAnnotationPresent(Constraint.class)) {
javassist.bytecode.annotation.Annotation ja = new javassist.bytecode.annotation.Annotation(
classFile.getConstPool(),
pool.getCtClass(annotation.annotationType().getName()));
Method[] members = annotation.annotationType().getMethods();
for (Method member : members) {
if (Modifier.isPublic(member.getModifiers())
&& member.getParameterTypes().length == 0
&& member.getDeclaringClass() == annotation.annotationType()) {
Object value = member.invoke(annotation);
if (null != value) {
MemberValue memberValue = createMemberValue(
classFile.getConstPool(),
pool.get(member.getReturnType().getName()),
value);
ja.addMemberValue(member.getName(), memberValue);
}
}
}
attribute.addAnnotation(ja);
}
}
Parameter parameter = parameters[i];
Class<?> type = parameter.getType();
String fieldName = parameter.getName();
CtField ctField = CtField.make(
"public " + type.getCanonicalName() + " " + fieldName + ";",
pool.getCtClass(parameterClassName));
ctField.getFieldInfo().addAttribute(attribute);
ctClass.addField(ctField);
}
return pool.toClass(ctClass, clazz, clazz.getClassLoader(), clazz.getProtectionDomain());
} else {
return Class.forName(parameterClassName, true, clazz.getClassLoader());
}
}
} | 3.68 |
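To make the Javassist pattern above easier to digest, here is a stripped-down sketch of generating a bean class with public fields at runtime. The class name, fields, and values are invented; it assumes javassist is on the classpath and omits the annotation-copying part.

import javassist.ClassPool;
import javassist.CtClass;
import javassist.CtField;
import javassist.CtNewConstructor;

public class JavassistBeanSketch {
    public static void main(String[] args) throws Exception {
        ClassPool pool = ClassPool.getDefault();
        // Invented class name; the real code derives it from the validated method.
        CtClass ctClass = pool.makeClass("com.example.GeneratedParameterBean");
        ctClass.addConstructor(CtNewConstructor.defaultConstructor(ctClass));
        // One public field per method parameter, as in generateMethodParameterClass.
        ctClass.addField(CtField.make("public java.lang.String name;", ctClass));
        ctClass.addField(CtField.make("public int age;", ctClass));
        // Load it and populate the fields reflectively.
        Class<?> generated = ctClass.toClass(); // newer Javassist versions prefer toClass(neighbor)
        Object bean = generated.getDeclaredConstructor().newInstance();
        generated.getField("name").set(bean, "alice");
        generated.getField("age").setInt(bean, 30);
        System.out.println(generated.getName() + " -> name=" + generated.getField("name").get(bean));
    }
}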
hbase_HFileReaderImpl_blockSeek | /**
* Within a loaded block, seek looking for the last key that is smaller than (or equal to?) the
* key we are interested in. A note on the seekBefore: if you have seekBefore = true, AND the
* first key in the block = key, then you'll get thrown exceptions. The caller has to check for
 * that case and load the previous block as appropriate.
 * @param key the key to find
 * @param seekBefore find the key before the given key in case of an exact match
 * @return 0 in case of an exact key match, 1 in case of an inexact match, -2 in case of an
 * inexact match and furthermore, the input key less than the first key of current
 * block (e.g. using a faked index key)
*/
protected int blockSeek(Cell key, boolean seekBefore) {
int klen, vlen, tlen = 0;
int lastKeyValueSize = -1;
int offsetFromPos;
do {
offsetFromPos = 0;
// Better to ensure that we use the BB Utils here
long ll = blockBuffer.getLongAfterPosition(offsetFromPos);
klen = (int) (ll >> Integer.SIZE);
vlen = (int) (Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll);
if (checkKeyLen(klen) || checkLen(vlen)) {
throw new IllegalStateException(
"Invalid klen " + klen + " or vlen " + vlen + ". Block offset: " + curBlock.getOffset()
+ ", block length: " + blockBuffer.limit() + ", position: " + blockBuffer.position()
+ " (without header)." + " path=" + reader.getPath());
}
offsetFromPos += Bytes.SIZEOF_LONG;
this.rowLen = blockBuffer.getShortAfterPosition(offsetFromPos);
blockBuffer.asSubByteBuffer(blockBuffer.position() + offsetFromPos, klen, pair);
bufBackedKeyOnlyKv.setKey(pair.getFirst(), pair.getSecond(), klen, rowLen);
int comp =
PrivateCellUtil.compareKeyIgnoresMvcc(reader.getComparator(), key, bufBackedKeyOnlyKv);
offsetFromPos += klen + vlen;
if (this.reader.getFileContext().isIncludesTags()) {
// Read short as unsigned, high byte first
tlen = ((blockBuffer.getByteAfterPosition(offsetFromPos) & 0xff) << 8)
^ (blockBuffer.getByteAfterPosition(offsetFromPos + 1) & 0xff);
if (checkLen(tlen)) {
throw new IllegalStateException("Invalid tlen " + tlen + ". Block offset: "
+ curBlock.getOffset() + ", block length: " + blockBuffer.limit() + ", position: "
+ blockBuffer.position() + " (without header)." + " path=" + reader.getPath());
}
// add the two bytes read for the tags.
offsetFromPos += tlen + (Bytes.SIZEOF_SHORT);
}
if (this.reader.getHFileInfo().shouldIncludeMemStoreTS()) {
// Directly read the mvcc based on current position
readMvccVersion(offsetFromPos);
}
if (comp == 0) {
if (seekBefore) {
if (lastKeyValueSize < 0) {
throw new IllegalStateException("blockSeek with seekBefore "
+ "at the first key of the block: key=" + CellUtil.getCellKeyAsString(key)
+ ", blockOffset=" + curBlock.getOffset() + ", onDiskSize="
+ curBlock.getOnDiskSizeWithHeader() + ", path=" + reader.getPath());
}
blockBuffer.moveBack(lastKeyValueSize);
readKeyValueLen();
return 1; // non exact match.
}
currKeyLen = klen;
currValueLen = vlen;
currTagsLen = tlen;
return 0; // indicate exact match
} else if (comp < 0) {
if (lastKeyValueSize > 0) {
blockBuffer.moveBack(lastKeyValueSize);
}
readKeyValueLen();
if (lastKeyValueSize == -1 && blockBuffer.position() == 0) {
return HConstants.INDEX_KEY_MAGIC;
}
return 1;
}
// The size of this key/value tuple, including key/value length fields.
lastKeyValueSize = klen + vlen + currMemstoreTSLen + KEY_VALUE_LEN_SIZE;
// include tag length also if tags included with KV
if (reader.getFileContext().isIncludesTags()) {
lastKeyValueSize += tlen + Bytes.SIZEOF_SHORT;
}
blockBuffer.skip(lastKeyValueSize);
} while (blockBuffer.hasRemaining());
// Seek to the last key we successfully read. This will happen if this is
// the last key/value pair in the file, in which case the following call
// to next() has to return false.
blockBuffer.moveBack(lastKeyValueSize);
readKeyValueLen();
return 1; // didn't exactly find it.
} | 3.68 |
zxing_QRCodeEncoder_encodeContentsFromZXingIntent | // It would be nice if the string encoding lived in the core ZXing library,
// but we use platform specific code like PhoneNumberUtils, so it can't.
private void encodeContentsFromZXingIntent(Intent intent) {
// Default to QR_CODE if no format given.
String formatString = intent.getStringExtra(Intents.Encode.FORMAT);
format = null;
if (formatString != null) {
try {
format = BarcodeFormat.valueOf(formatString);
} catch (IllegalArgumentException iae) {
// Ignore it then
}
}
if (format == null || format == BarcodeFormat.QR_CODE) {
String type = intent.getStringExtra(Intents.Encode.TYPE);
if (type != null && !type.isEmpty()) {
this.format = BarcodeFormat.QR_CODE;
encodeQRCodeContents(intent, type);
}
} else {
String data = intent.getStringExtra(Intents.Encode.DATA);
if (data != null && !data.isEmpty()) {
contents = data;
displayContents = data;
title = activity.getString(R.string.contents_text);
}
}
} | 3.68 |
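For reference, the eventual encoding step behind this class boils down to ZXing's writer API; a rough stand-alone sketch using only zxing-core, with a placeholder URL and an arbitrary size, printed as ASCII instead of an Android Bitmap.

import com.google.zxing.BarcodeFormat;
import com.google.zxing.MultiFormatWriter;
import com.google.zxing.WriterException;
import com.google.zxing.common.BitMatrix;

public class QrEncodeSketch {
    public static void main(String[] args) throws WriterException {
        String contents = "https://example.com"; // stand-in for the Intent data
        BitMatrix matrix = new MultiFormatWriter().encode(contents, BarcodeFormat.QR_CODE, 33, 33);
        // Dump the matrix as text; the app would render it into a Bitmap instead.
        StringBuilder sb = new StringBuilder();
        for (int y = 0; y < matrix.getHeight(); y++) {
            for (int x = 0; x < matrix.getWidth(); x++) {
                sb.append(matrix.get(x, y) ? "##" : "  ");
            }
            sb.append('\n');
        }
        System.out.print(sb);
    }
}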
hadoop_AbfsHttpOperation_sendRequest | /**
* Sends the HTTP request. Note that HttpUrlConnection requires that an
* empty buffer be sent in order to set the "Content-Length: 0" header, which
* is required by our endpoint.
*
* @param buffer the request entity body.
 * @param offset an offset into the buffer where the data begins.
* @param length the length of the data in the buffer.
*
* @throws IOException if an error occurs.
*/
public void sendRequest(byte[] buffer, int offset, int length) throws IOException {
this.connection.setDoOutput(true);
this.connection.setFixedLengthStreamingMode(length);
if (buffer == null) {
// An empty buffer is sent to set the "Content-Length: 0" header, which
// is required by our endpoint.
buffer = new byte[]{};
offset = 0;
length = 0;
}
// send the request body
long startTime = 0;
startTime = System.nanoTime();
OutputStream outputStream = null;
// Updates the expected bytes to be sent based on length.
this.expectedBytesToBeSent = length;
try {
try {
/* Without expect header enabled, if getOutputStream() throws
an exception, it gets caught by the restOperation. But with
expect header enabled we return back without throwing an exception
for the correct response code processing.
*/
outputStream = getConnOutputStream();
} catch (IOException e) {
/* If getOutputStream fails with an exception and expect header
is enabled, we return back without throwing an exception to
the caller. The caller is responsible for setting the correct status code.
If expect header is not enabled, we throw back the exception.
*/
String expectHeader = getConnProperty(EXPECT);
if (expectHeader != null && expectHeader.equals(HUNDRED_CONTINUE)) {
LOG.debug("Getting output stream failed with expect header enabled, returning back ", e);
return;
} else {
LOG.debug("Getting output stream failed without expect header enabled, throwing exception ", e);
throw e;
}
}
// update bytes sent for successful as well as failed attempts via the
// accompanying statusCode.
this.bytesSent = length;
// If this fails with or without expect header enabled,
// it throws an IOException.
outputStream.write(buffer, offset, length);
} finally {
// Closing the opened output stream
if (outputStream != null) {
outputStream.close();
}
this.sendRequestTimeMs = elapsedTimeMs(startTime);
}
} | 3.68 |
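The note about sending an empty buffer to force a "Content-Length: 0" header is general HttpURLConnection behaviour; a small illustration with a placeholder URL (point it at an endpoint you control before running):

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class EmptyBodyPutSketch {
    public static void main(String[] args) throws Exception {
        URL url = new URL("https://example.invalid/resource"); // placeholder endpoint
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("PUT");
        connection.setDoOutput(true);
        // Fixed-length streaming with length 0 makes the client send "Content-Length: 0".
        connection.setFixedLengthStreamingMode(0);
        try (OutputStream out = connection.getOutputStream()) {
            out.write(new byte[0]); // explicit empty body
        }
        System.out.println("HTTP " + connection.getResponseCode());
    }
}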
hadoop_StartupProgress_endStep | /**
* Ends execution of the specified step within the specified phase. This is
* a no-op if the phase is already completed.
*
* @param phase Phase within which the step should be ended
* @param step Step to end
*/
public void endStep(Phase phase, Step step) {
if (!isComplete(phase)) {
lazyInitStep(phase, step).endTime = monotonicNow();
}
LOG.debug("End of the step. Phase: {}, Step: {}", phase, step);
} | 3.68 |
hbase_HFileWriterImpl_appendMetaBlock | /**
* Add a meta block to the end of the file. Call before close(). Metadata blocks are expensive.
* Fill one with a bunch of serialized data rather than do a metadata block per metadata instance.
* If metadata is small, consider adding to file info using
 * {@link #appendFileInfo(byte[], byte[])}.
 * @param metaBlockName name of the block
 * @param content will call readFields to get data later (DO NOT REUSE)
*/
@Override
public void appendMetaBlock(String metaBlockName, Writable content) {
byte[] key = Bytes.toBytes(metaBlockName);
int i;
for (i = 0; i < metaNames.size(); ++i) {
// stop when the current key is greater than our own
byte[] cur = metaNames.get(i);
if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0, key.length) > 0) {
break;
}
}
metaNames.add(i, key);
metaData.add(i, content);
} | 3.68 |
flink_AbstractStreamOperator_getRuntimeContext | /**
* Returns a context that allows the operator to query information about the execution and also
* to interact with systems such as broadcast variables and managed state. This also allows to
* register timers.
*/
@VisibleForTesting
public StreamingRuntimeContext getRuntimeContext() {
return runtimeContext;
} | 3.68 |
hbase_QuotaTableUtil_quotasFromData | /*
 * =========================================================================
 * Quotas protobuf helpers
*/
protected static Quotas quotasFromData(final byte[] data) throws IOException {
return quotasFromData(data, 0, data.length);
} | 3.68 |
morf_SelectStatementBuilder_except | /**
* Perform an EXCEPT set operation with another {@code selectStatement}, retaining
 * all rows which exist only in the top select statement.
*
* @param selectStatement the other select statement that contains entries that
* will not be present in the final result set.
* @return this, for method chaining.
*/
public SelectStatementBuilder except(SelectStatement selectStatement) {
setOperators.add(new ExceptSetOperator(this.build(), selectStatement));
return this;
} | 3.68 |
AreaShop_TeleportFeature_hasTeleportLocation | /**
* Check if the region has a teleportLocation specified.
 * @return true if the region has a teleport location, false otherwise
*/
public boolean hasTeleportLocation() {
return getRegion().getConfigurationSectionSetting("general.teleportLocation") != null;
} | 3.68 |
hadoop_SWebHdfs_createSWebHdfsFileSystem | /**
* Returns a new {@link SWebHdfsFileSystem}, with the given configuration.
*
* @param conf configuration
* @return new SWebHdfsFileSystem
*/
private static SWebHdfsFileSystem createSWebHdfsFileSystem(
Configuration conf) {
SWebHdfsFileSystem fs = new SWebHdfsFileSystem();
fs.setConf(conf);
return fs;
} | 3.68 |
flink_BlobServerConnection_run | /** Main connection work method. Accepts requests until the other side closes the connection. */
@Override
public void run() {
try {
final InputStream inputStream = this.clientSocket.getInputStream();
final OutputStream outputStream = this.clientSocket.getOutputStream();
while (true) {
// Read the requested operation
final int operation = inputStream.read();
if (operation < 0) {
// done, no one is asking anything from us
return;
}
switch (operation) {
case PUT_OPERATION:
put(inputStream, outputStream, new byte[BUFFER_SIZE]);
break;
case GET_OPERATION:
get(inputStream, outputStream, new byte[BUFFER_SIZE]);
break;
default:
throw new IOException("Unknown operation " + operation);
}
}
} catch (SocketException e) {
// this happens when the remote site closes the connection
LOG.debug("Socket connection closed", e);
} catch (Throwable t) {
LOG.error(
"Error while executing BLOB connection from {}.",
clientSocket.getRemoteSocketAddress(),
t);
} finally {
closeSilently(clientSocket, LOG);
blobServer.unregisterConnection(this);
}
} | 3.68 |
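The loop above is a classic single-byte opcode protocol; a toy server showing the same dispatch shape, with an arbitrary port, made-up opcode values, and placeholder handlers:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.ServerSocket;
import java.net.Socket;

public class OpcodeDispatchSketch {
    static final int PUT_OPERATION = 0;
    static final int GET_OPERATION = 1;

    public static void main(String[] args) throws IOException {
        try (ServerSocket server = new ServerSocket(12345); // arbitrary port
             Socket client = server.accept()) {
            InputStream in = client.getInputStream();
            OutputStream out = client.getOutputStream();
            while (true) {
                int operation = in.read(); // one opcode byte per request
                if (operation < 0) {
                    return; // peer closed the connection
                }
                switch (operation) {
                    case PUT_OPERATION:
                        out.write('P'); // placeholder for a real put handler
                        break;
                    case GET_OPERATION:
                        out.write('G'); // placeholder for a real get handler
                        break;
                    default:
                        throw new IOException("Unknown operation " + operation);
                }
                out.flush();
            }
        }
    }
}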
zxing_AlignmentPatternFinder_centerFromEnd | /**
* Given a count of black/white/black pixels just seen and an end position,
* figures the location of the center of this black/white/black run.
*/
private static float centerFromEnd(int[] stateCount, int end) {
return (end - stateCount[2]) - stateCount[1] / 2.0f;
} | 3.68 |
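A quick worked example of that arithmetic with made-up run lengths: for a symmetric 2-4-2 black/white/black run ending at x = 20, the centre comes out at 16.0.

public class CenterFromEndSketch {
    static float centerFromEnd(int[] stateCount, int end) {
        // Step back over the trailing black run, then half of the middle run.
        return (end - stateCount[2]) - stateCount[1] / 2.0f;
    }

    public static void main(String[] args) {
        System.out.println(centerFromEnd(new int[] {2, 4, 2}, 20)); // prints 16.0
    }
}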
querydsl_NumberExpression_mod | /**
* Create a {@code mod(this, num)} expression
*
* @param num
* @return mod(this, num)
*/
public NumberExpression<T> mod(T num) {
return Expressions.numberOperation(getType(), Ops.MOD, mixin, ConstantImpl.create(num));
} | 3.68 |
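A hedged usage sketch with an ad-hoc path (in a real project this would be a property of a generated Q-type); the printed form is approximate:

import com.querydsl.core.types.dsl.BooleanExpression;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.NumberPath;

public class ModExpressionSketch {
    public static void main(String[] args) {
        // Ad-hoc integer path standing in for a generated query type's field.
        NumberPath<Integer> amount = Expressions.numberPath(Integer.class, "amount");
        BooleanExpression isEven = amount.mod(2).eq(0);
        System.out.println(isEven); // prints something like: amount % 2 = 0
    }
}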
flink_TableChange_modifyColumnPosition | /**
* A table change to modify the column position.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> MODIFY <column_name> <original_column_type> <column_position>
* </pre>
*
* @param oldColumn the definition of the old column.
* @param columnPosition the new position of the column.
* @return a TableChange represents the modification.
*/
static ModifyColumnPosition modifyColumnPosition(
Column oldColumn, ColumnPosition columnPosition) {
return new ModifyColumnPosition(oldColumn, columnPosition);
} | 3.68 |
morf_DeleteStatement_shallowCopy | /**
* Performs a shallow copy to a builder, allowing a duplicate
* to be created and modified.
*
* @return A builder, initialised as a duplicate of this statement.
*/
@Override
public DeleteStatementBuilder shallowCopy() {
return new DeleteStatementBuilder(this);
} | 3.68 |
framework_AbstractComponentTest_constructComponent | /**
* Construct the component that is to be tested. This method uses a no-arg
* constructor by default. Override to customize.
*
* @return Instance of the component that is to be tested.
* @throws IllegalAccessException
* @throws InstantiationException
*/
protected T constructComponent() {
try {
return getTestClass().newInstance();
} catch (Exception e) {
throw new RuntimeException(
"Failed to instantiate " + getTestClass(), e);
}
} | 3.68 |
hbase_CompositeImmutableSegment_close | /**
* Closing a segment before it is being discarded
*/
@Override
public void close() {
for (ImmutableSegment s : segments) {
s.close();
}
} | 3.68 |
flink_HiveParserDefaultGraphWalker_startWalking | // starting point for walking.
public void startWalking(Collection<Node> startNodes, HashMap<Node, Object> nodeOutput)
throws SemanticException {
toWalk.addAll(startNodes);
while (toWalk.size() > 0) {
Node nd = toWalk.remove(0);
walk(nd);
// Some walkers extending DefaultGraphWalker e.g. ForwardWalker
// do not use opQueue and rely uniquely in the toWalk structure,
// thus we store the results produced by the dispatcher here
// TODO: rewriting the logic of those walkers to use opQueue
if (nodeOutput != null && getDispatchedList().contains(nd)) {
nodeOutput.put(nd, retMap.get(nd));
}
}
// Store the results produced by the dispatcher
while (!opQueue.isEmpty()) {
Node node = opQueue.poll();
if (nodeOutput != null && getDispatchedList().contains(node)) {
nodeOutput.put(node, retMap.get(node));
}
}
} | 3.68 |
flink_AdaptiveScheduler_computeVertexParallelismStoreForExecution | /**
* Creates the parallelism store that should be used to build the {@link ExecutionGraph}, which
* will respect the vertex parallelism of the passed {@link JobGraph} in all execution modes.
*
* @param jobGraph The job graph for execution.
* @param executionMode The mode of scheduler execution.
* @param defaultMaxParallelismFunc a function for computing a default max parallelism if none
* is specified on a given vertex
* @return The parallelism store.
*/
@VisibleForTesting
static VertexParallelismStore computeVertexParallelismStoreForExecution(
JobGraph jobGraph,
SchedulerExecutionMode executionMode,
Function<JobVertex, Integer> defaultMaxParallelismFunc) {
if (executionMode == SchedulerExecutionMode.REACTIVE) {
return computeReactiveModeVertexParallelismStore(
jobGraph.getVertices(), defaultMaxParallelismFunc, false);
}
return SchedulerBase.computeVertexParallelismStore(
jobGraph.getVertices(), defaultMaxParallelismFunc);
} | 3.68 |
flink_JobManagerSharedServices_shutdown | /**
* Shutdown the {@link JobMaster} services.
*
* <p>This method makes sure all services are closed or shut down, even when an exception
* occurred in the shutdown of one component. The first encountered exception is thrown, with
* successive exceptions added as suppressed exceptions.
*
* @throws Exception The first Exception encountered during shutdown.
*/
public void shutdown() throws Exception {
Throwable exception = null;
try {
ExecutorUtils.gracefulShutdown(
SHUTDOWN_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS, futureExecutor, ioExecutor);
} catch (Throwable t) {
exception = t;
}
try {
shuffleMaster.close();
} catch (Throwable t) {
exception = ExceptionUtils.firstOrSuppressed(t, exception);
}
libraryCacheManager.shutdown();
if (exception != null) {
ExceptionUtils.rethrowException(
exception, "Error while shutting down JobManager services");
}
} | 3.68 |
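The "throw the first exception, attach the rest as suppressed" pattern used here can be reproduced with plain JDK types; a minimal sketch with fake services:

import java.util.Arrays;
import java.util.List;

public class SuppressedShutdownSketch {
    public static void main(String[] args) {
        List<AutoCloseable> services = Arrays.asList(
                () -> { throw new IllegalStateException("first failure"); },
                () -> System.out.println("closed cleanly"),
                () -> { throw new IllegalStateException("second failure"); });

        Exception firstFailure = null;
        for (AutoCloseable service : services) {
            try {
                service.close();
            } catch (Exception e) {
                if (firstFailure == null) {
                    firstFailure = e;            // remember the first error ...
                } else {
                    firstFailure.addSuppressed(e); // ... and attach the rest to it
                }
            }
        }
        if (firstFailure != null) {
            firstFailure.printStackTrace(); // later failures show up under "Suppressed:"
        }
    }
}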
flink_TypeSerializerSchemaCompatibility_isCompatibleAsIs | /**
* Returns whether or not the type of the compatibility is {@link Type#COMPATIBLE_AS_IS}.
*
* @return whether or not the type of the compatibility is {@link Type#COMPATIBLE_AS_IS}.
*/
public boolean isCompatibleAsIs() {
return resultType == Type.COMPATIBLE_AS_IS;
} | 3.68 |