name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
framework_ClassResource_setBufferSize | /**
* Sets the size of the download buffer used for this resource.
*
* @param bufferSize
* the size of the buffer in bytes.
*
* @see #getBufferSize()
*/
public void setBufferSize(int bufferSize) {
this.bufferSize = bufferSize;
} | 3.68 |
hadoop_StoragePolicySatisfyManager_getPendingSPSPaths | /**
* @return the number of paths to be processed by storage policy satisfier.
*/
public int getPendingSPSPaths() {
return pathsToBeTraversed.size();
} | 3.68 |
hbase_RegionHDFSBlockLocationFinder_internalGetTopBlockLocation | /**
* Returns an ordered list of hosts that are hosting the blocks for this region. The weight of
* each host is the sum of the block lengths of all files on that host, so the first host in the
* list is the server which holds the most bytes of the given region's HFiles.
* @param region region
* @return ordered list of hosts holding blocks of the specified region
*/
private HDFSBlocksDistribution internalGetTopBlockLocation(RegionInfo region) {
try {
TableDescriptor tableDescriptor = getDescriptor(region.getTable());
if (tableDescriptor != null) {
HDFSBlocksDistribution blocksDistribution =
provider.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
return blocksDistribution;
}
} catch (IOException ioe) {
LOG.warn("IOException during HDFSBlocksDistribution computation for region = {}",
region.getEncodedName(), ioe);
}
return EMPTY_BLOCK_DISTRIBUTION;
} | 3.68 |
dubbo_Pane_setValue | /**
 * Sets a new value on the pane, used to reset the instance.
*
* @param newData the new value.
*/
public void setValue(T newData) {
this.value = newData;
} | 3.68 |
hudi_HoodieAvroUtils_recordNeedsRewriteForExtendedAvroTypePromotion | /**
* Avro does not support type promotion from numbers to string. This function returns true if
* it will be necessary to rewrite the record to support this promotion.
* NOTE: this does not determine whether the writerSchema and readerSchema are compatible.
 * It only checks, as quickly as possible, whether the reader expects a number to be promoted to a string.
*/
public static boolean recordNeedsRewriteForExtendedAvroTypePromotion(Schema writerSchema, Schema readerSchema) {
if (writerSchema.equals(readerSchema)) {
return false;
}
switch (readerSchema.getType()) {
case RECORD:
Map<String, Schema.Field> writerFields = new HashMap<>();
for (Schema.Field field : writerSchema.getFields()) {
writerFields.put(field.name(), field);
}
for (Schema.Field field : readerSchema.getFields()) {
if (writerFields.containsKey(field.name())) {
if (recordNeedsRewriteForExtendedAvroTypePromotion(writerFields.get(field.name()).schema(), field.schema())) {
return true;
}
}
}
return false;
case ARRAY:
if (writerSchema.getType().equals(ARRAY)) {
return recordNeedsRewriteForExtendedAvroTypePromotion(writerSchema.getElementType(), readerSchema.getElementType());
}
return false;
case MAP:
if (writerSchema.getType().equals(MAP)) {
return recordNeedsRewriteForExtendedAvroTypePromotion(writerSchema.getValueType(), readerSchema.getValueType());
}
return false;
case UNION:
return recordNeedsRewriteForExtendedAvroTypePromotion(getActualSchemaFromUnion(writerSchema, null), getActualSchemaFromUnion(readerSchema, null));
case ENUM:
case STRING:
case BYTES:
return needsRewriteToString(writerSchema);
default:
return false;
}
} | 3.68 |
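A minimal sketch of how the check above behaves, assuming hudi-common and Avro are on the classpath; the import package for HoodieAvroUtils is an assumption. The writer stores an int where the reader expects a string, which is exactly the promotion the method probes for.

```java
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.hudi.avro.HoodieAvroUtils; // assumed package for the snippet above

public class TypePromotionCheckSketch {
    public static void main(String[] args) {
        // Writer wrote an int field; reader expects a string for the same field.
        Schema writer = SchemaBuilder.record("r").fields().requiredInt("f").endRecord();
        Schema reader = SchemaBuilder.record("r").fields().requiredString("f").endRecord();
        // Avro resolution cannot promote numbers to string, so a rewrite should be needed (true).
        System.out.println(
            HoodieAvroUtils.recordNeedsRewriteForExtendedAvroTypePromotion(writer, reader));
    }
}
```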
hbase_TableRecordReaderImpl_init | /**
* Build the scanner. Not done in constructor to allow for extension.
*/
public void init() throws IOException {
restart(startRow);
} | 3.68 |
hbase_HFileArchiveUtil_getRegionArchiveDir | /**
* Get the archive directory for a given region under the specified table
* @param rootDir {@link Path} to the root directory where hbase files are stored (for building
* the archive path)
* @param tableName name of the table to archive. Cannot be null.
* @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
* should not be archived
*/
public static Path getRegionArchiveDir(Path rootDir, TableName tableName,
String encodedRegionName) {
// get the archive directory for a table
Path archiveDir = getTableArchivePath(rootDir, tableName);
return HRegion.getRegionDir(archiveDir, encodedRegionName);
} | 3.68 |
hbase_ConnectionFactory_createAsyncConnection | /**
* Create a new AsyncConnection instance using the passed {@code conf} and {@code user}.
* AsyncConnection encapsulates all housekeeping for a connection to the cluster. All tables and
* interfaces created from returned connection share zookeeper connection, meta cache, and
* connections to region servers and masters.
* <p>
* The caller is responsible for calling {@link AsyncConnection#close()} on the returned
* connection instance.
* <p>
* Usually you should only create one AsyncConnection instance in your code and use it everywhere
* as it is thread safe.
* @param conf configuration
* @param user the user the asynchronous connection is for
 * @param connectionAttributes attributes to be sent along to server during connection establishment
* @return AsyncConnection object wrapped by CompletableFuture
*/
public static CompletableFuture<AsyncConnection> createAsyncConnection(Configuration conf,
final User user, Map<String, byte[]> connectionAttributes) {
return TraceUtil.tracedFuture(() -> {
CompletableFuture<AsyncConnection> future = new CompletableFuture<>();
ConnectionRegistry registry = ConnectionRegistryFactory.getRegistry(conf);
addListener(registry.getClusterId(), (clusterId, error) -> {
if (error != null) {
registry.close();
future.completeExceptionally(error);
return;
}
if (clusterId == null) {
registry.close();
future.completeExceptionally(new IOException("clusterid came back null"));
return;
}
Class<? extends AsyncConnection> clazz = conf.getClass(HBASE_CLIENT_ASYNC_CONNECTION_IMPL,
AsyncConnectionImpl.class, AsyncConnection.class);
try {
future.complete(
user.runAs((PrivilegedExceptionAction<? extends AsyncConnection>) () -> ReflectionUtils
.newInstance(clazz, conf, registry, clusterId, null, user, connectionAttributes)));
} catch (Exception e) {
registry.close();
future.completeExceptionally(e);
}
});
return future;
}, "ConnectionFactory.createAsyncConnection");
} | 3.68 |
shardingsphere-elasticjob_JobNodePath_getServerNodePath | /**
* Get server node path.
*
* @param serverIp server IP address
* @return server node path
*/
public String getServerNodePath(final String serverIp) {
return String.format("%s/%s", getServerNodePath(), serverIp);
} | 3.68 |
hadoop_AsyncGet_wait | /**
 * Wait on the given object using the {@link #get(long, TimeUnit)} timeout semantics.
 * @param obj the object to wait on.
 * @param timeout the maximum time to wait; a negative value waits indefinitely, zero returns immediately.
 * @param unit the time unit of the timeout.
* @throws InterruptedException if the thread is interrupted.
*/
public static void wait(Object obj, long timeout, TimeUnit unit)
throws InterruptedException {
if (timeout < 0) {
obj.wait();
} else if (timeout > 0) {
obj.wait(unit.toMillis(timeout));
}
} | 3.68 |
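A self-contained sketch of the timeout semantics implemented above (negative waits indefinitely, zero returns immediately, positive waits up to the given duration); the helper is re-implemented locally so it runs without Hadoop on the classpath.

```java
import java.util.concurrent.TimeUnit;

public class AsyncWaitSketch {
    // Same semantics as the snippet above: negative timeout waits indefinitely,
    // zero returns immediately, positive waits up to that long. Note Object.wait(0)
    // alone would wait forever, which is why the zero case is skipped.
    static void waitFor(Object obj, long timeout, TimeUnit unit) throws InterruptedException {
        if (timeout < 0) {
            obj.wait();
        } else if (timeout > 0) {
            obj.wait(unit.toMillis(timeout));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Object lock = new Object();
        synchronized (lock) { // wait() requires holding the monitor
            long start = System.nanoTime();
            waitFor(lock, 100, TimeUnit.MILLISECONDS); // returns after ~100 ms, nobody notifies
            System.out.println("waited ~" + (System.nanoTime() - start) / 1_000_000 + " ms");
        }
    }
}
```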
framework_VLayoutSlot_reportActualRelativeWidth | /**
* Override this method to report the expected outer width to the
* LayoutManager. By default does nothing.
*
* @param allocatedWidth
* the width to set (including margins, borders and paddings) in
* pixels
*/
protected void reportActualRelativeWidth(int allocatedWidth) {
// Default implementation does nothing
} | 3.68 |
hbase_DisabledTableSnapshotHandler_snapshotRegions | // TODO consider parallelizing these operations since they are independent. Right now it's just
// easier to keep them serial though
@Override
public void snapshotRegions(List<Pair<RegionInfo, ServerName>> regionsAndLocations)
throws IOException, KeeperException {
try {
// 1. get all the regions hosting this table.
// extract each pair to separate lists
Set<RegionInfo> regions = new HashSet<>();
for (Pair<RegionInfo, ServerName> p : regionsAndLocations) {
// Don't include non-default regions
RegionInfo hri = p.getFirst();
if (RegionReplicaUtil.isDefaultReplica(hri)) {
regions.add(hri);
}
}
// handle the mob files if any.
boolean mobEnabled = MobUtils.hasMobColumns(htd);
if (mobEnabled) {
// snapshot the mob files as an offline region.
RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(htd.getTableName());
regions.add(mobRegionInfo);
}
// 2. for each region, write all the info to disk
String msg = "Starting to write region info and WALs for regions for offline snapshot:"
+ ClientSnapshotDescriptionUtils.toString(snapshot);
LOG.info(msg);
status.setStatus(msg);
ThreadPoolExecutor exec = SnapshotManifest.createExecutor(conf, "DisabledTableSnapshot");
try {
ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
@Override
public void editRegion(final RegionInfo regionInfo) throws IOException {
snapshotManifest.addRegion(CommonFSUtils.getTableDir(rootDir, snapshotTable),
regionInfo);
}
});
} finally {
exec.shutdown();
}
} catch (Exception e) {
// make sure we capture the exception to propagate back to the client later
String reason = "Failed snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)
+ " due to exception:" + e.getMessage();
ForeignException ee = new ForeignException(reason, e);
monitor.receive(ee);
status.abort("Snapshot of table: " + snapshotTable + " failed because " + e.getMessage());
} finally {
LOG.debug(
"Marking snapshot" + ClientSnapshotDescriptionUtils.toString(snapshot) + " as finished.");
}
} | 3.68 |
hadoop_TimelineDomain_getModifiedTime | /**
* Get the modified time of the domain
*
* @return the modified time of the domain
*/
@XmlElement(name = "modifiedtime")
public Long getModifiedTime() {
return modifiedTime;
} | 3.68 |
framework_WebBrowser_getBrowserVersion | /**
* Gets the complete browser version as string. The version is given by the
* browser through the user agent string and usually consists of
* dot-separated numbers. Note that the string may contain characters other
* than dots and digits.
*
* @return the complete browser version or {@code null} if unknown
* @since 8.4
*/
public String getBrowserVersion() {
return browserDetails != null ? browserDetails.getBrowserVersion()
: null;
} | 3.68 |
hadoop_FilePosition_invalidate | /**
* Marks the current position as invalid.
*/
public void invalidate() {
buffer = null;
bufferStartOffset = -1;
data = null;
} | 3.68 |
hadoop_TimelineEntities_addEntity | /**
* Add a single entity into the existing entity list
*
* @param entity
* a single entity
*/
public void addEntity(TimelineEntity entity) {
entities.add(entity);
} | 3.68 |
hbase_CompositeImmutableSegment_dump | // Debug methods
/**
* Dumps all cells of the segment into the given log
*/
@Override
void dump(Logger log) {
for (ImmutableSegment s : segments) {
s.dump(log);
}
} | 3.68 |
hadoop_RenameOperation_removeSourceObjects | /**
* Remove source objects.
* @param keys list of keys to delete
* @throws IOException failure
*/
@Retries.RetryTranslated
private void removeSourceObjects(
final List<ObjectIdentifier> keys)
throws IOException {
// remove the keys
// list what is being deleted for the interest of anyone
// who is trying to debug why objects are no longer there.
if (LOG.isDebugEnabled()) {
LOG.debug("Initiating delete operation for {} objects", keys.size());
for (ObjectIdentifier objectIdentifier : keys) {
LOG.debug(" {} {}", objectIdentifier.key(),
objectIdentifier.versionId() != null ? objectIdentifier.versionId() : "");
}
}
Invoker.once("rename " + sourcePath + " to " + destPath,
sourcePath.toString(), () ->
callbacks.removeKeys(
keys,
false
));
} | 3.68 |
flink_TimestampsAndWatermarksOperator_processWatermarkStatus | /** Override the base implementation to completely ignore statuses propagated from upstream. */
@Override
public void processWatermarkStatus(WatermarkStatus watermarkStatus) throws Exception {} | 3.68 |
hbase_BaseLoadBalancer_roundRobinAssignment | /**
* Round-robin a list of regions to a list of servers
*/
private void roundRobinAssignment(BalancerClusterState cluster, List<RegionInfo> regions,
List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignments) {
Random rand = ThreadLocalRandom.current();
List<RegionInfo> unassignedRegions = new ArrayList<>();
int numServers = servers.size();
int numRegions = regions.size();
int max = (int) Math.ceil((float) numRegions / numServers);
int serverIdx = 0;
if (numServers > 1) {
serverIdx = rand.nextInt(numServers);
}
int regionIdx = 0;
for (int j = 0; j < numServers; j++) {
ServerName server = servers.get((j + serverIdx) % numServers);
List<RegionInfo> serverRegions = new ArrayList<>(max);
for (int i = regionIdx; i < numRegions; i += numServers) {
RegionInfo region = regions.get(i % numRegions);
if (cluster.wouldLowerAvailability(region, server)) {
unassignedRegions.add(region);
} else {
serverRegions.add(region);
cluster.doAssignRegion(region, server);
}
}
assignments.put(server, serverRegions);
regionIdx++;
}
List<RegionInfo> lastFewRegions = new ArrayList<>();
// assign the remaining regions by going through the list, trying servers one by one
serverIdx = rand.nextInt(numServers);
for (RegionInfo region : unassignedRegions) {
boolean assigned = false;
for (int j = 0; j < numServers; j++) { // try all servers one by one
ServerName server = servers.get((j + serverIdx) % numServers);
if (cluster.wouldLowerAvailability(region, server)) {
continue;
} else {
assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
cluster.doAssignRegion(region, server);
serverIdx = (j + serverIdx + 1) % numServers; // resume from the next server
assigned = true;
break;
}
}
if (!assigned) {
lastFewRegions.add(region);
}
}
// just sprinkle the rest of the regions on random regionservers. The balanceCluster will
// make it optimal later. We can end up with this if numReplicas > numServers.
for (RegionInfo region : lastFewRegions) {
int i = rand.nextInt(numServers);
ServerName server = servers.get(i);
assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
cluster.doAssignRegion(region, server);
}
} | 3.68 |
hbase_RawBytesFixedLength_decode | /**
* Read a {@code byte[]} from the buffer {@code src}.
*/
public byte[] decode(PositionedByteRange src, int length) {
return ((RawBytes) base).decode(src, length);
} | 3.68 |
framework_VCalendarAction_setActionEndDate | /**
* Set the date and time when the action ends.
*
* @param actionEndDate
* The date and time when the action ends
*/
public void setActionEndDate(Date actionEndDate) {
this.actionEndDate = actionEndDate;
} | 3.68 |
framework_SerializablePredicate_and | /**
* Returns a composed predicate that represents a short-circuiting logical
* AND of this predicate and another. When evaluating the composed
* predicate, if this predicate is {@code false}, then the {@code other}
* predicate is not evaluated.
*
* <p>
* Any exceptions thrown during evaluation of either predicate are relayed
* to the caller; if evaluation of this predicate throws an exception, the
* {@code other} predicate will not be evaluated.
*
* @param other
* a predicate that will be logically-ANDed with this predicate
* @return a composed predicate that represents the short-circuiting logical
* AND of this predicate and the {@code other} predicate
* @throws NullPointerException
* if other is null
* @since 8.5
*/
default SerializablePredicate<T> and(
SerializablePredicate<? super T> other) {
Objects.requireNonNull(other);
return t -> test(t) && other.test(t);
} | 3.68 |
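A short usage sketch of the same short-circuiting composition; it uses the standard java.util.function.Predicate, whose and() contract SerializablePredicate mirrors, so it runs without Vaadin on the classpath.

```java
import java.util.function.Predicate;

public class PredicateAndSketch {
    public static void main(String[] args) {
        Predicate<String> nonNull = s -> s != null;
        Predicate<String> shortEnough = s -> s.length() <= 5; // would NPE on null if evaluated
        Predicate<String> valid = nonNull.and(shortEnough);

        System.out.println(valid.test("abc"));  // true
        System.out.println(valid.test(null));   // false; shortEnough is never evaluated (short-circuit)
        System.out.println(valid.test("too long to pass")); // false
    }
}
```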
hadoop_ActiveUsersManager_activateApplication | /**
* An application has new outstanding requests.
*
* @param user application user
* @param applicationId activated application
*/
@Lock({Queue.class, SchedulerApplicationAttempt.class})
@Override
synchronized public void activateApplication(
String user, ApplicationId applicationId) {
Set<ApplicationId> userApps = usersApplications.get(user);
if (userApps == null) {
userApps = new HashSet<ApplicationId>();
usersApplications.put(user, userApps);
++activeUsers;
metrics.incrActiveUsers();
LOG.debug("User {} added to activeUsers, currently: {}", user,
activeUsers);
}
if (userApps.add(applicationId)) {
metrics.activateApp(user);
}
} | 3.68 |
hudi_HoodieIndex_updateLocation | /**
* Extracts the location of written records, and updates the index.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
public HoodieData<WriteStatus> updateLocation(
HoodieData<WriteStatus> writeStatuses, HoodieEngineContext context,
HoodieTable hoodieTable, String instant) throws HoodieIndexException {
return updateLocation(writeStatuses, context, hoodieTable);
} | 3.68 |
hudi_BaseRollbackActionExecutor_executeRollback | /**
* Execute rollback and fetch rollback stats.
* @param instantToRollback instant to be rolled back.
* @param rollbackPlan instance of {@link HoodieRollbackPlan} for which rollback needs to be executed.
* @return list of {@link HoodieRollbackStat}s.
*/
protected List<HoodieRollbackStat> executeRollback(HoodieInstant instantToRollback, HoodieRollbackPlan rollbackPlan) {
return new BaseRollbackHelper(table.getMetaClient(), config).performRollback(context, instantToRollback, rollbackPlan.getRollbackRequests());
} | 3.68 |
flink_TSetClientInfoReq_findByThriftIdOrThrow | /**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new java.lang.IllegalArgumentException(
"Field " + fieldId + " doesn't exist!");
return fields;
} | 3.68 |
morf_ViewChangesDeploymentHelper_deregisterAllViews | /**
* Creates SQL statements for removing all views from the view register.
*
* @return SQL statements to be run to de-register all views.
* @deprecated kept to ensure backwards compatibility.
*/
@Deprecated
public List<String> deregisterAllViews() {
return deregisterAllViews(new UpgradeSchemas(schema(), schema()));
} | 3.68 |
hudi_RunLengthDecoder_readDictionaryIds | /**
* Decoding for dictionary ids. The IDs are populated into `values` and the nullability is
* populated into `nulls`.
*/
void readDictionaryIds(
int total,
WritableIntVector values,
WritableColumnVector nulls,
int rowId,
int level,
RunLengthDecoder data) {
int left = total;
while (left > 0) {
if (this.currentCount == 0) {
this.readNextGroup();
}
int n = Math.min(left, this.currentCount);
switch (mode) {
case RLE:
if (currentValue == level) {
data.readDictionaryIdData(n, values, rowId);
} else {
nulls.setNulls(rowId, n);
}
break;
case PACKED:
for (int i = 0; i < n; ++i) {
if (currentBuffer[currentBufferIdx++] == level) {
values.setInt(rowId + i, data.readInteger());
} else {
nulls.setNullAt(rowId + i);
}
}
break;
default:
throw new AssertionError();
}
rowId += n;
left -= n;
currentCount -= n;
}
} | 3.68 |
hudi_PartialUpdateAvroPayload_mergeOldRecord | /**
* Merge old record with new record.
*
 * @param oldRecord the old record to merge with
 * @param schema the Avro schema of the records
 * @param isOldRecordNewer whether the old record has a newer ordering value than this record
 * @param isPreCombining flag for the deleted-record combine logic:
 * 1. preCombine: if the delete record is newer, return the merged record with _hoodie_is_deleted = true
 * 2. combineAndGetUpdateValue: if the delete record is newer, return empty since we don't need to store deleted data to storage
 * @return the merged record, or empty if the record should be deleted
* @throws IOException
*/
private Option<IndexedRecord> mergeOldRecord(IndexedRecord oldRecord,
Schema schema,
boolean isOldRecordNewer, boolean isPreCombining) throws IOException {
Option<IndexedRecord> recordOption = getInsertValue(schema, isPreCombining);
if (!recordOption.isPresent() && !isPreCombining) {
// use natural order for delete record
return Option.empty();
}
if (isOldRecordNewer && schema.getField(HoodieRecord.COMMIT_TIME_METADATA_FIELD) != null) {
// handling disorder, should use the metadata fields of the updating record
return mergeDisorderRecordsWithMetadata(schema, (GenericRecord) oldRecord, (GenericRecord) recordOption.get(), isPreCombining);
} else if (isOldRecordNewer) {
return mergeRecords(schema, (GenericRecord) oldRecord, (GenericRecord) recordOption.get());
} else {
return mergeRecords(schema, (GenericRecord) recordOption.get(), (GenericRecord) oldRecord);
}
} | 3.68 |
morf_UnsupportedDatabaseTestStatement_evaluate | /**
* @see org.junit.runners.model.Statement#evaluate()
*/
@Override
public void evaluate() {
String message = String.format("Test %s ignored by %s as it is marked as unsupported", description.getMethodName(),
description.getTestClass().getSimpleName());
log.info(message);
Assume.assumeTrue(message, false);
} | 3.68 |
hbase_WhileMatchFilter_toByteArray | /** Returns the filter serialized using pb */
@Override
public byte[] toByteArray() throws IOException {
FilterProtos.WhileMatchFilter.Builder builder = FilterProtos.WhileMatchFilter.newBuilder();
builder.setFilter(ProtobufUtil.toFilter(this.filter));
return builder.build().toByteArray();
} | 3.68 |
hbase_MutableRegionInfo_containsRow | /**
* Return true if the given row falls in this region.
*/
@Override
public boolean containsRow(byte[] row) {
CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName);
return cellComparator.compareRows(row, startKey) >= 0
&& (cellComparator.compareRows(row, endKey) < 0
|| Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY));
} | 3.68 |
hbase_HRegionServer_constructRegionServer | /**
* Utility for constructing an instance of the passed HRegionServer class.
*/
static HRegionServer constructRegionServer(final Class<? extends HRegionServer> regionServerClass,
final Configuration conf) {
try {
Constructor<? extends HRegionServer> c =
regionServerClass.getConstructor(Configuration.class);
return c.newInstance(conf);
} catch (Exception e) {
throw new RuntimeException(
"Failed construction of " + "Regionserver: " + regionServerClass.toString(), e);
}
} | 3.68 |
hudi_BaseHoodieTableServiceClient_getInflightTimelineExcludeCompactionAndClustering | /**
* Get inflight timeline excluding compaction and clustering.
*
 * @param metaClient the table meta client
 * @return the inflight timeline excluding compaction and clustering instants
*/
private HoodieTimeline getInflightTimelineExcludeCompactionAndClustering(HoodieTableMetaClient metaClient) {
HoodieTimeline inflightTimelineWithReplaceCommit = metaClient.getCommitsTimeline().filterPendingExcludingCompaction();
HoodieTimeline inflightTimelineExcludeClusteringCommit = inflightTimelineWithReplaceCommit.filter(instant -> {
if (instant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION)) {
Option<Pair<HoodieInstant, HoodieClusteringPlan>> instantPlan = ClusteringUtils.getClusteringPlan(metaClient, instant);
return !instantPlan.isPresent();
} else {
return true;
}
});
return inflightTimelineExcludeClusteringCommit;
} | 3.68 |
flink_MergingSharedSlotProfileRetrieverFactory_getSlotProfile | /**
* Computes a {@link SlotProfile} of an execution slot sharing group.
*
 * <p>The preferred locations of the {@link SlotProfile} are a union of the preferred
* locations of all executions sharing the slot. The input locations within the bulk are
* ignored to avoid cyclic dependencies within the region, e.g. in case of all-to-all
* pipelined connections, so that the allocations do not block each other.
*
* <p>The preferred {@link AllocationID}s of the {@link SlotProfile} are all previous {@link
* AllocationID}s of all executions sharing the slot.
*
* <p>The {@link SlotProfile} also refers to all reserved {@link AllocationID}s of the job.
*
* @param executionSlotSharingGroup executions sharing the slot.
* @param physicalSlotResourceProfile {@link ResourceProfile} of the slot.
* @return {@link SlotProfile} to allocate for the {@code executionSlotSharingGroup}.
*/
@Override
public SlotProfile getSlotProfile(
ExecutionSlotSharingGroup executionSlotSharingGroup,
ResourceProfile physicalSlotResourceProfile) {
Collection<AllocationID> priorAllocations = new HashSet<>();
Collection<TaskManagerLocation> preferredLocations = new ArrayList<>();
for (ExecutionVertexID execution : executionSlotSharingGroup.getExecutionVertexIds()) {
priorAllocationIdRetriever.apply(execution).ifPresent(priorAllocations::add);
preferredLocations.addAll(
preferredLocationsRetriever.getPreferredLocations(
execution, producersToIgnore));
}
return SlotProfile.priorAllocation(
physicalSlotResourceProfile,
physicalSlotResourceProfile,
preferredLocations,
priorAllocations,
reservedAllocationIds);
} | 3.68 |
framework_Flash_getCodetype | /**
* Returns the current codetype.
*
* @see #setCodetype(String)
* @since 7.4.1
* @return Current codetype.
*/
public String getCodetype() {
return getState(false).codetype;
} | 3.68 |
graphhopper_VectorTile_addAllFeatures | /**
* <pre>
* The actual features in this tile.
* </pre>
*
* <code>repeated .vector_tile.Tile.Feature features = 2;</code>
*/
public Builder addAllFeatures(
java.lang.Iterable<? extends vector_tile.VectorTile.Tile.Feature> values) {
if (featuresBuilder_ == null) {
ensureFeaturesIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, features_);
onChanged();
} else {
featuresBuilder_.addAllMessages(values);
}
return this;
} | 3.68 |
querydsl_JTSPointExpression_y | /**
* The y-coordinate value for this Point.
*
* @return y-coordinate
*/
public NumberExpression<Double> y() {
if (y == null) {
y = Expressions.numberOperation(Double.class, SpatialOps.Y, mixin);
}
return y;
} | 3.68 |
morf_ConnectionResourcesBean_toString | /**
* {@inheritDoc}
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(this.getClass().getSimpleName()).append(": ");
builder.append("JDBC URL [").append(getJdbcUrl()).append("] ");
builder.append("Username [").append(userName).append("] ");
builder.append("Password [").append(StringUtils.isBlank(password) ? "NOT SET" : "********").append("] ");
builder.append("Host name [").append(hostName).append("] ");
builder.append("Port [").append(port).append("] ");
builder.append("Database name [").append(databaseName).append("] ");
builder.append("Schema name [").append(schemaName).append("] ");
builder.append("Instance name [").append(instanceName).append("]");
return builder.toString();
} | 3.68 |
framework_TableQuery_removeListener | /**
* @deprecated As of 7.0, replaced by
* {@link #removeRowIdChangeListener(QueryDelegate.RowIdChangeListener)}
*/
@Override
@Deprecated
public void removeListener(RowIdChangeListener listener) {
removeRowIdChangeListener(listener);
} | 3.68 |
flink_DynamicSourceUtils_isUpsertSource | /** Returns true if the table is an upsert source. */
public static boolean isUpsertSource(
ResolvedSchema resolvedSchema, DynamicTableSource tableSource) {
if (!(tableSource instanceof ScanTableSource)) {
return false;
}
ChangelogMode mode = ((ScanTableSource) tableSource).getChangelogMode();
boolean isUpsertMode =
mode.contains(RowKind.UPDATE_AFTER) && !mode.contains(RowKind.UPDATE_BEFORE);
boolean hasPrimaryKey = resolvedSchema.getPrimaryKey().isPresent();
return isUpsertMode && hasPrimaryKey;
} | 3.68 |
flink_ExecutionConfig_setRestartStrategy | /**
* Sets the restart strategy to be used for recovery.
*
* <pre>{@code
* ExecutionConfig config = env.getConfig();
*
* config.setRestartStrategy(RestartStrategies.fixedDelayRestart(
* 10, // number of retries
 * 1000 // delay between retries
 * ));
* }</pre>
*
* @param restartStrategyConfiguration Configuration defining the restart strategy to use
*/
@PublicEvolving
public void setRestartStrategy(
RestartStrategies.RestartStrategyConfiguration restartStrategyConfiguration) {
this.restartStrategyConfiguration =
Preconditions.checkNotNull(restartStrategyConfiguration);
} | 3.68 |
zxing_PDF417ResultMetadata_getOptionalData | /**
* @return always null
* @deprecated use dedicated already parsed fields
*/
@Deprecated
public int[] getOptionalData() {
return optionalData;
} | 3.68 |
framework_GridConnector_getId | /**
* Fetches id from the row object that corresponds with the given
* rowIndex.
*
* @since 7.6.1
* @param rowIndex
* the index of the row for which to fetch the id
* @return id of the row if such id exists, {@code null} otherwise
*/
private String getId(int rowIndex) {
JsonObject row = getWidget().getDataSource().getRow(rowIndex);
if (!row.hasKey(GridState.JSONKEY_DETAILS_VISIBLE) || row
.getString(GridState.JSONKEY_DETAILS_VISIBLE).isEmpty()) {
return null;
}
return row.getString(GridState.JSONKEY_DETAILS_VISIBLE);
} | 3.68 |
morf_SchemaChangeSequence_removeTable | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#removeTable(org.alfasoftware.morf.metadata.Table)
*/
@Override
public void removeTable(Table table) {
RemoveTable removeTable = new RemoveTable(table);
visitor.visit(removeTable);
schemaAndDataChangeVisitor.visit(removeTable);
} | 3.68 |
hbase_WALProcedureMap_merge | /**
* Merge the given {@link WALProcedureMap} into this one. The {@link WALProcedureMap} passed in
* will be cleared after merging.
*/
public void merge(WALProcedureMap other) {
other.procMap.forEach(procMap::putIfAbsent);
maxModifiedProcId = Math.max(maxModifiedProcId, other.maxModifiedProcId);
minModifiedProcId = Math.min(minModifiedProcId, other.minModifiedProcId);
other.procMap.clear();
other.maxModifiedProcId = Long.MIN_VALUE;
other.minModifiedProcId = Long.MAX_VALUE;
} | 3.68 |
flink_AvroParquetReaders_forGenericRecord | /**
* Creates a new {@link AvroParquetRecordFormat} that reads the parquet file into Avro {@link
* GenericRecord GenericRecords}.
*
* <p>To read into {@link GenericRecord GenericRecords}, this method needs an Avro Schema. That
* is because Flink needs to be able to serialize the results in its data flow, which is very
* inefficient without the schema. And while the Schema is stored in the Avro file header, Flink
* needs this schema during 'pre-flight' time when the data flow is set up and wired, which is
* before there is access to the files.
*/
public static StreamFormat<GenericRecord> forGenericRecord(final Schema schema) {
return new AvroParquetRecordFormat<>(
new GenericRecordAvroTypeInfo(schema),
// Must override the lambda representation because of a bug in shading lambda
// serialization, see FLINK-28043 for more details.
new SerializableSupplier<GenericData>() {
@Override
public GenericData get() {
return GenericData.get();
}
});
} | 3.68 |
hbase_HbaseHandlerMetricsProxy_newInstance | // for thrift 2
public static THBaseService.Iface newInstance(THBaseService.Iface handler, ThriftMetrics metrics,
Configuration conf) {
return (THBaseService.Iface) Proxy.newProxyInstance(handler.getClass().getClassLoader(),
new Class[] { THBaseService.Iface.class },
new HbaseHandlerMetricsProxy(handler, metrics, conf));
} | 3.68 |
flink_OperationTreeBuilder_getUniqueName | /** Return a unique name that does not exist in usedFieldNames according to the input name. */
private String getUniqueName(String inputName, Collection<String> usedFieldNames) {
int i = 0;
String resultName = inputName;
while (usedFieldNames.contains(resultName)) {
resultName = resultName + "_" + i;
i += 1;
}
return resultName;
} | 3.68 |
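A runnable sketch of the suffixing loop above, copied locally to show how the counter accumulates when several candidate names are already taken.

```java
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class UniqueNameSketch {
    // Same loop as the snippet above.
    static String getUniqueName(String inputName, Collection<String> usedFieldNames) {
        int i = 0;
        String resultName = inputName;
        while (usedFieldNames.contains(resultName)) {
            resultName = resultName + "_" + i;
            i += 1;
        }
        return resultName;
    }

    public static void main(String[] args) {
        List<String> used = Arrays.asList("f0", "f0_0");
        // "f0" and "f0_0" are both taken, so the loop keeps appending: f0 -> f0_0 -> f0_0_1
        System.out.println(getUniqueName("f0", used)); // f0_0_1
        System.out.println(getUniqueName("f1", used)); // f1
    }
}
```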
flink_AvroInputFormat_setReuseAvroValue | /**
* Sets the flag whether to reuse the Avro value instance for all records. By default, the input
* format reuses the Avro value.
*
* @param reuseAvroValue True, if the input format should reuse the Avro value instance, false
* otherwise.
*/
public void setReuseAvroValue(boolean reuseAvroValue) {
this.reuseAvroValue = reuseAvroValue;
} | 3.68 |
hmily_SofaHmilyInventoryApplication_main | /**
* main.
*
* @param args args.
*/
public static void main(final String[] args) {
SpringApplication springApplication = new SpringApplication(SofaHmilyInventoryApplication.class);
springApplication.setWebApplicationType(WebApplicationType.NONE);
springApplication.run(args);
} | 3.68 |
flink_TaskSlot_remove | /**
* Remove the task identified by the given execution attempt id.
*
* @param executionAttemptId identifying the task to be removed
* @return The removed task if there was any; otherwise null.
*/
public T remove(ExecutionAttemptID executionAttemptId) {
return tasks.remove(executionAttemptId);
} | 3.68 |
rocketmq-connect_WorkerConnector_reconfigure | /**
 * Reconfigure the connector with the given configuration and restart it.
 *
 * @param keyValue the new connector configuration
*/
public void reconfigure(ConnectKeyValue keyValue) {
try {
this.keyValue = keyValue;
initialize();
connector.stop();
connector.start(keyValue);
} catch (Throwable throwable) {
throw new ConnectException(throwable);
}
} | 3.68 |
hbase_NamespacesInstanceResource_put | /**
* Build a response for PUT alter namespace with properties specified.
* @param model properties used for alter.
* @param uriInfo (JAX-RS context variable) request URL
* @return response code.
*/
@PUT
@Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response put(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) {
return processUpdate(model, true, uriInfo);
} | 3.68 |
framework_BinderValidationStatus_isOk | /**
* Gets whether validation for the binder passed or not.
*
* @return {@code true} if validation has passed, {@code false} if not
*/
public boolean isOk() {
return !hasErrors();
} | 3.68 |
AreaShop_RegionSign_getStringLocation | /**
* Location string to be used as key in maps.
* @return Location string
*/
public String getStringLocation() {
return SignsFeature.locationToString(getLocation());
} | 3.68 |
framework_Notification_setStyleName | /**
* Sets the style name for the notification message.
*
* @param styleName
* The desired style name
*/
public void setStyleName(String styleName) {
getState().styleName = styleName;
} | 3.68 |
hbase_HRegionServer_stop | /**
* Stops the regionserver.
* @param msg Status message
* @param force True if this is a regionserver abort
* @param user The user executing the stop request, or null if no user is associated
*/
public void stop(final String msg, final boolean force, final User user) {
if (!this.stopped) {
LOG.info("***** STOPPING region server '" + this + "' *****");
if (this.rsHost != null) {
// when forced via abort don't allow CPs to override
try {
this.rsHost.preStop(msg, user);
} catch (IOException ioe) {
if (!force) {
LOG.warn("The region server did not stop", ioe);
return;
}
LOG.warn("Skipping coprocessor exception on preStop() due to forced shutdown", ioe);
}
}
this.stopped = true;
LOG.info("STOPPED: " + msg);
// Wakes run() if it is sleeping
sleeper.skipSleepCycle();
}
} | 3.68 |
flink_AllWindowedStream_aggregate | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given aggregate function. This means
* that the window function typically has only a single value to process when called.
*
* @param aggregateFunction The aggregation function that is used for incremental aggregation.
* @param windowFunction The process window function.
* @param accumulatorType Type information for the internal accumulator type of the aggregation
* function
 * @param aggregateResultType Type information for the result type of the aggregate function
 * @param resultType Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
* @param <ACC> The type of the AggregateFunction's accumulator
* @param <V> The type of AggregateFunction's result, and the WindowFunction's input
* @param <R> The type of the elements in the resulting stream, equal to the WindowFunction's
* result type
*/
@PublicEvolving
public <ACC, V, R> SingleOutputStreamOperator<R> aggregate(
AggregateFunction<T, ACC, V> aggregateFunction,
ProcessAllWindowFunction<V, R, W> windowFunction,
TypeInformation<ACC> accumulatorType,
TypeInformation<V> aggregateResultType,
TypeInformation<R> resultType) {
checkNotNull(aggregateFunction, "aggregateFunction");
checkNotNull(windowFunction, "windowFunction");
checkNotNull(accumulatorType, "accumulatorType");
checkNotNull(aggregateResultType, "aggregateResultType");
checkNotNull(resultType, "resultType");
if (aggregateFunction instanceof RichFunction) {
throw new UnsupportedOperationException(
"This aggregate function cannot be a RichFunction.");
}
// clean the closures
windowFunction = input.getExecutionEnvironment().clean(windowFunction);
aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction);
final String callLocation = Utils.getCallLocationName();
final String udfName = "AllWindowedStream." + callLocation;
final String opName = windowAssigner.getClass().getSimpleName();
final String opDescription;
final KeySelector<T, Byte> keySel = input.getKeySelector();
OneInputStreamOperator<T, R> operator;
if (evictor != null) {
@SuppressWarnings({"unchecked", "rawtypes"})
TypeSerializer<StreamRecord<T>> streamRecordSerializer =
(TypeSerializer<StreamRecord<T>>)
new StreamElementSerializer(
input.getType()
.createSerializer(
getExecutionEnvironment().getConfig()));
ListStateDescriptor<StreamRecord<T>> stateDesc =
new ListStateDescriptor<>("window-contents", streamRecordSerializer);
opDescription =
"TriggerWindow("
+ windowAssigner
+ ", "
+ stateDesc
+ ", "
+ trigger
+ ", "
+ evictor
+ ", "
+ udfName
+ ")";
operator =
new EvictingWindowOperator<>(
windowAssigner,
windowAssigner.getWindowSerializer(
getExecutionEnvironment().getConfig()),
keySel,
input.getKeyType()
.createSerializer(getExecutionEnvironment().getConfig()),
stateDesc,
new InternalAggregateProcessAllWindowFunction<>(
aggregateFunction, windowFunction),
trigger,
evictor,
allowedLateness,
lateDataOutputTag);
} else {
AggregatingStateDescriptor<T, ACC, V> stateDesc =
new AggregatingStateDescriptor<>(
"window-contents",
aggregateFunction,
accumulatorType.createSerializer(
getExecutionEnvironment().getConfig()));
opDescription =
"TriggerWindow("
+ windowAssigner
+ ", "
+ stateDesc
+ ", "
+ trigger
+ ", "
+ udfName
+ ")";
operator =
new WindowOperator<>(
windowAssigner,
windowAssigner.getWindowSerializer(
getExecutionEnvironment().getConfig()),
keySel,
input.getKeyType()
.createSerializer(getExecutionEnvironment().getConfig()),
stateDesc,
new InternalSingleValueProcessAllWindowFunction<>(windowFunction),
trigger,
allowedLateness,
lateDataOutputTag);
}
return input.transform(opName, resultType, operator)
.setDescription(opDescription)
.forceNonParallel();
} | 3.68 |
hadoop_VersionedWritable_readFields | // javadoc from Writable
@Override
public void readFields(DataInput in) throws IOException {
byte version = in.readByte(); // read version
if (version != getVersion())
throw new VersionMismatchException(getVersion(), version);
} | 3.68 |
framework_AbstractComponent_readSize | /**
* Reads the size of this component from the given design attributes. If the
 * attributes do not contain relevant size information, the defaults are
 * consulted.
*
* @param attributes
* the design attributes
*/
private void readSize(Attributes attributes) {
// read width
if (attributes.hasKey("width-auto") || attributes.hasKey("size-auto")) {
this.setWidth(null);
} else if (attributes.hasKey("width-full")
|| attributes.hasKey("size-full")) {
this.setWidth("100%");
} else if (attributes.hasKey("width")) {
this.setWidth(attributes.get("width"));
}
// read height
if (attributes.hasKey("height-auto")
|| attributes.hasKey("size-auto")) {
this.setHeight(null);
} else if (attributes.hasKey("height-full")
|| attributes.hasKey("size-full")) {
this.setHeight("100%");
} else if (attributes.hasKey("height")) {
this.setHeight(attributes.get("height"));
}
} | 3.68 |
hbase_RequestConverter_buildBulkLoadHFileRequest | /**
* Create a protocol buffer bulk load request
* @return a bulk load request
*/
public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
final Token<?> userToken, final String bulkToken, boolean copyFiles, List<String> clusterIds,
boolean replicate) {
RegionSpecifier region =
RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName);
ClientProtos.DelegationToken protoDT = null;
if (userToken != null) {
protoDT = ClientProtos.DelegationToken.newBuilder()
.setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier()))
.setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword()))
.setKind(userToken.getKind().toString()).setService(userToken.getService().toString())
.build();
}
List<ClientProtos.BulkLoadHFileRequest.FamilyPath> protoFamilyPaths =
new ArrayList<>(familyPaths.size());
if (!familyPaths.isEmpty()) {
ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder =
ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder();
for (Pair<byte[], String> el : familyPaths) {
protoFamilyPaths.add(pathBuilder.setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst()))
.setPath(el.getSecond()).build());
}
pathBuilder.clear();
}
BulkLoadHFileRequest.Builder request = ClientProtos.BulkLoadHFileRequest.newBuilder()
.setRegion(region).setAssignSeqNum(assignSeqNum).addAllFamilyPath(protoFamilyPaths);
if (userToken != null) {
request.setFsToken(protoDT);
}
if (bulkToken != null) {
request.setBulkToken(bulkToken);
}
request.setCopyFile(copyFiles);
if (clusterIds != null) {
request.addAllClusterIds(clusterIds);
}
request.setReplicate(replicate);
return request.build();
} | 3.68 |
framework_VTabsheet_navigateTab | /**
* Updates tab focus styles when navigating from one tab to another.
* <p>
* This method should be called when there is either a mouse click at
* the new tab (which should also trigger selection) or a next/previous
* key navigation event (which should not, unless confirmed with
* selection key).
*
* @param fromIndex
* the index of the previously selected tab
* @param toIndex
* the index of the tab that is getting navigated into
* @return the tab that gets navigated to
*
* @see VTabsheet#getNextTabKey()
* @see VTabsheet#getPreviousTabKey()
* @see VTabsheet#getSelectTabKey()
*/
public Tab navigateTab(int fromIndex, int toIndex) {
Tab newNavigated = getTab(toIndex);
if (newNavigated == null) {
throw new IllegalArgumentException(
"Tab at provided index toIndex was not found");
}
Tab oldNavigated = getTab(fromIndex);
newNavigated.setStyleNames(newNavigated.equals(selected),
isFirstVisibleTabClient(toIndex), true);
if (oldNavigated != null && fromIndex != toIndex) {
oldNavigated.setStyleNames(oldNavigated.equals(selected),
isFirstVisibleTabClient(fromIndex), false);
}
return newNavigated;
} | 3.68 |
hadoop_S3ARemoteObject_getReadInvoker | /**
* Gets an instance of {@code Invoker} for interacting with S3 API.
*
* @return an instance of {@code Invoker} for interacting with S3 API.
*/
public Invoker getReadInvoker() {
return context.getReadInvoker();
} | 3.68 |
morf_AbstractSqlDialectTest_testAddIndexStatementsOnMultipleColumns | /**
* Test adding an index over multiple columns.
*/
@SuppressWarnings("unchecked")
@Test
public void testAddIndexStatementsOnMultipleColumns() {
Table table = metadata.getTable(TEST_TABLE);
Index index = index("indexName").columns(table.columns().get(0).getName(), table.columns().get(1).getName());
compareStatements(
expectedAddIndexStatementsOnMultipleColumns(),
testDialect.addIndexStatements(table, index));
} | 3.68 |
hadoop_KMSAudit_getAuditLoggerClasses | /**
* Read the KMSAuditLogger classes from configuration. If any loggers fail to
 * load, a RuntimeException will be thrown.
*
* @param conf The configuration.
 * @return Collection of KMSAuditLogger classes.
*/
private Set<Class<? extends KMSAuditLogger>> getAuditLoggerClasses(
final Configuration conf) {
Set<Class<? extends KMSAuditLogger>> result = new HashSet<>();
// getTrimmedStringCollection will remove duplicates.
Collection<String> classes =
conf.getTrimmedStringCollection(KMSConfiguration.KMS_AUDIT_LOGGER_KEY);
if (classes.isEmpty()) {
LOG.info("No audit logger configured, using default.");
result.add(SimpleKMSAuditLogger.class);
return result;
}
for (String c : classes) {
try {
Class<?> cls = conf.getClassByName(c);
result.add(cls.asSubclass(KMSAuditLogger.class));
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException("Failed to load " + c + ", please check "
+ "configuration " + KMSConfiguration.KMS_AUDIT_LOGGER_KEY, cnfe);
}
}
return result;
} | 3.68 |
flink_MathUtils_bitMix | /**
* Bit-mixing for pseudo-randomization of integers (e.g., to guard against bad hash functions).
* Implementation is from Murmur's 32 bit finalizer.
*
* @param in the input value
* @return the bit-mixed output value
*/
public static int bitMix(int in) {
in ^= in >>> 16;
in *= 0x85ebca6b;
in ^= in >>> 13;
in *= 0xc2b2ae35;
in ^= in >>> 16;
return in;
} | 3.68 |
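A standalone sketch that reproduces the finalizer above and prints a few values, illustrating how near-identical inputs are spread far apart.

```java
public class BitMixSketch {
    // Murmur3 32-bit finalizer, same constants as the snippet above.
    static int bitMix(int in) {
        in ^= in >>> 16;
        in *= 0x85ebca6b;
        in ^= in >>> 13;
        in *= 0xc2b2ae35;
        in ^= in >>> 16;
        return in;
    }

    public static void main(String[] args) {
        // Consecutive inputs land on very different bit patterns.
        for (int i = 1; i <= 3; i++) {
            System.out.printf("bitMix(%d) = 0x%08x%n", i, bitMix(i));
        }
    }
}
```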
framework_VScrollTable_useOldGeckoNavigation | /*
 * In Firefox prior to v65, auto-repeat works correctly only if we use a key
 * press handler; other browsers handle it correctly when using a key down
 * handler.
*/
private boolean useOldGeckoNavigation() {
return BrowserInfo.get().isGecko()
&& BrowserInfo.get().getGeckoVersion() < 65;
} | 3.68 |
framework_DateField_setDateFormat | /**
* Sets formatting used by some component implementations. See
* {@link SimpleDateFormat} for format details.
*
 * By default it is encouraged to use the default formatting defined by the
 * Locale, but due to some JVM bugs it is sometimes necessary to use this method to
* override formatting. See Vaadin issue #2200.
*
* @param dateFormat
* the dateFormat to set
*
* @see AbstractComponent#setLocale(Locale))
*/
public void setDateFormat(String dateFormat) {
this.dateFormat = dateFormat;
markAsDirty();
} | 3.68 |
hadoop_ReencryptionHandler_resetSubmissionTracker | /**
* Reset the zone submission tracker for re-encryption.
* @param zoneId
*/
synchronized private void resetSubmissionTracker(final long zoneId) {
ZoneSubmissionTracker zst = submissions.get(zoneId);
if (zst == null) {
zst = new ZoneSubmissionTracker();
submissions.put(zoneId, zst);
} else {
zst.reset();
}
} | 3.68 |
hbase_ForeignException_toStackTraceElementMessages | /**
 * Convert a stack trace to a list of {@link StackTraceElementMessage}.
* @param trace the stack trace to convert to protobuf message
* @return <tt>null</tt> if the passed stack is <tt>null</tt>.
*/
private static List<StackTraceElementMessage>
toStackTraceElementMessages(StackTraceElement[] trace) {
// if there is no stack trace, ignore it and just return the message
if (trace == null) return null;
// build the stack trace for the message
List<StackTraceElementMessage> pbTrace = new ArrayList<>(trace.length);
for (StackTraceElement elem : trace) {
StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder();
stackBuilder.setDeclaringClass(elem.getClassName());
stackBuilder.setFileName(elem.getFileName());
stackBuilder.setLineNumber(elem.getLineNumber());
stackBuilder.setMethodName(elem.getMethodName());
pbTrace.add(stackBuilder.build());
}
return pbTrace;
} | 3.68 |
hbase_Bytes_equals | /**
* Lexicographically determine the equality of two byte[], one as ByteBuffer.
* @param a left operand
* @param buf right operand
* @return True if equal
*/
public static boolean equals(byte[] a, ByteBuffer buf) {
if (a == null) return buf == null;
if (buf == null) return false;
if (a.length != buf.remaining()) return false;
// Thou shalt not modify the original byte buffer in what should be read only operations.
ByteBuffer b = buf.duplicate();
for (byte anA : a) {
if (anA != b.get()) {
return false;
}
}
return true;
} | 3.68 |
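A self-contained sketch of the same comparison idea, without the HBase dependency; the important detail is duplicate(), which lets the loop consume a copy of the buffer so the caller's position and limit stay untouched.

```java
import java.nio.ByteBuffer;

public class ByteBufferEqualsSketch {
    // Same approach as the snippet above.
    static boolean equals(byte[] a, ByteBuffer buf) {
        if (a == null) return buf == null;
        if (buf == null) return false;
        if (a.length != buf.remaining()) return false;
        ByteBuffer b = buf.duplicate(); // read-only walk; the original buffer is not modified
        for (byte anA : a) {
            if (anA != b.get()) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] {1, 2, 3});
        System.out.println(equals(new byte[] {1, 2, 3}, buf)); // true
        System.out.println(buf.position());                    // still 0
    }
}
```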
flink_ExecutionEnvironment_createLocalEnvironment | /**
* Creates a {@link LocalEnvironment} which is used for executing Flink jobs.
*
* @param configuration to start the {@link LocalEnvironment} with
* @param defaultParallelism to initialize the {@link LocalEnvironment} with
* @return {@link LocalEnvironment}
*/
private static LocalEnvironment createLocalEnvironment(
Configuration configuration, int defaultParallelism) {
final LocalEnvironment localEnvironment = new LocalEnvironment(configuration);
if (defaultParallelism > 0) {
localEnvironment.setParallelism(defaultParallelism);
}
return localEnvironment;
} | 3.68 |
morf_SqlUtils_otherwise | /**
* @param defaultValue If all the when conditions fail return this default value
* @return {@link CaseStatement}
*/
public CaseStatement otherwise(boolean defaultValue) {
return otherwise(literal(defaultValue));
} | 3.68 |
flink_AvroSerializerSnapshot_resolveSchemaCompatibility | /**
 * Resolves writer/reader schema compatibility.
 *
 * <p>Checks whether a new version of a schema (reader) can read values serialized with the old
 * schema (writer), i.e. whether the schemas are compatible according to {@code Avro} schema resolution
 * rules (see <a href="https://avro.apache.org/docs/current/spec.html#Schema+Resolution">Schema
 * Resolution</a>).
*/
@VisibleForTesting
static <T> TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
Schema writerSchema, Schema readerSchema) {
if (Objects.equals(writerSchema, readerSchema)) {
return TypeSerializerSchemaCompatibility.compatibleAsIs();
}
final SchemaPairCompatibility compatibility =
SchemaCompatibility.checkReaderWriterCompatibility(readerSchema, writerSchema);
return avroCompatibilityToFlinkCompatibility(compatibility);
} | 3.68 |
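A minimal sketch of the underlying Avro check used above, assuming only the Avro library is on the classpath; int-to-long is one of the promotions Avro's resolution rules allow, so this pair reports as compatible.

```java
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.SchemaCompatibility;

public class AvroCompatibilitySketch {
    public static void main(String[] args) {
        // Writer wrote an int; the reader widens it to long, which Avro resolution permits.
        Schema writer = SchemaBuilder.record("r").fields().requiredInt("f").endRecord();
        Schema reader = SchemaBuilder.record("r").fields().requiredLong("f").endRecord();

        SchemaCompatibility.SchemaPairCompatibility result =
                SchemaCompatibility.checkReaderWriterCompatibility(reader, writer);
        System.out.println(result.getType()); // COMPATIBLE
    }
}
```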
flink_FormatFactory_forwardOptions | /**
* Returns a set of {@link ConfigOption} that are directly forwarded to the runtime
* implementation but don't affect the final execution topology.
*
* <p>Options declared here can override options of the persisted plan during an enrichment
* phase. Since a restored topology is static, an implementer has to ensure that the declared
* options don't affect fundamental abilities such as {@link ChangelogMode}.
*
* <p>For example, given a JSON format, if an option defines how to parse timestamps, changing
* the parsing behavior does not affect the pipeline topology and can be allowed. However, an
* option that defines whether the format results in a {@link ProjectableDecodingFormat} or not
* is not allowed. The wrapping connector and planner might not react to the changed abilities
* anymore.
*
* @see DynamicTableFactory.Context#getEnrichmentOptions()
*/
default Set<ConfigOption<?>> forwardOptions() {
return Collections.emptySet();
} | 3.68 |
hudi_HoodieRecordUtils_loadRecordMerger | /**
 * Instantiate a record merger from the given class name.
*/
public static HoodieRecordMerger loadRecordMerger(String mergerClass) {
try {
HoodieRecordMerger recordMerger = (HoodieRecordMerger) INSTANCE_CACHE.get(mergerClass);
if (null == recordMerger) {
synchronized (HoodieRecordMerger.class) {
recordMerger = (HoodieRecordMerger) INSTANCE_CACHE.get(mergerClass);
if (null == recordMerger) {
recordMerger = (HoodieRecordMerger) ReflectionUtils.loadClass(mergerClass,
new Object[] {});
INSTANCE_CACHE.put(mergerClass, recordMerger);
}
}
}
return recordMerger;
} catch (HoodieException e) {
throw new HoodieException("Unable to instantiate hoodie merge class ", e);
}
} | 3.68 |
streampipes_RosBridgeAdapter_getListOfAllTopics | // Ignore for now, but is interesting for future implementations
private List<String> getListOfAllTopics(Ros ros) {
List<String> result = new ArrayList<>();
Service service = new Service(ros, "/rosapi/topics", "rosapi/Topics");
ServiceRequest request = new ServiceRequest();
ServiceResponse response = service.callServiceAndWait(request);
JsonObject ob = new JsonParser().parse(response.toString()).getAsJsonObject();
if (ob.has("topics")) {
JsonArray topics = ob.get("topics").getAsJsonArray();
for (int i = 0; i < topics.size(); i++) {
result.add(topics.get(i).getAsString());
}
}
return result;
} | 3.68 |
hadoop_FilePool_locationsFor | /**
* Get a set of locations for the given file.
*/
public BlockLocation[] locationsFor(FileStatus stat, long start, long len)
throws IOException {
// TODO cache
return fs.getFileBlockLocations(stat, start, len);
} | 3.68 |
flink_Tuple0_toString | /**
* Creates a string representation of the tuple in the form "()".
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "()";
} | 3.68 |
framework_CalendarWeekDropHandler_updateDropDetails | /**
* Update the drop details sent to the server
*
* @param drag
* The drag event
*/
private void updateDropDetails(VDragEvent drag) {
int slotIndex = currentTargetDay.getSlotIndex(currentTargetElement);
int dayIndex = calendarConnector.getWidget().getWeekGrid()
.getDateCellIndex(currentTargetDay);
drag.getDropDetails().put("dropDayIndex", dayIndex);
drag.getDropDetails().put("dropSlotIndex", slotIndex);
} | 3.68 |
hadoop_Quota_getQuotaUsage | /**
* Get aggregated quota usage for the federation path.
* @param path Federation path.
* @return Aggregated quota.
* @throws IOException If the quota system is disabled.
*/
public QuotaUsage getQuotaUsage(String path) throws IOException {
return aggregateQuota(path, getEachQuotaUsage(path));
} | 3.68 |
hbase_RefCnt_create | /**
 * Create a {@link RefCnt} with an initial reference count = 1. If the reference count becomes
 * zero, the recycler will do nothing. Usually, a heap {@link ByteBuff} will use this kind of
 * refCnt to track its life cycle; it helps to abstract the code path, although tracking is not
 * really needed for an on-heap ByteBuff.
*/
public static RefCnt create() {
return new RefCnt(ByteBuffAllocator.NONE);
} | 3.68 |
hbase_Encryption_computeCryptoKeyHash | /**
* Returns the hash of the supplied argument, using the hash algorithm specified in the given
* config.
*/
public static byte[] computeCryptoKeyHash(Configuration conf, byte[] arg) {
String algorithm = getConfiguredHashAlgorithm(conf);
try {
return hashWithAlg(algorithm, arg);
} catch (RuntimeException e) {
String message = format(
"Error in computeCryptoKeyHash (please check your configuration "
+ "parameter %s and the security provider configuration of the JVM)",
CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY);
throw new RuntimeException(message, e);
}
} | 3.68 |
hadoop_ConfigurationWithLogging_getFloat | /**
* See {@link Configuration#getFloat(String, float)}.
*/
@Override
public float getFloat(String name, float defaultValue) {
float value = super.getFloat(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
} | 3.68 |
hbase_MultiByteBuff_duplicate | /**
 * Returns an MBB which is a duplicate version of this MBB. The position, limit and mark of the
 * new MBB will be independent of those of the original MBB. The content of the new MBB will
 * start at this MBB's current position. The position, limit and mark of the new MBB will be
 * identical to those of this MBB in terms of values.
* @return a duplicated MBB
*/
@Override
public MultiByteBuff duplicate() {
checkRefCount();
ByteBuffer[] itemsCopy = new ByteBuffer[this.items.length];
for (int i = 0; i < this.items.length; i++) {
itemsCopy[i] = items[i].duplicate();
}
return new MultiByteBuff(refCnt, itemsCopy, this.itemBeginPos, this.limit,
this.limitedItemIndex, this.curItemIndex, this.markedItemIndex);
} | 3.68 |
hadoop_Signer_verifyAndExtract | /**
* Verifies a signed string and extracts the original string.
*
* @param signedStr the signed string to verify and extract.
*
* @return the extracted original string.
*
* @throws SignerException thrown if the given string is not a signed string or if the signature is invalid.
*/
public String verifyAndExtract(String signedStr) throws SignerException {
int index = signedStr.lastIndexOf(SIGNATURE);
if (index == -1) {
throw new SignerException("Invalid signed text: " + signedStr);
}
String originalSignature = signedStr.substring(index + SIGNATURE.length());
String rawValue = signedStr.substring(0, index);
checkSignatures(rawValue, originalSignature);
return rawValue;
} | 3.68 |
hbase_NamespaceAuditor_getState | /**
* @param namespace The name of the namespace
* @return An instance of NamespaceTableAndRegionInfo
*/
public NamespaceTableAndRegionInfo getState(String namespace) {
if (stateManager.isInitialized()) {
return stateManager.getState(namespace);
}
return null;
} | 3.68 |
framework_XhrConnection_getUri | /**
* Retrieves the URI to use when sending RPCs to the server.
*
* @return The URI to use for server messages.
*/
protected String getUri() {
String uri = connection
.translateVaadinUri(ApplicationConstants.APP_PROTOCOL_PREFIX
+ ApplicationConstants.UIDL_PATH + '/');
uri = SharedUtil.addGetParameters(uri, UIConstants.UI_ID_PARAMETER + "="
+ connection.getConfiguration().getUIId());
return uri;
} | 3.68 |
hudi_CopyOnWriteInputFormat_addFilesInDir | /**
 * Enumerate all files in the directory, recursing into subdirectories if enumerateNestedFiles is true.
*
* @return the total length of accepted files.
*/
private long addFilesInDir(org.apache.hadoop.fs.Path path, List<FileStatus> files, boolean logExcludedFiles)
throws IOException {
final org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(path.toUri());
final FileSystem fs = FSUtils.getFs(hadoopPath.toString(), this.conf.conf());
long length = 0;
for (FileStatus dir : fs.listStatus(hadoopPath)) {
if (dir.isDirectory()) {
if (acceptFile(dir) && enumerateNestedFiles) {
length += addFilesInDir(dir.getPath(), files, logExcludedFiles);
} else {
if (logExcludedFiles && LOG.isDebugEnabled()) {
LOG.debug("Directory " + dir.getPath().toString() + " did not pass the file-filter and is excluded.");
}
}
} else {
if (acceptFile(dir)) {
files.add(dir);
length += dir.getLen();
testForUnsplittable(dir);
} else {
if (logExcludedFiles && LOG.isDebugEnabled()) {
LOG.debug("Directory " + dir.getPath().toString() + " did not pass the file-filter and is excluded.");
}
}
}
}
return length;
} | 3.68 |
graphhopper_GHDirectory_getPreload | /**
* Returns the preload value or 0 if no patterns match.
* See {@link #configure(LinkedHashMap)}
*/
int getPreload(String name) {
for (Map.Entry<String, Integer> entry : mmapPreloads.entrySet())
if (name.matches(entry.getKey())) return entry.getValue();
return 0;
} | 3.68 |
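A small sketch of the first-match-wins regex lookup above, with hypothetical patterns and preload values; the real keys come from GHDirectory's configure() map, so the names used here are only placeholders.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class PreloadLookupSketch {
    // Same first-match-wins regex scan as the snippet above.
    static int getPreload(String name, LinkedHashMap<String, Integer> mmapPreloads) {
        for (Map.Entry<String, Integer> entry : mmapPreloads.entrySet())
            if (name.matches(entry.getKey())) return entry.getValue();
        return 0;
    }

    public static void main(String[] args) {
        LinkedHashMap<String, Integer> preloads = new LinkedHashMap<>();
        preloads.put("nodes.*", 100); // hypothetical pattern/value; insertion order matters
        preloads.put(".*", 10);

        System.out.println(getPreload("nodes_ch", preloads)); // 100 (first matching pattern)
        System.out.println(getPreload("edges", preloads));    // 10  (falls through to catch-all)
        System.out.println(getPreload("geometry", new LinkedHashMap<>())); // 0 (no patterns)
    }
}
```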
hudi_HoodieMetaSyncOperations_updateLastCommitTimeSynced | /**
* Update the timestamp of last sync.
*/
default void updateLastCommitTimeSynced(String tableName) {
} | 3.68 |
Activiti_BpmnActivityBehavior_dispatchJobCanceledEvents | /**
 * Dispatch a job-canceled event for each job associated with the given execution entity.
 * @param activityExecution the execution entity whose jobs are being canceled
*/
protected void dispatchJobCanceledEvents(ExecutionEntity activityExecution) {
if (activityExecution != null) {
List<JobEntity> jobs = activityExecution.getJobs();
for (JobEntity job : jobs) {
if (Context.getProcessEngineConfiguration().getEventDispatcher().isEnabled()) {
Context.getProcessEngineConfiguration().getEventDispatcher().dispatchEvent(ActivitiEventBuilder.createEntityEvent(ActivitiEventType.JOB_CANCELED,
job));
}
}
List<TimerJobEntity> timerJobs = activityExecution.getTimerJobs();
for (TimerJobEntity job : timerJobs) {
if (Context.getProcessEngineConfiguration().getEventDispatcher().isEnabled()) {
Context.getProcessEngineConfiguration().getEventDispatcher().dispatchEvent(ActivitiEventBuilder.createEntityEvent(ActivitiEventType.JOB_CANCELED,
job));
}
}
}
} | 3.68 |
hadoop_SingleFilePerBlockCache_getTempFilePath | /**
* Create temporary file based on the file path retrieved from local dir allocator
 * instance. The file is created with a .bin suffix and is granted the posix file
 * permissions defined in TEMP_FILE_ATTRS.
*
* @param conf the configuration.
* @param localDirAllocator the local dir allocator instance.
* @return path of the file created.
* @throws IOException if IO error occurs while local dir allocator tries to retrieve path
* from local FS or file creation fails or permission set fails.
*/
private static Path getTempFilePath(final Configuration conf,
final LocalDirAllocator localDirAllocator) throws IOException {
org.apache.hadoop.fs.Path path =
localDirAllocator.getLocalPathForWrite(CACHE_FILE_PREFIX, conf);
File dir = new File(path.getParent().toUri().getPath());
String prefix = path.getName();
File tmpFile = File.createTempFile(prefix, BINARY_FILE_SUFFIX, dir);
Path tmpFilePath = Paths.get(tmpFile.toURI());
return Files.setPosixFilePermissions(tmpFilePath, TEMP_FILE_ATTRS);
} | 3.68 |
hadoop_LoggedJob_compareStrings | // I'll treat this as an atomic object type
private void compareStrings(List<String> c1, List<String> c2, TreePath loc,
String eltname) throws DeepInequalityException {
if (c1 == null && c2 == null) {
return;
}
TreePath recursePath = new TreePath(loc, eltname);
if (c1 == null || c2 == null || !c1.equals(c2)) {
throw new DeepInequalityException(eltname + " miscompared", recursePath);
}
} | 3.68 |
morf_InsertStatement_getTable | /**
* Gets the table being inserted into
*
* @return the table being inserted into
*/
public TableReference getTable() {
return table;
} | 3.68 |
hmily_RepositoryPathUtils_buildFilePath | /**
* Build file path string.
*
* @param applicationName the application name
* @return the string
*/
public static String buildFilePath(final String applicationName) {
return String.join("/", CommonConstant.PATH_SUFFIX, applicationName.replaceAll("-", "_"));
} | 3.68 |
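A tiny sketch of the same join-and-normalize step; the path suffix constant is a placeholder, since the actual value of CommonConstant.PATH_SUFFIX is not shown in the snippet.

```java
public class FilePathSketch {
    public static void main(String[] args) {
        // Placeholder standing in for CommonConstant.PATH_SUFFIX (actual value not shown above).
        String pathSuffix = "/hmily";
        String applicationName = "account-service";
        // Dashes are normalized to underscores before joining, as in the snippet above.
        String path = String.join("/", pathSuffix, applicationName.replaceAll("-", "_"));
        System.out.println(path); // /hmily/account_service
    }
}
```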
querydsl_AbstractSQLClause_startContext | /**
* Called to create and start a new SQL Listener context
*
* @param connection the database connection
* @param metadata the meta data for that context
* @param entity the entity for that context
* @return the newly started context
*/
protected SQLListenerContextImpl startContext(Connection connection, QueryMetadata metadata, RelationalPath<?> entity) {
SQLListenerContextImpl context = new SQLListenerContextImpl(metadata, connection, entity);
listeners.start(context);
return context;
} | 3.68 |
hadoop_WebAppProxyServer_getBindAddress | /**
* Retrieve PROXY bind address from configuration
*
* @param conf
* @return InetSocketAddress
*/
public static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(
YarnConfiguration.PROXY_BIND_HOST,
YarnConfiguration.PROXY_ADDRESS,
YarnConfiguration.DEFAULT_PROXY_ADDRESS,
YarnConfiguration.DEFAULT_PROXY_PORT);
} | 3.68 |