name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hadoop_HsController_taskPage | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#taskPage()
*/
@Override
protected Class<? extends View> taskPage() {
return HsTaskPage.class;
} | 3.68 |
hbase_HBaseTestingUtility_memStoreTSAndTagsCombination | /**
* Create combination of memstoreTS and tags
*/
private static List<Object[]> memStoreTSAndTagsCombination() {
List<Object[]> configurations = new ArrayList<>();
configurations.add(new Object[] { false, false });
configurations.add(new Object[] { false, true });
configurations.add(new Object[] { true, false });
configurations.add(new Object[] { true, true });
return Collections.unmodifiableList(configurations);
} | 3.68 |
flink_JoinedStreams_where | /**
* Specifies a {@link KeySelector} for elements from the first input with explicit type
* information for the key type.
*
* @param keySelector The KeySelector to be used for extracting the first input's key for
* partitioning.
* @param keyType The type information describing the key type.
*/
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector, TypeInformation<KEY> keyType) {
requireNonNull(keySelector);
requireNonNull(keyType);
return new Where<>(input1.clean(keySelector), keyType);
} | 3.68 |
flink_SavepointMetadata_getExistingOperators | /** @return List of {@link OperatorState} that already exists within the savepoint. */
public List<OperatorState> getExistingOperators() {
return operatorStateIndex.values().stream()
.filter(OperatorStateSpec::isExistingState)
.map(OperatorStateSpec::asExistingState)
.collect(Collectors.toList());
} | 3.68 |
hbase_Increment_getTimeRange | /**
* Gets the TimeRange used for this increment.
*/
public TimeRange getTimeRange() {
return this.tr;
} | 3.68 |
querydsl_EnumExpression_ordinal | /**
* Get the ordinal of this enum
*
* @return ordinal number
*/
public NumberExpression<Integer> ordinal() {
if (ordinal == null) {
ordinal = Expressions.numberOperation(Integer.class, Ops.ORDINAL, mixin);
}
return ordinal;
} | 3.68 |
dubbo_ServiceNameMapping_getDefaultExtension | /**
* Get the default extension of {@link ServiceNameMapping}
*
* @return non-null {@link ServiceNameMapping}
*/
static ServiceNameMapping getDefaultExtension(ScopeModel scopeModel) {
return ScopeModelUtil.getApplicationModel(scopeModel).getDefaultExtension(ServiceNameMapping.class);
} | 3.68 |
pulsar_ComponentImpl_allowFunctionOps | /**
* @deprecated use {@link #isSuperUser(AuthenticationParameters)}
*/
@Deprecated
public boolean allowFunctionOps(NamespaceName namespaceName, String role,
AuthenticationDataSource authenticationData) {
AuthenticationParameters authParams = AuthenticationParameters.builder().clientRole(role)
.clientAuthenticationDataSource(authenticationData).build();
return allowFunctionOps(namespaceName, authParams);
} | 3.68 |
flink_SqlGatewayOpenApiSpecGenerator_main | /**
* Generates the Sql Gateway REST API OpenAPI spec.
*
* @param args args[0] contains the directory into which the generated files are placed
* @throws IOException if any file operation failed
*/
public static void main(String[] args) throws IOException, ConfigurationException {
String outputDirectory = args[0];
for (final SqlGatewayRestAPIVersion apiVersion : SqlGatewayRestAPIVersion.values()) {
if (apiVersion == SqlGatewayRestAPIVersion.V0) {
// this version exists only for testing purposes
continue;
}
createDocumentationFile(
"Flink SQL Gateway REST API",
new DocumentingSqlGatewayRestEndpoint(),
apiVersion,
Paths.get(
outputDirectory,
"rest_" + apiVersion.getURLVersionPrefix() + "_sql_gateway.yml"));
}
} | 3.68 |
framework_ColorPickerSelect_createColors | /**
* Creates the color matrix based on the hue of the given color.
*
* @param color
* the color
* @param rows
* the rows
* @param columns
* the columns
*
* @return the color[][]
*/
private Color[][] createColors(Color color, int rows, int columns) {
Color[][] colors = new Color[rows][columns];
float[] hsv = color.getHSV();
float hue = hsv[0];
float saturation = 1f;
float value = 1f;
for (int row = 0; row < rows; row++) {
for (int col = 0; col < columns; col++) {
int index = row * columns + col;
saturation = 1f;
value = 1f;
if (index <= rows * columns / 2) {
saturation = index / ((float) rows * (float) columns / 2f);
} else {
index -= rows * columns / 2;
value = 1f - index / ((float) rows * (float) columns / 2f);
}
colors[row][col] = new Color(
Color.HSVtoRGB(hue, saturation, value));
}
}
return colors;
} | 3.68 |
framework_DataCommunicator_getKeyMapper | /**
* Gets the {@link DataKeyMapper} used by this {@link DataCommunicator}. Key
* mapper can be used to map keys sent to the client-side back to their
* respective data objects.
*
* @return key mapper
*/
public DataKeyMapper<T> getKeyMapper() {
return keyMapper;
} | 3.68 |
hadoop_ECBlockGroup_getDataBlocks | /**
* Get data blocks
* @return data blocks
*/
public ECBlock[] getDataBlocks() {
return dataBlocks;
} | 3.68 |
hbase_HBaseTestingUtility_predicateTableAvailable | /**
* Returns a {@link Predicate} for checking that table is available
*/
public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
return new ExplainingPredicate<IOException>() {
@Override
public String explainFailure() throws IOException {
return explainTableAvailability(tableName);
}
@Override
public boolean evaluate() throws IOException {
boolean tableAvailable = getAdmin().isTableAvailable(tableName);
if (tableAvailable) {
try (Table table = getConnection().getTable(tableName)) {
TableDescriptor htd = table.getDescriptor();
for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
.getAllRegionLocations()) {
Scan scan = new Scan().withStartRow(loc.getRegion().getStartKey())
.withStopRow(loc.getRegion().getEndKey()).setOneRowLimit()
.setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
for (byte[] family : htd.getColumnFamilyNames()) {
scan.addFamily(family);
}
try (ResultScanner scanner = table.getScanner(scan)) {
scanner.next();
}
}
}
}
return tableAvailable;
}
};
} | 3.68 |
hbase_AccessController_checkPermissions | /**
* @deprecated since 2.2.0 and will be removed 4.0.0. Use {@link Admin#hasUserPermissions(List)}
* instead.
* @see Admin#hasUserPermissions(List)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-22117">HBASE-22117</a>
*/
@Deprecated
@Override
public void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done) {
AccessControlProtos.CheckPermissionsResponse response = null;
try {
User user = RpcServer.getRequestUser().orElse(null);
TableName tableName = regionEnv.getRegion().getTableDescriptor().getTableName();
List<Permission> permissions = new ArrayList<>();
for (int i = 0; i < request.getPermissionCount(); i++) {
Permission permission = AccessControlUtil.toPermission(request.getPermission(i));
permissions.add(permission);
if (permission instanceof TablePermission) {
TablePermission tperm = (TablePermission) permission;
if (!tperm.getTableName().equals(tableName)) {
throw new CoprocessorException(AccessController.class,
String.format(
"This method can only execute at the table specified in "
+ "TablePermission. Table of the region:%s , requested table:%s",
tableName, tperm.getTableName()));
}
}
}
for (Permission permission : permissions) {
boolean hasPermission =
accessChecker.hasUserPermission(user, "checkPermissions", permission);
if (!hasPermission) {
throw new AccessDeniedException("Insufficient permissions " + permission.toString());
}
}
response = AccessControlProtos.CheckPermissionsResponse.getDefaultInstance();
} catch (IOException ioe) {
CoprocessorRpcUtils.setControllerException(controller, ioe);
}
done.run(response);
} | 3.68 |
flink_JobEdge_isBroadcast | /** Gets whether the edge is a broadcast edge. */
public boolean isBroadcast() {
return isBroadcast;
} | 3.68 |
flink_SavepointWriter_withConfiguration | /**
* Sets a configuration that will be applied to the stream operators used to bootstrap a new
* savepoint.
*
* @param option metadata information
* @param value value to be stored
* @param <T> type of the value to be stored
* @return The modified savepoint.
*/
public <T> SavepointWriter withConfiguration(ConfigOption<T> option, T value) {
configuration.set(option, value);
return this;
} | 3.68 |
framework_AbstractMedia_setSource | /**
* Sets a single media file as the source of the media component.
*
* @param source
*/
public void setSource(Resource source) {
clearSources();
addSource(source);
} | 3.68 |
hbase_ProcedureCoordinator_memberFinishedBarrier | /**
* Notification that the procedure had another member finished executing its in-barrier subproc
* via {@link Subprocedure#insideBarrier()}.
* @param procName name of the subprocedure that finished
* @param member name of the member that executed and released its barrier
* @param dataFromMember the data that the member returned along with the notification
*/
void memberFinishedBarrier(String procName, final String member, byte[] dataFromMember) {
Procedure proc = procedures.get(procName);
if (proc == null) {
LOG.warn(
"Member '" + member + "' is trying to release an unknown procedure '" + procName + "'");
return;
}
if (LOG.isTraceEnabled()) {
LOG.trace("Member '" + member + "' released procedure '" + procName + "'");
}
proc.barrierReleasedByMember(member, dataFromMember);
} | 3.68 |
framework_BeanUtil_getMethodFromBridge | /**
* Return declared method for which {@code bridgeMethod} is generated using
* its {@code paramTypes}. If {@code bridgeMethod} is not a bridge method
* then return null.
*/
private static Method getMethodFromBridge(Method bridgeMethod,
Class<?>... paramTypes) throws SecurityException {
if (bridgeMethod == null || !bridgeMethod.isBridge()) {
return null;
}
try {
return bridgeMethod.getDeclaringClass()
.getMethod(bridgeMethod.getName(), paramTypes);
} catch (NoSuchMethodException e) {
return null;
}
} | 3.68 |
hadoop_StringValueMax_getReport | /**
* @return the string representation of the aggregated value
*/
public String getReport() {
return maxVal;
} | 3.68 |
hadoop_AbfsClient_getAbfsRestOperation | /**
* Creates an AbfsRestOperation with parameters including request headers and SAS token.
*
* @param operationType The type of the operation.
* @param httpMethod The HTTP method of the operation.
* @param url The URL associated with the operation.
* @param requestHeaders The list of HTTP headers for the request.
* @param sasTokenForReuse The SAS token for reusing authentication.
* @return An AbfsRestOperation instance.
*/
AbfsRestOperation getAbfsRestOperation(final AbfsRestOperationType operationType,
final String httpMethod,
final URL url,
final List<AbfsHttpHeader> requestHeaders,
final String sasTokenForReuse) {
return new AbfsRestOperation(
operationType,
this,
httpMethod,
url,
requestHeaders, sasTokenForReuse);
} | 3.68 |
hmily_SubCoordinator_setRollbackOnly | /**
* Sets rollback only.
*/
public void setRollbackOnly() {
if (state == XaState.STATUS_PREPARING) {
state = XaState.STATUS_MARKED_ROLLBACK;
}
} | 3.68 |
MagicPlugin_BufferedMapCanvas_drawImage | // Shamelessly stolen from CraftMapCanvas.... wish they'd give us
// an extendible version or just let us create them at least :)
@Override
@SuppressWarnings("deprecation")
public void drawImage(int x, int y, Image image) {
byte[] bytes = MapPalette.imageToBytes(image);
for (int x2 = 0; x2 < image.getWidth(null); ++x2) {
for (int y2 = 0; y2 < image.getHeight(null); ++y2) {
setPixel(x + x2, y + y2, bytes[y2 * image.getWidth(null) + x2]);
}
}
} | 3.68 |
hudi_HoodieTableMetadataUtil_getPartitionIdentifier | /**
* Returns partition name for the given path.
*/
public static String getPartitionIdentifier(@Nonnull String relativePartitionPath) {
return EMPTY_PARTITION_NAME.equals(relativePartitionPath) ? NON_PARTITIONED_NAME : relativePartitionPath;
} | 3.68 |
hbase_LazyInitializedWALProvider_getProviderNoCreate | /**
* Get the provider if it is already initialized, otherwise just return {@code null} instead of
* creating it.
*/
WALProvider getProviderNoCreate() {
return holder.get();
} | 3.68 |
hadoop_AbstractStoreOperation_getStoreContext | /**
* Get the store context.
* @return the context.
*/
public final StoreContext getStoreContext() {
return storeContext;
} | 3.68 |
Activiti_TablePage_getFirstResult | /**
* @return the start index of this page (ie the index of the first element in the page)
*/
public long getFirstResult() {
return firstResult;
} | 3.68 |
flink_DeclarativeAggregateFunction_mergeOperands | /**
* Merges the inputs of {@link #mergeExpressions()}; these inputs are the agg buffers generated by
* the user definition.
*/
public final UnresolvedReferenceExpression[] mergeOperands() {
UnresolvedReferenceExpression[] aggBuffers = aggBufferAttributes();
UnresolvedReferenceExpression[] ret = new UnresolvedReferenceExpression[aggBuffers.length];
for (int i = 0; i < aggBuffers.length; i++) {
String name = String.valueOf(i);
validateOperandName(name);
ret[i] = unresolvedRef(name);
}
return ret;
} | 3.68 |
zxing_BitSource_available | /**
* @return number of bits that can be read successfully
*/
public int available() {
return 8 * (bytes.length - byteOffset) - bitOffset;
} | 3.68 |
hibernate-validator_ConfigurationSource_getPriority | /**
* Returns this source's priority. Can be used to determine which
* configuration shall apply in case of conflicting configurations by
* several providers.
*
* @return This source's priority.
*/
public int getPriority() {
return priority;
} | 3.68 |
hudi_JavaExecutionStrategy_runClusteringForGroup | /**
* Executes clustering for the group.
*/
private List<WriteStatus> runClusteringForGroup(
HoodieClusteringGroup clusteringGroup, Map<String, String> strategyParams,
boolean preserveHoodieMetadata, String instantTime) {
List<HoodieRecord<T>> inputRecords = readRecordsForGroup(clusteringGroup, instantTime);
Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(getWriteConfig().getSchema()));
List<HoodieFileGroupId> inputFileIds = clusteringGroup.getSlices().stream()
.map(info -> new HoodieFileGroupId(info.getPartitionPath(), info.getFileId()))
.collect(Collectors.toList());
return performClusteringWithRecordList(inputRecords, clusteringGroup.getNumOutputFileGroups(), instantTime, strategyParams, readerSchema, inputFileIds, preserveHoodieMetadata);
} | 3.68 |
hbase_BucketCache_putIfAbsent | /**
* Return the previous associated value, or null if absent. It has the same meaning as
* {@link ConcurrentMap#putIfAbsent(Object, Object)}
*/
public RAMQueueEntry putIfAbsent(BlockCacheKey key, RAMQueueEntry entry) {
AtomicBoolean absent = new AtomicBoolean(false);
RAMQueueEntry re = delegate.computeIfAbsent(key, k -> {
// The RAMCache now references this entry, so the reference count should be incremented.
entry.getData().retain();
absent.set(true);
return entry;
});
return absent.get() ? null : re;
} | 3.68 |
framework_TabSheet_hideTabs | /**
* Hides or shows the tab selection parts ("tabs").
*
* @param tabsHidden
* true if the tabs should be hidden
* @deprecated as of 7.5, use {@link #setTabsVisible(boolean)} instead
*/
@Deprecated
public void hideTabs(boolean tabsHidden) {
setTabsVisible(!tabsHidden);
} | 3.68 |
graphhopper_VectorTileDecoder_setAutoScale | /**
* Set the autoScale setting.
*
* @param autoScale
* when true, the decoder automatically scales and returns all coordinates in the 0..255 range.
* when false, the decoder returns all coordinates in the 0..extent-1 range as they are encoded.
*
*/
public void setAutoScale(boolean autoScale) {
this.autoScale = autoScale;
} | 3.68 |
framework_HtmlInTabCaption_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
getLayout().setSpacing(true);
TabSheet ts = new TabSheet();
ts.setCaption("TabSheet - no <u>html</u> in tab captions");
ts.setCaptionAsHtml(true);
ts.addTab(new Label(), "<font color='red'>red</font>");
ts.addTab(new Label(), "<font color='blue'>blue</font>");
addComponent(ts);
ts = new TabSheet();
ts.setCaption("TabSheet - <b>html</b> in tab captions");
ts.setCaptionAsHtml(false);
ts.setTabCaptionsAsHtml(true);
ts.addTab(new Label(), "<font color='red'>red</font>");
ts.addTab(new Label(), "<font color='blue'>blue</font>");
addComponent(ts);
Accordion acc = new Accordion();
acc.setCaption("Accordion - no <u>html</u> in tab captions");
acc.setCaptionAsHtml(true);
acc.addTab(new Label(), "<font color='red'>red</font>");
acc.addTab(new Label(), "<font color='blue'>blue</font>");
addComponent(acc);
acc = new Accordion();
acc.setCaption("Accordion - <b>html</b> in tab captions");
acc.setCaptionAsHtml(false);
acc.setTabCaptionsAsHtml(true);
acc.addTab(new Label(), "<font color='red'>red</font>");
acc.addTab(new Label(), "<font color='blue'>blue</font>");
addComponent(acc);
} | 3.68 |
hbase_ChaosAgent_createEphemeralZNode | /***
* Function to create EPHEMERAL ZNODE with given path and data as params.
* @param path Path at which Ephemeral ZNode to create
* @param data Data to put under ZNode
*/
public void createEphemeralZNode(String path, byte[] data) {
zk.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL,
createEphemeralZNodeCallback, data);
} | 3.68 |
hudi_OrcUtils_filterRowKeys | /**
* Read the rowKey list matching the given filter, from the given ORC file. If the filter is empty, then this will
* return all the rowkeys.
*
* @param conf configuration to build fs object.
* @param filePath The ORC file path.
* @param filter record keys filter
* @return Set of pairs of row key and position matching candidateRecordKeys
*/
@Override
public Set<Pair<String, Long>> filterRowKeys(Configuration conf, Path filePath, Set<String> filter)
throws HoodieIOException {
long rowPosition = 0;
try (Reader reader = OrcFile.createReader(filePath, OrcFile.readerOptions(conf));) {
TypeDescription schema = reader.getSchema();
try (RecordReader recordReader = reader.rows(new Options(conf).schema(schema))) {
Set<Pair<String, Long>> filteredRowKeys = new HashSet<>();
List<String> fieldNames = schema.getFieldNames();
VectorizedRowBatch batch = schema.createRowBatch();
// column index for the RECORD_KEY_METADATA_FIELD field
int colIndex = -1;
for (int i = 0; i < fieldNames.size(); i++) {
if (fieldNames.get(i).equals(HoodieRecord.RECORD_KEY_METADATA_FIELD)) {
colIndex = i;
break;
}
}
if (colIndex == -1) {
throw new HoodieException(String.format("Couldn't find row keys in %s.", filePath));
}
while (recordReader.nextBatch(batch)) {
BytesColumnVector rowKeys = (BytesColumnVector) batch.cols[colIndex];
for (int i = 0; i < batch.size; i++) {
String rowKey = rowKeys.toString(i);
if (filter.isEmpty() || filter.contains(rowKey)) {
filteredRowKeys.add(Pair.of(rowKey, rowPosition));
}
rowPosition++;
}
}
return filteredRowKeys;
}
} catch (IOException io) {
throw new HoodieIOException("Unable to read row keys for ORC file:" + filePath, io);
}
} | 3.68 |
querydsl_NumberExpression_sum | /**
* Create a {@code sum(this)} expression
*
* <p>Get the sum of this expression (aggregation)</p>
*
* @return sum(this)
*/
public NumberExpression<T> sum() {
if (sum == null) {
sum = Expressions.numberOperation(getType(), Ops.AggOps.SUM_AGG, mixin);
}
return sum;
} | 3.68 |
flink_StreamExecutionEnvironment_registerSlotSharingGroup | /**
* Register a slot sharing group with its resource spec.
*
* <p>Note that a slot sharing group hints the scheduler that the grouped operators CAN be
* deployed into a shared slot. There's no guarantee that the scheduler always deploy the
* grouped operators together. In cases grouped operators are deployed into separate slots, the
* slot resources will be derived from the specified group requirements.
*
* @param slotSharingGroup which contains name and its resource spec.
*/
@PublicEvolving
public StreamExecutionEnvironment registerSlotSharingGroup(SlotSharingGroup slotSharingGroup) {
final ResourceSpec resourceSpec =
SlotSharingGroupUtils.extractResourceSpec(slotSharingGroup);
if (!resourceSpec.equals(ResourceSpec.UNKNOWN)) {
this.slotSharingGroupResources.put(
slotSharingGroup.getName(),
ResourceProfile.fromResourceSpec(
SlotSharingGroupUtils.extractResourceSpec(slotSharingGroup),
MemorySize.ZERO));
}
return this;
} | 3.68 |
hmily_PropertyName_isAncestorOf | /**
* Determine whether this property name is an ancestor of the given name. If yes, return true.
*
* @param name name.
* @return boolean boolean
*/
public boolean isAncestorOf(final PropertyName name) {
if (this.getElements().length >= name.getElements().length) {
return false;
}
for (int i = 0; i < this.elements.length; i++) {
if (!Objects.equals(this.elements[i], name.elements[i])) {
return false;
}
}
return true;
} | 3.68 |
hadoop_TFile_main | /**
* Dumping the TFile information.
*
* @param args
* A list of TFile paths.
*/
public static void main(String[] args) {
System.out.printf("TFile Dumper (TFile %s, BCFile %s)%n", TFile.API_VERSION
.toString(), BCFile.API_VERSION.toString());
if (args.length == 0) {
System.out
.println("Usage: java ... org.apache.hadoop.io.file.tfile.TFile tfile-path [tfile-path ...]");
System.exit(0);
}
Configuration conf = new Configuration();
for (String file : args) {
System.out.println("===" + file + "===");
try {
TFileDumper.dumpInfo(file, System.out, conf);
} catch (IOException e) {
e.printStackTrace(System.err);
}
}
} | 3.68 |
hbase_HFile_getAndResetChecksumFailuresCount | /**
* Number of checksum verification failures. It also clears the counter.
*/
public static final long getAndResetChecksumFailuresCount() {
return CHECKSUM_FAILURES.sumThenReset();
} | 3.68 |
hadoop_MoveStep_getMaxDiskErrors | /**
* Gets the maximum number of errors to be tolerated before this
* move operation is aborted.
* @return long.
*/
@Override
public long getMaxDiskErrors() {
return maxDiskErrors;
} | 3.68 |
hbase_SnapshotQuotaObserverChore_getPeriod | /**
* Extracts the period for the chore from the configuration.
* @param conf The configuration object.
* @return The configured chore period or the default value.
*/
static int getPeriod(Configuration conf) {
return conf.getInt(SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, SNAPSHOT_QUOTA_CHORE_PERIOD_DEFAULT);
} | 3.68 |
cron-utils_FieldConstraintsBuilder_monthsMapping | /**
* Creates months mapping.
*
* @return Map where string keys are three-letter month names and integer values are their 1-12 mappings
*/
private static Map<String, Integer> monthsMapping() {
final Map<String, Integer> stringMapping = new HashMap<>();
stringMapping.put("JAN", 1);
stringMapping.put("FEB", 2);
stringMapping.put("MAR", 3);
stringMapping.put("APR", 4);
stringMapping.put("MAY", 5);
stringMapping.put("JUN", 6);
stringMapping.put("JUL", 7);
stringMapping.put("AUG", 8);
stringMapping.put("SEP", 9);
stringMapping.put("OCT", 10);
stringMapping.put("NOV", 11);
stringMapping.put("DEC", 12);
return stringMapping;
} | 3.68 |
morf_TableReference_setName | /**
* @param name the name to set
* @deprecated Do not modify {@link TableReference} instances. This will be removed very soon.
*/
@Deprecated
public void setName(String name) {
this.name = name;
} | 3.68 |
druid_SQLAggregateExpr_getWithinGroup | // Kept for compatibility with the previous logic
@Deprecated
public SQLOrderBy getWithinGroup() {
return orderBy;
} | 3.68 |
hbase_ScannerContext_clearProgress | /**
* Clear away any progress that has been made so far. All progress fields are reset to initial
* values. Only clears progress that should reset between rows. {@link #getBlockSizeProgress()} is
* not reset because it increments for all blocks scanned whether the result is included or
* filtered.
*/
void clearProgress() {
progress.setFields(0, 0, 0, getBlockSizeProgress());
} | 3.68 |
hbase_MiniHBaseCluster_waitOnMaster | /**
* Wait for the specified master to stop. Removes this thread from list of running threads.
* @return Name of master that just went down.
*/
public String waitOnMaster(final int serverNumber) {
return this.hbaseCluster.waitOnMaster(serverNumber);
} | 3.68 |
hadoop_ReferenceCountMap_remove | /**
* Delete the reference. Decrease the reference count for the instance, if
* any. On all references removal delete the instance from the map.
*
* @param key Key to remove the reference.
*/
public void remove(E key) {
E value = referenceMap.get(key);
if (value != null && value.decrementAndGetRefCount() == 0) {
referenceMap.remove(key);
}
} | 3.68 |
flink_Path_getFileSystem | /**
* Returns the FileSystem that owns this Path.
*
* @return the FileSystem that owns this Path
* @throws IOException thrown if the file system could not be retrieved
*/
public FileSystem getFileSystem() throws IOException {
return FileSystem.get(this.toUri());
} | 3.68 |
hbase_MBeanSourceImpl_register | /**
* Register an mbean with the underlying metrics system
* @param serviceName Metrics service/system name
* @param metricsName name of the metrics object to expose
* @param theMbean the actual MBean
* @return ObjectName from jmx
*/
@Override
public ObjectName register(String serviceName, String metricsName, Object theMbean) {
return MBeans.register(serviceName, metricsName, theMbean);
} | 3.68 |
flink_Tuple11_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>
Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> of(
T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10) {
return new Tuple11<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10);
} | 3.68 |
hadoop_MapReduceJobPropertiesParser_extractMaxHeapOpts | /**
* Extracts the -Xmx heap option from the specified string.
*/
public static void extractMaxHeapOpts(final String javaOptions,
List<String> heapOpts,
List<String> others) {
for (String opt : javaOptions.split(" ")) {
Matcher matcher = MAX_HEAP_PATTERN.matcher(opt);
if (matcher.find()) {
heapOpts.add(opt);
} else {
others.add(opt);
}
}
} | 3.68 |
hbase_MasterProcedureScheduler_wakeRegions | /**
* Wake the procedures waiting for the specified regions
* @param procedure the procedure that was holding the regions
* @param regionInfos the list of regions the procedure was holding
*/
public void wakeRegions(final Procedure<?> procedure, final TableName table,
final RegionInfo... regionInfos) {
Arrays.sort(regionInfos, RegionInfo.COMPARATOR);
schedLock();
try {
int numProcs = 0;
final Procedure<?>[] nextProcs = new Procedure[regionInfos.length];
for (int i = 0; i < regionInfos.length; ++i) {
assert regionInfos[i].getTable().equals(table);
assert i == 0 || regionInfos[i] != regionInfos[i - 1]
: "duplicate region: " + regionInfos[i];
LockAndQueue regionLock = locking.getRegionLock(regionInfos[i].getEncodedName());
if (regionLock.releaseExclusiveLock(procedure)) {
if (!regionLock.isWaitingQueueEmpty()) {
// release one procedure at the time since regions has an xlock
nextProcs[numProcs++] = regionLock.removeFirst();
} else {
locking.removeRegionLock(regionInfos[i].getEncodedName());
}
}
}
// awake procedures if any
for (int i = numProcs - 1; i >= 0; --i) {
wakeProcedure(nextProcs[i]);
}
wakePollIfNeeded(numProcs);
// release the table shared-lock.
wakeTableSharedLock(procedure, table);
} finally {
schedUnlock();
}
} | 3.68 |
framework_WebBrowser_getTimezoneOffset | /**
* Returns the browser-reported TimeZone offset in milliseconds from GMT.
* This includes possible daylight saving adjustments, to figure out which
* TimeZone the user actually might be in, see
* {@link #getRawTimezoneOffset()}.
*
* @see WebBrowser#getRawTimezoneOffset()
* @return timezone offset in milliseconds, 0 if not available
*/
public int getTimezoneOffset() {
return timezoneOffset;
} | 3.68 |
hadoop_AbfsOutputStream_getOutputStreamStatistics | /**
* Getter method for AbfsOutputStream statistics.
*
* @return statistics for AbfsOutputStream.
*/
@VisibleForTesting
public AbfsOutputStreamStatistics getOutputStreamStatistics() {
return outputStreamStatistics;
} | 3.68 |
hbase_Scan_getBatch | /** Returns maximum number of values to return for a single call to next() */
public int getBatch() {
return this.batch;
} | 3.68 |
hadoop_ManifestSuccessData_putDiagnostic | /**
* Add a diagnostics entry.
* @param key name
* @param value value
*/
public void putDiagnostic(String key, String value) {
diagnostics.put(key, value);
} | 3.68 |
hbase_MasterRpcServices_rpcPreCheck | /**
* Checks for the following pre-checks in order:
* <ol>
* <li>Master is initialized</li>
* <li>Rpc caller has admin permissions</li>
* </ol>
* @param requestName name of rpc request. Used in reporting failures to provide context.
* @throws ServiceException If any of the above listed pre-check fails.
*/
private void rpcPreCheck(String requestName) throws ServiceException {
try {
server.checkInitialized();
requirePermission(requestName, Permission.Action.ADMIN);
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
} | 3.68 |
hudi_BaseRollbackHelper_performRollback | /**
* Performs all rollback actions that we have collected in parallel.
*/
public List<HoodieRollbackStat> performRollback(HoodieEngineContext context, HoodieInstant instantToRollback,
List<HoodieRollbackRequest> rollbackRequests) {
int parallelism = Math.max(Math.min(rollbackRequests.size(), config.getRollbackParallelism()), 1);
context.setJobStatus(this.getClass().getSimpleName(), "Perform rollback actions: " + config.getTableName());
// If not for conversion to HoodieRollbackInternalRequests, code fails. Using avro model (HoodieRollbackRequest) within spark.parallelize
// is failing with com.esotericsoftware.kryo.KryoException
// stack trace: https://gist.github.com/nsivabalan/b6359e7d5038484f8043506c8bc9e1c8
// related stack overflow post: https://issues.apache.org/jira/browse/SPARK-3601. Avro deserializes list as GenericData.Array.
List<SerializableHoodieRollbackRequest> serializableRequests = rollbackRequests.stream().map(SerializableHoodieRollbackRequest::new).collect(Collectors.toList());
return context.reduceByKey(maybeDeleteAndCollectStats(context, instantToRollback, serializableRequests, true, parallelism),
RollbackUtils::mergeRollbackStat, parallelism);
} | 3.68 |
framework_DragSourceExtensionConnector_addDraggable | /**
* Makes the given element draggable and adds class name.
*
* @param element
* Element to be set draggable.
*/
protected void addDraggable(Element element) {
element.setDraggable(Element.DRAGGABLE_TRUE);
element.addClassName(
getStylePrimaryName(element) + STYLE_SUFFIX_DRAGSOURCE);
element.addClassName(STYLE_NAME_DRAGGABLE);
} | 3.68 |
hbase_ZKUtil_createWithParents | /**
* Creates the specified node and all parent nodes required for it to exist. The creation of
* parent znodes is not atomic with the leaf znode creation but the data is written atomically
* when the leaf node is created. No watches are set and no errors are thrown if the node already
* exists. The nodes created are persistent and open access.
* @param zkw zk reference
* @param znode path of node
* @throws KeeperException if unexpected zookeeper exception
*/
public static void createWithParents(ZKWatcher zkw, String znode, byte[] data)
throws KeeperException {
try {
if (znode == null) {
return;
}
zkw.getRecoverableZooKeeper().create(znode, data, zkw.createACL(znode),
CreateMode.PERSISTENT);
} catch (KeeperException.NodeExistsException nee) {
return;
} catch (KeeperException.NoNodeException nne) {
createWithParents(zkw, getParent(znode));
createWithParents(zkw, znode, data);
} catch (InterruptedException ie) {
zkw.interruptedException(ie);
}
} | 3.68 |
flink_InMemoryPartition_getBlockCount | /** @return number of segments owned by partition */
public int getBlockCount() {
return this.partitionPages.size();
} | 3.68 |
hadoop_MountVolumeMap_getCapacityRatioByMountAndStorageType | /**
* Return the capacity ratio.
* If it does not exist, return 1 to use the full capacity.
*/
double getCapacityRatioByMountAndStorageType(String mount,
StorageType storageType) {
if (mountVolumeMapping.containsKey(mount)) {
return mountVolumeMapping.get(mount).getCapacityRatio(storageType);
}
return 1;
} | 3.68 |
flink_BatchTask_initLocalStrategies | /**
* NOTE: This method must be invoked after the invocation of {@code #initInputReaders()} and
* {@code #initInputSerializersAndComparators(int)}!
*/
protected void initLocalStrategies(int numInputs) throws Exception {
final MemoryManager memMan = getMemoryManager();
final IOManager ioMan = getIOManager();
this.localStrategies = new CloseableInputProvider<?>[numInputs];
this.inputs = new MutableObjectIterator<?>[numInputs];
this.excludeFromReset = new boolean[numInputs];
this.inputIsCached = new boolean[numInputs];
this.inputIsAsyncMaterialized = new boolean[numInputs];
this.materializationMemory = new int[numInputs];
// set up the local strategies first, such that the can work before any temp barrier is
// created
for (int i = 0; i < numInputs; i++) {
initInputLocalStrategy(i);
}
// we do another loop over the inputs, because we want to instantiate all
// sorters, etc before requesting the first input (as this call may block)
// we have two types of materialized inputs, and both are replayable (can act as a cache)
// The first variant materializes in a different thread and hence
// acts as a pipeline breaker. this one should only be there, if a pipeline breaker is
// needed.
// the second variant spills to the side and will not read unless the result is also
// consumed
// in a pipelined fashion.
this.resettableInputs = new SpillingResettableMutableObjectIterator<?>[numInputs];
this.tempBarriers = new TempBarrier<?>[numInputs];
for (int i = 0; i < numInputs; i++) {
final int memoryPages;
final boolean async = this.config.isInputAsynchronouslyMaterialized(i);
final boolean cached = this.config.isInputCached(i);
this.inputIsAsyncMaterialized[i] = async;
this.inputIsCached[i] = cached;
if (async || cached) {
memoryPages =
memMan.computeNumberOfPages(
this.config.getRelativeInputMaterializationMemory(i));
if (memoryPages <= 0) {
throw new Exception(
"Input marked as materialized/cached, but no memory for materialization provided.");
}
this.materializationMemory[i] = memoryPages;
} else {
memoryPages = 0;
}
if (async) {
@SuppressWarnings({"unchecked", "rawtypes"})
TempBarrier<?> barrier =
new TempBarrier(
this,
getInput(i),
this.inputSerializers[i],
memMan,
ioMan,
memoryPages,
emptyList());
barrier.startReading();
this.tempBarriers[i] = barrier;
this.inputs[i] = null;
} else if (cached) {
@SuppressWarnings({"unchecked", "rawtypes"})
SpillingResettableMutableObjectIterator<?> iter =
new SpillingResettableMutableObjectIterator(
getInput(i),
this.inputSerializers[i].getSerializer(),
getMemoryManager(),
getIOManager(),
memoryPages,
this);
this.resettableInputs[i] = iter;
this.inputs[i] = iter;
}
}
} | 3.68 |
hadoop_NMTokenCache_removeToken | /**
* Removes NMToken for specified node manager
* @param nodeAddr node address (host:port)
*/
@Private
@VisibleForTesting
public void removeToken(String nodeAddr) {
nmTokens.remove(nodeAddr);
} | 3.68 |
framework_AbsoluteLayout_setCSSString | /**
* Sets the position attributes using CSS syntax. Attributes not
* included in the string are reset to their unset states.
*
* <code><pre>
* setCSSString("top:10px;left:20%;z-index:16;");
* </pre></code>
*
* @param css
*/
public void setCSSString(String css) {
topValue = rightValue = bottomValue = leftValue = null;
topUnits = rightUnits = bottomUnits = leftUnits = Unit.PIXELS;
zIndex = -1;
if (css == null) {
return;
}
for (String cssProperty : css.split(";")) {
String[] keyValuePair = cssProperty.split(":");
String key = keyValuePair[0].trim();
if (key.isEmpty()) {
continue;
}
if (key.equals("z-index")) {
zIndex = Integer.parseInt(keyValuePair[1].trim());
} else {
String value;
if (keyValuePair.length > 1) {
value = keyValuePair[1].trim();
} else {
value = "";
}
String symbol = value.replaceAll("[0-9\\.\\-]+", "");
if (!symbol.isEmpty()) {
value = value.substring(0, value.indexOf(symbol))
.trim();
}
float v = Float.parseFloat(value);
Unit unit = Unit.getUnitFromSymbol(symbol);
if (key.equals("top")) {
topValue = v;
topUnits = unit;
} else if (key.equals("right")) {
rightValue = v;
rightUnits = unit;
} else if (key.equals("bottom")) {
bottomValue = v;
bottomUnits = unit;
} else if (key.equals("left")) {
leftValue = v;
leftUnits = unit;
}
}
}
markAsDirty();
} | 3.68 |
hbase_Procedure_setParentProcId | /**
* Called by the ProcedureExecutor to assign the parent to the newly created procedure.
*/
protected void setParentProcId(long parentProcId) {
this.parentProcId = parentProcId;
} | 3.68 |
hadoop_JobMetaData_getRecurrenceId | /**
* Get {@link RecurrenceId}.
*
* @return {@link RecurrenceId}.
*/
public final RecurrenceId getRecurrenceId() {
return recurrenceId;
} | 3.68 |
hadoop_SchedulingRequest_allocationTags | /**
* Set the <code>allocationTags</code> of the request.
*
* @see SchedulingRequest#setAllocationTags(Set)
* @param allocationTags <code>allocationsTags</code> of the request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
*/
@Public
@Unstable
public SchedulingRequestBuilder allocationTags(Set<String> allocationTags) {
schedulingRequest.setAllocationTags(allocationTags);
return this;
} | 3.68 |
pulsar_TopicsBase_buildMessage | // Build pulsar message from REST request.
private List<Message> buildMessage(ProducerMessages producerMessages, Schema schema,
String producerName, TopicName topicName) {
List<ProducerMessage> messages;
List<Message> pulsarMessages = new ArrayList<>();
messages = producerMessages.getMessages();
for (ProducerMessage message : messages) {
MessageMetadata messageMetadata = new MessageMetadata();
messageMetadata.setProducerName(producerName);
messageMetadata.setPublishTime(System.currentTimeMillis());
messageMetadata.setSequenceId(message.getSequenceId());
if (null != message.getReplicationClusters()) {
messageMetadata.addAllReplicateTos(message.getReplicationClusters());
}
if (null != message.getProperties()) {
messageMetadata.addAllProperties(message.getProperties().entrySet().stream().map(entry -> {
org.apache.pulsar.common.api.proto.KeyValue keyValue =
new org.apache.pulsar.common.api.proto.KeyValue();
keyValue.setKey(entry.getKey());
keyValue.setValue(entry.getValue());
return keyValue;
}).collect(Collectors.toList()));
}
if (null != message.getKey()) {
// If has key schema, encode partition key, else use plain text.
if (schema.getSchemaInfo().getType() == SchemaType.KEY_VALUE) {
KeyValueSchemaImpl kvSchema = (KeyValueSchemaImpl) schema;
messageMetadata.setPartitionKey(
Base64.getEncoder().encodeToString(encodeWithSchema(message.getKey(),
kvSchema.getKeySchema())));
messageMetadata.setPartitionKeyB64Encoded(true);
} else {
messageMetadata.setPartitionKey(message.getKey());
messageMetadata.setPartitionKeyB64Encoded(false);
}
}
if (null != message.getEventTime() && !message.getEventTime().isEmpty()) {
messageMetadata.setEventTime(Long.parseLong(message.getEventTime()));
}
if (message.isDisableReplication()) {
messageMetadata.clearReplicateTo();
messageMetadata.addReplicateTo("__local__");
}
if (message.getDeliverAt() != 0 && messageMetadata.hasEventTime()) {
messageMetadata.setDeliverAtTime(message.getDeliverAt());
} else if (message.getDeliverAfterMs() != 0) {
messageMetadata.setDeliverAtTime(messageMetadata.getEventTime() + message.getDeliverAfterMs());
}
if (schema.getSchemaInfo().getType() == SchemaType.KEY_VALUE) {
KeyValueSchemaImpl kvSchema = (KeyValueSchemaImpl) schema;
pulsarMessages.add(MessageImpl.create(messageMetadata,
ByteBuffer.wrap(encodeWithSchema(message.getPayload(), kvSchema.getValueSchema())),
schema, topicName.toString()));
} else {
pulsarMessages.add(MessageImpl.create(messageMetadata,
ByteBuffer.wrap(encodeWithSchema(message.getPayload(), schema)), schema,
topicName.toString()));
}
}
return pulsarMessages;
} | 3.68 |
hadoop_AbfsHttpOperation_getConnProperty | /**
* Gets the connection request property for a key.
* @param key The request property key.
* @return request property value.
*/
String getConnProperty(String key) {
return connection.getRequestProperty(key);
} | 3.68 |
hadoop_DockerCommandExecutor_executeDockerCommand | /**
* Execute a docker command and return the output.
*
* @param dockerCommand the docker command to run.
* @param containerId the id of the container.
* @param env environment for the container.
* @param privilegedOperationExecutor the privileged operations executor.
* @param disableFailureLogging disable logging for known rc failures.
* @return the output of the operation.
* @throws ContainerExecutionException if the operation fails.
*/
public static String executeDockerCommand(DockerCommand dockerCommand,
String containerId, Map<String, String> env,
PrivilegedOperationExecutor privilegedOperationExecutor,
boolean disableFailureLogging, Context nmContext)
throws ContainerExecutionException {
PrivilegedOperation dockerOp = dockerCommand.preparePrivilegedOperation(
dockerCommand, containerId, env, nmContext);
if (disableFailureLogging) {
dockerOp.disableFailureLogging();
}
LOG.debug("Running docker command: {}", dockerCommand);
try {
String result = privilegedOperationExecutor
.executePrivilegedOperation(null, dockerOp, null,
env, true, false);
if (result != null && !result.isEmpty()) {
result = result.trim();
}
return result;
} catch (PrivilegedOperationException e) {
throw new ContainerExecutionException("Docker operation failed",
e.getExitCode(), e.getOutput(), e.getErrorOutput());
}
} | 3.68 |
framework_VAbstractCalendarPanel_onMouseOut | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.MouseOutHandler#onMouseOut(com.google
* .gwt.event.dom.client.MouseOutEvent)
*/
@Override
public void onMouseOut(MouseOutEvent event) {
if (mouseTimer != null) {
mouseTimer.cancel();
}
} | 3.68 |
hbase_ClientSnapshotDescriptionUtils_assertSnapshotRequestIsValid | /**
* Check to make sure that the description of the snapshot requested is valid
* @param snapshot description of the snapshot
* @throws IllegalArgumentException if the name of the snapshot or the name of the table to
* snapshot are not valid names
*/
public static void assertSnapshotRequestIsValid(SnapshotProtos.SnapshotDescription snapshot)
throws IllegalArgumentException {
// make sure the snapshot name is valid
TableName.isLegalTableQualifierName(Bytes.toBytes(snapshot.getName()), true);
if (snapshot.hasTable()) {
// make sure the table name is valid, this will implicitly check validity
TableName tableName = TableName.valueOf(snapshot.getTable());
if (tableName.isSystemTable()) {
throw new IllegalArgumentException("System table snapshots are not allowed");
}
}
} | 3.68 |
framework_FocusableScrollPanel_getScrollPosition | /**
* Gets the vertical scroll position.
*
* @return the vertical scroll position, in pixels
*/
public int getScrollPosition() {
if (getElement().getPropertyJSO("_vScrollTop") != null) {
return getElement().getPropertyInt("_vScrollTop");
} else {
return getElement().getScrollTop();
}
} | 3.68 |
flink_ManuallyTriggeredScheduledExecutorService_trigger | /**
* Triggers the next queued runnable and executes it synchronously. This method throws an
* exception if no Runnable is currently queued.
*/
public void trigger() {
final Runnable next;
synchronized (queuedRunnables) {
next = queuedRunnables.removeFirst();
}
next.run();
} | 3.68 |
flink_KryoSerializer_getKryoInstance | /**
* Returns the Chill Kryo Serializer which is implicitly added to the classpath via
* flink-runtime. Falls back to the default Kryo serializer if it can't be found.
*
* @return The Kryo serializer instance.
*/
private Kryo getKryoInstance() {
try {
// check if ScalaKryoInstantiator is in class path (coming from Twitter's Chill
// library).
// This will be true if Flink's Scala API is used.
Class<?> chillInstantiatorClazz =
Class.forName("org.apache.flink.runtime.types.FlinkScalaKryoInstantiator");
Object chillInstantiator = chillInstantiatorClazz.newInstance();
// obtain a Kryo instance through Twitter Chill
Method m = chillInstantiatorClazz.getMethod("newKryo");
return (Kryo) m.invoke(chillInstantiator);
} catch (ClassNotFoundException
| InstantiationException
| NoSuchMethodException
| IllegalAccessException
| InvocationTargetException e) {
if (LOG.isDebugEnabled()) {
LOG.info("Kryo serializer scala extensions are not available.", e);
} else {
LOG.info("Kryo serializer scala extensions are not available.");
}
Kryo.DefaultInstantiatorStrategy initStrategy = new Kryo.DefaultInstantiatorStrategy();
initStrategy.setFallbackInstantiatorStrategy(new StdInstantiatorStrategy());
Kryo kryo = new Kryo();
kryo.setInstantiatorStrategy(initStrategy);
if (flinkChillPackageRegistrar != null) {
flinkChillPackageRegistrar.registerSerializers(kryo);
}
return kryo;
}
} | 3.68 |
framework_Navigator_addView | /**
* Registers a view class for a view name.
* <p>
* Registering another view with a name that is already registered
* overwrites the old registration of the same type.
* <p>
* A new view instance is created every time a view is requested.
*
* @param viewName
* String that identifies a view (not null nor empty string)
* @param viewClass
* {@link View} class to instantiate when a view is requested
* (not null)
*/
public void addView(String viewName, Class<? extends View> viewClass) {
// Check parameters
if (viewName == null || viewClass == null) {
throw new IllegalArgumentException(
"view and viewClass must be non-null");
}
removeView(viewName);
addProvider(new ClassBasedViewProvider(viewName, viewClass));
} | 3.68 |
dubbo_RpcContext_asyncCall | /**
* One-way async call: sends the request only; the result is not required.
*
* @param runnable
*/
public void asyncCall(Runnable runnable) {
try {
setAttachment(Constants.RETURN_KEY, Boolean.FALSE.toString());
runnable.run();
} catch (Throwable e) {
// FIXME should put exception in future?
throw new RpcException("oneway call error ." + e.getMessage(), e);
} finally {
removeAttachment(Constants.RETURN_KEY);
}
} | 3.68 |
hadoop_StageConfig_getIoProcessors | /**
* Submitter for doing IO against the store other than
* manifest processing.
*/
public TaskPool.Submitter getIoProcessors() {
return ioProcessors;
} | 3.68 |
querydsl_JDOQueryFactory_selectFrom | /**
* Create a new {@link JDOQuery} instance with the given projection
*
* @param expr projection and source
* @param <T>
* @return select(expr).from(expr)
*/
public <T> JDOQuery<T> selectFrom(EntityPath<T> expr) {
return select(expr).from(expr);
} | 3.68 |
hbase_Client_removeExtraHeader | /**
* Remove an extra header.
*/
public void removeExtraHeader(final String name) {
extraHeaders.remove(name);
} | 3.68 |
hbase_ReplicationSourceManager_refreshSources | /**
* Close the previous replication sources of this peer id and open new sources to trigger the new
* replication state changes or new replication config changes. Here we don't need to change the
* replication queue storage; we only need to enqueue all logs to the new replication source
* @param peerId the id of the replication peer
*/
public void refreshSources(String peerId) throws ReplicationException, IOException {
String terminateMessage = "Peer " + peerId
+ " state or config changed. Will close the previous replication source and open a new one";
ReplicationPeer peer = replicationPeers.getPeer(peerId);
ReplicationQueueId queueId = new ReplicationQueueId(server.getServerName(), peerId);
ReplicationSourceInterface src;
// synchronized on latestPaths to avoid missing the new log
synchronized (this.latestPaths) {
ReplicationSourceInterface toRemove = this.sources.remove(peerId);
if (toRemove != null) {
LOG.info("Terminate replication source for " + toRemove.getPeerId());
// Do not clear metrics
toRemove.terminate(terminateMessage, null, false);
}
src = createRefreshedSource(queueId, peer);
this.sources.put(peerId, src);
for (NavigableSet<String> walsByGroup : walsById.get(queueId).values()) {
walsByGroup.forEach(wal -> src.enqueueLog(new Path(this.logDir, wal)));
}
}
LOG.info("Startup replication source for " + src.getPeerId());
src.startup();
List<ReplicationSourceInterface> toStartup = new ArrayList<>();
// synchronized on oldsources to avoid race with NodeFailoverWorker
synchronized (this.oldsources) {
List<ReplicationQueueId> oldSourceQueueIds = new ArrayList<>();
for (Iterator<ReplicationSourceInterface> iter = this.oldsources.iterator(); iter
.hasNext();) {
ReplicationSourceInterface oldSource = iter.next();
if (oldSource.getPeerId().equals(peerId)) {
oldSourceQueueIds.add(oldSource.getQueueId());
oldSource.terminate(terminateMessage);
iter.remove();
}
}
for (ReplicationQueueId oldSourceQueueId : oldSourceQueueIds) {
ReplicationSourceInterface recoveredReplicationSource =
createRefreshedSource(oldSourceQueueId, peer);
this.oldsources.add(recoveredReplicationSource);
for (NavigableSet<Path> walsByGroup : walsByIdRecoveredQueues.get(oldSourceQueueId)
.values()) {
walsByGroup.forEach(wal -> recoveredReplicationSource.enqueueLog(wal));
}
toStartup.add(recoveredReplicationSource);
}
}
for (ReplicationSourceInterface replicationSource : toStartup) {
replicationSource.startup();
}
} | 3.68 |
graphhopper_AbstractSRTMElevationProvider_calcIntKey | // use int key instead of string for lower memory usage
int calcIntKey(double lat, double lon) {
// we could use LinearKeyAlgo but this is simpler as we only need integer precision:
return (down(lat) + 90) * 1000 + down(lon) + 180;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithConcatenation | /**
* Tests concatenation in a select with {@linkplain FieldReference}s and
* {@linkplain FieldLiteral}s.
*/
@Test
public void testSelectWithConcatenation() {
SelectStatement stmt = new SelectStatement(new ConcatenatedField(new FieldReference("assetDescriptionLine1"), new FieldLiteral(
" "), new FieldReference("assetDescriptionLine2")).as("assetDescription")).from(new TableReference("schedule"));
String result = testDialect.convertStatementToSQL(stmt);
assertEquals("Select script should match expected", expectedSelectWithConcatenation1(), result);
stmt = new SelectStatement(new ConcatenatedField(new FieldReference("assetDescriptionLine1"), new FieldLiteral("XYZ"),
new FieldReference("assetDescriptionLine2")).as("assetDescription")).from(new TableReference("schedule"));
result = testDialect.convertStatementToSQL(stmt);
assertEquals("Select script should match expected", expectedSelectWithConcatenation2(), result);
} | 3.68 |
hbase_JmxCacheBuster_clearJmxCache | /**
* For JMX to forget about all previously exported metrics.
*/
public static void clearJmxCache() {
if (LOG.isTraceEnabled()) {
LOG.trace("clearing JMX Cache" + StringUtils.stringifyException(new Exception()));
}
// If there are more then 100 ms before the executor will run then everything should be merged.
ScheduledFuture future = fut.get();
if ((future != null && (!future.isDone() && future.getDelay(TimeUnit.MILLISECONDS) > 100))) {
// BAIL OUT
return;
}
if (stopped.get()) {
return;
}
future = executor.getExecutor().schedule(new JmxCacheBusterRunnable(), 5, TimeUnit.SECONDS);
fut.set(future);
} | 3.68 |
flink_FsStateBackend_getWriteBufferSize | /**
* Gets the write buffer size for created checkpoint stream.
*
* <p>If not explicitly configured, this is the default value of {@link
* CheckpointingOptions#FS_WRITE_BUFFER_SIZE}.
*
* @return The write buffer size, in bytes.
*/
public int getWriteBufferSize() {
return writeBufferSize >= 0
? writeBufferSize
: CheckpointingOptions.FS_WRITE_BUFFER_SIZE.defaultValue();
} | 3.68 |
hadoop_FederationNamespaceInfo_getClusterId | /**
* The HDFS cluster id for this namespace.
*
* @return Cluster identifier.
*/
public String getClusterId() {
return this.clusterId;
} | 3.68 |
hadoop_CommitContext_getIOStatisticsContext | /**
* IOStatistics context of the created thread.
* @return the IOStatistics.
*/
public IOStatisticsContext getIOStatisticsContext() {
return ioStatisticsContext;
} | 3.68 |
flink_ExecNodeUtil_createOneInputTransformation | /** Create a {@link OneInputTransformation} with memoryBytes. */
public static <I, O> OneInputTransformation<I, O> createOneInputTransformation(
Transformation<I> input,
TransformationMetadata transformationMeta,
StreamOperatorFactory<O> operatorFactory,
TypeInformation<O> outputType,
int parallelism,
long memoryBytes,
boolean parallelismConfigured) {
OneInputTransformation<I, O> transformation =
new OneInputTransformation<>(
input,
transformationMeta.getName(),
operatorFactory,
outputType,
parallelism,
parallelismConfigured);
setManagedMemoryWeight(transformation, memoryBytes);
transformationMeta.fill(transformation);
return transformation;
} | 3.68 |
hbase_RequestConverter_buildMoveRegionRequest | /**
* Create a protocol buffer MoveRegionRequest
* @return A MoveRegionRequest
*/
public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName,
ServerName destServerName) {
MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder();
builder
.setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, encodedRegionName));
if (destServerName != null) {
builder.setDestServerName(ProtobufUtil.toServerName(destServerName));
}
return builder.build();
} | 3.68 |
pulsar_SingletonCleanerListener_objectMapperFactoryClearCaches | // Call ObjectMapperFactory.clearCaches() using reflection to clear up classes held in
// the singleton Jackson ObjectMapper instances
private static void objectMapperFactoryClearCaches() {
if (OBJECTMAPPERFACTORY_CLEARCACHES_METHOD != null) {
try {
OBJECTMAPPERFACTORY_CLEARCACHES_METHOD.invoke(null);
} catch (IllegalAccessException | InvocationTargetException e) {
LOG.warn("Cannot clean singleton ObjectMapper caches", e);
}
}
} | 3.68 |
hudi_TransactionUtils_getLastCompletedTxnInstantAndMetadata | /**
* Get the last completed transaction hoodie instant and {@link HoodieCommitMetadata#getExtraMetadata()}.
*
* @param metaClient
* @return
*/
public static Option<Pair<HoodieInstant, Map<String, String>>> getLastCompletedTxnInstantAndMetadata(
HoodieTableMetaClient metaClient) {
Option<HoodieInstant> hoodieInstantOption = metaClient.getActiveTimeline().getCommitsTimeline()
.filterCompletedInstants().lastInstant();
return getHoodieInstantAndMetaDataPair(metaClient, hoodieInstantOption);
} | 3.68 |
hadoop_NamenodeStatusReport_setNamenodeInfo | /**
* Set the namenode blocks information.
*
* @param numCorruptFiles number of corrupt files.
* @param numOfMissingBlocksWithReplicationFactorOne number of missing
* blocks with rep one.
* @param highestPriorityLowRedundancyRepBlocks number of high priority low
* redundancy rep blocks.
* @param highPriorityLowRedundancyECBlocks number of high priority low
* redundancy EC blocks.
*/
public void setNamenodeInfo(int numCorruptFiles,
long numOfMissingBlocksWithReplicationFactorOne,
long highestPriorityLowRedundancyRepBlocks,
long highPriorityLowRedundancyECBlocks) {
this.corruptFilesCount = numCorruptFiles;
this.numberOfMissingBlocksWithReplicationFactorOne =
numOfMissingBlocksWithReplicationFactorOne;
this.highestPriorityLowRedundancyReplicatedBlocks =
highestPriorityLowRedundancyRepBlocks;
this.highestPriorityLowRedundancyECBlocks =
highPriorityLowRedundancyECBlocks;
} | 3.68 |
hadoop_DatanodeLocalInfo_getSoftwareVersion | /** get software version */
public String getSoftwareVersion() {
return this.softwareVersion;
} | 3.68 |
pulsar_SchemaDataValidator_validateSchemaData | /**
* Validate if the schema data is well formed.
*
* @param schemaData schema data to validate
* @throws InvalidSchemaDataException if the schema data is not in a valid form.
*/
static void validateSchemaData(SchemaData schemaData) throws InvalidSchemaDataException {
switch (schemaData.getType()) {
case AVRO:
case JSON:
case PROTOBUF:
StructSchemaDataValidator.of().validate(schemaData);
break;
case PROTOBUF_NATIVE:
ProtobufNativeSchemaDataValidator.of().validate(schemaData);
break;
case STRING:
StringSchemaDataValidator.of().validate(schemaData);
break;
case BOOLEAN:
case INT8:
case INT16:
case INT32:
case INT64:
case FLOAT:
case DOUBLE:
case DATE:
case TIME:
case TIMESTAMP:
case INSTANT:
case LOCAL_DATE:
case LOCAL_TIME:
case LOCAL_DATE_TIME:
PrimitiveSchemaDataValidator.of().validate(schemaData);
break;
case NONE:
case BYTES:
// `NONE` and `BYTES` schema is not stored
break;
case AUTO:
case AUTO_CONSUME:
case AUTO_PUBLISH:
throw new InvalidSchemaDataException(
"Schema " + schemaData.getType() + " is a client-side schema type");
case KEY_VALUE:
KeyValue<SchemaData, SchemaData> kvSchema =
KeyValueSchemaCompatibilityCheck.decodeKeyValueSchemaData(schemaData);
validateSchemaData(kvSchema.getKey());
validateSchemaData(kvSchema.getValue());
break;
default:
throw new InvalidSchemaDataException("Unknown schema type : " + schemaData.getType());
}
} | 3.68 |
flink_JobSchedulingPlan_empty | /**
* Create an empty {@link JobSchedulingPlan} with no information about vertices or allocations.
*/
public static JobSchedulingPlan empty() {
return new JobSchedulingPlan(VertexParallelism.empty(), Collections.emptyList());
} | 3.68 |
querydsl_GeometryExpression_srid | /**
* Returns the Spatial Reference System ID for this geometric object. This will normally be a
* foreign key to an index of reference systems stored in either the same or some other datastore.
*
* @return SRID
*/
public NumberExpression<Integer> srid() {
if (srid == null) {
srid = Expressions.numberOperation(Integer.class, SpatialOps.SRID, mixin);
}
return srid;
} | 3.68 |