name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
flink_HiveParserSqlFunctionConverter_getName | // TODO: this is not valid. Function names for built-in UDFs are specified in
// FunctionRegistry, and only happen to match annotations. For user UDFs, the
// name is what user specifies at creation time (annotation can be absent,
// different, or duplicate some other function).
private static String getName(GenericUDF hiveUDF) {
String udfName = null;
if (hiveUDF instanceof GenericUDFBridge) {
udfName = hiveUDF.getUdfName();
} else {
Class<? extends GenericUDF> udfClass = hiveUDF.getClass();
Description udfAnnotation = udfClass.getAnnotation(Description.class);
if (udfAnnotation != null) {
udfName = udfAnnotation.name();
if (udfName != null) {
String[] aliases = udfName.split(",");
if (aliases.length > 0) {
udfName = aliases[0];
}
}
}
if (udfName == null || udfName.isEmpty()) {
udfName = hiveUDF.getClass().getName();
int indx = udfName.lastIndexOf(".");
if (indx >= 0) {
indx += 1;
udfName = udfName.substring(indx);
}
}
}
return udfName;
} | 3.68 |
hbase_TableDescriptorBuilder_getValues | /**
* Getter for fetching an unmodifiable {@link #values} map.
* @return unmodifiable map {@link #values}.
* @see #values
*/
@Override
public Map<Bytes, Bytes> getValues() {
// shallow pointer copy
return Collections.unmodifiableMap(values);
} | 3.68 |
druid_SQLMethodInvokeExpr_getParameters | /**
* @deprecated Use {@link #getArguments()} instead.
*/
public List<SQLExpr> getParameters() {
return this.arguments;
} | 3.68 |
framework_AbstractComponentConnector_onDragSourceDetached | /**
* Invoked when a {@link DragSourceExtensionConnector} has been removed from
* this component.
* <p>
* By default, does nothing.
* <p>
* This is a framework internal method, and should not be invoked manually.
*
* @since 8.1
* @see #onDragSourceAttached()
*/
public void onDragSourceDetached() {
} | 3.68 |
flink_CheckpointConfig_setTolerableCheckpointFailureNumber | /**
* This defines how many consecutive checkpoint failures will be tolerated before the whole job
* is failed over. The default value is `0`, which means no checkpoint failures will be
* tolerated, and the job will fail on first reported checkpoint failure.
*/
public void setTolerableCheckpointFailureNumber(int tolerableCheckpointFailureNumber) {
if (tolerableCheckpointFailureNumber < 0) {
throw new IllegalArgumentException(
"The tolerable failure checkpoint number must be non-negative.");
}
configuration.set(
ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER,
tolerableCheckpointFailureNumber);
} | 3.68 |
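A minimal usage sketch for the setting above; the checkpoint interval and failure count are illustrative and assume a standard DataStream job setup.

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class TolerableFailuresExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Take a checkpoint every 10 seconds.
        env.enableCheckpointing(10_000L);
        // Tolerate up to 3 consecutive checkpoint failures before failing the job.
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(3);
    }
}
```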
flink_FlinkContainersSettings_build | /**
* Returns a {@code FlinkContainersConfig} built from the parameters previously set.
*
* @return A {@code FlinkContainersConfig} built with parameters of this {@code
* FlinkContainersConfig.Builder}.
*/
public FlinkContainersSettings build() {
return new FlinkContainersSettings(this);
} | 3.68 |
hadoop_HdfsConfiguration_init | /**
* This method is here so that when invoked, HdfsConfiguration is class-loaded
* if it hasn't already been previously loaded. Upon loading the class, the
* static initializer block above will be executed to add the deprecated keys
* and to add the default resources. It is safe for this method to be called
* multiple times as the static initializer block will only get invoked once.
*
* This replaces the previous, dangerous practice of other classes calling
* Configuration.addDefaultResource("hdfs-default.xml") directly without
* loading this class first, thereby skipping the key deprecation.
*/
public static void init() {
} | 3.68 |
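A short sketch of the intended calling pattern described above; the configuration key looked up afterwards is just an example.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsConfigExample {
    public static void main(String[] args) {
        // Force class loading so hdfs-default.xml/hdfs-site.xml and the
        // deprecated-key mappings are registered before any lookup.
        HdfsConfiguration.init();
        Configuration conf = new Configuration();
        System.out.println(conf.get("dfs.replication"));
    }
}
```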
framework_SharedUtil_propertyIdToHumanFriendly | /**
* Converts a property id to a human friendly format. Handles nested
* properties by only considering the last part, e.g. "address.streetName"
* is treated the same as "streetName" by this method.
*
* @since 7.4
* @param propertyId
* The propertyId to format
* @return A human friendly version of the property id
*/
public static String propertyIdToHumanFriendly(Object propertyId) {
String string = propertyId.toString();
if (string.isEmpty()) {
return "";
}
// For nested properties, only use the last part
int dotLocation = string.lastIndexOf('.');
if (dotLocation > 0 && dotLocation < string.length() - 1) {
string = string.substring(dotLocation + 1);
}
if (string.matches("^[0-9A-Z_]+$")) {
// Deal with UPPER_CASE_PROPERTY_IDS
return upperCaseUnderscoreToHumanFriendly(string);
}
return camelCaseToHumanFriendly(string);
} | 3.68 |
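A hedged usage sketch of the method above; the package com.vaadin.shared.util for SharedUtil is assumed, and the exact capitalization of the output depends on the helper conversion methods it delegates to.

```java
import com.vaadin.shared.util.SharedUtil;

public class PropertyIdExample {
    public static void main(String[] args) {
        // Nested property: only the part after the last '.' ("streetName") is considered,
        // then converted from camel case (e.g. roughly "Street Name").
        System.out.println(SharedUtil.propertyIdToHumanFriendly("address.streetName"));
        // UPPER_CASE_PROPERTY_IDS take the underscore branch.
        System.out.println(SharedUtil.propertyIdToHumanFriendly("FIRST_NAME"));
    }
}
```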
flink_JavaFieldPredicates_isStatic | /**
* Match the static modifier of the {@link JavaField}.
*
* @return A {@link DescribedPredicate} returning true, if and only if the tested {@link
* JavaField} has the static modifier.
*/
public static DescribedPredicate<JavaField> isStatic() {
return DescribedPredicate.describe(
"static", field -> field.getModifiers().contains(JavaModifier.STATIC));
} | 3.68 |
framework_ColorPickerPopup_setHSVTabVisible | /**
* Sets the HSV tab visibility.
*
* @param visible
* The visibility of the HSV tab
*/
public void setHSVTabVisible(boolean visible) {
if (visible && !isTabVisible(hsvTab)) {
tabs.addTab(hsvTab, "HSV", null);
checkIfTabsNeeded();
} else if (!visible && isTabVisible(hsvTab)) {
tabs.removeComponent(hsvTab);
checkIfTabsNeeded();
}
} | 3.68 |
hadoop_AbstractS3ACommitter_getDestinationFS | /**
* Get the destination filesystem from the output path and the configuration.
* @param out output path
* @param config job/task config
* @return the associated FS
* @throws PathCommitException output path isn't to an S3A FS instance.
* @throws IOException failure to instantiate the FS.
*/
protected FileSystem getDestinationFS(Path out, Configuration config)
throws IOException {
return getS3AFileSystem(out, config,
requiresDelayedCommitOutputInFileSystem());
} | 3.68 |
flink_ArchivedExecutionGraph_createSparseArchivedExecutionGraph | /**
* Create a sparse ArchivedExecutionGraph for a job. Most fields will be empty; only the job status
* and error-related fields are set.
*/
public static ArchivedExecutionGraph createSparseArchivedExecutionGraph(
JobID jobId,
String jobName,
JobStatus jobStatus,
@Nullable Throwable throwable,
@Nullable JobCheckpointingSettings checkpointingSettings,
long initializationTimestamp) {
return createSparseArchivedExecutionGraph(
jobId,
jobName,
jobStatus,
Collections.emptyMap(),
Collections.emptyList(),
throwable,
checkpointingSettings,
initializationTimestamp);
} | 3.68 |
flink_DataStream_printToErr | /**
* Writes a DataStream to the standard error stream (stderr).
*
* <p>For each element of the DataStream the result of {@link Object#toString()} is written.
*
* <p>NOTE: This will print to stderr on the machine where the code is executed, i.e. the Flink
* worker.
*
* @param sinkIdentifier The string to prefix the output with.
* @return The closed DataStream.
*/
@PublicEvolving
public DataStreamSink<T> printToErr(String sinkIdentifier) {
PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(sinkIdentifier, true);
return addSink(printFunction).name("Print to Std. Err");
} | 3.68 |
hadoop_ActiveAuditManagerS3A_prune | /**
* Prune all null weak references, calling the referenceLost
* callback for each one.
*
* Non-atomic and non-blocking.
* @return the number of entries pruned.
*/
@VisibleForTesting
int prune() {
return activeSpanMap.prune();
} | 3.68 |
flink_SplitEnumeratorContext_registeredReadersOfAttempts | /**
* Get the currently registered readers of all the subtask attempts. The mapping is from subtask
* id to a map which maps an attempt to its reader info.
*
* @return the currently registered readers.
*/
default Map<Integer, Map<Integer, ReaderInfo>> registeredReadersOfAttempts() {
throw new UnsupportedOperationException();
} | 3.68 |
rocketmq-connect_ClusterManagementServiceImpl_prepare | /**
* Preparation before startup.
*
* @param connectConfig the worker configuration used to resolve the consumer group and cluster store topic
*/
private void prepare(WorkerConfig connectConfig) {
String consumerGroup = this.defaultMQPullConsumer.getConsumerGroup();
Set<String> consumerGroupSet = ConnectUtil.fetchAllConsumerGroupList(connectConfig);
if (!consumerGroupSet.contains(consumerGroup)) {
log.info("try to create consumerGroup: {}!", consumerGroup);
ConnectUtil.createSubGroup(connectConfig, consumerGroup);
}
String clusterStoreTopic = connectConfig.getClusterStoreTopic();
if (!ConnectUtil.isTopicExist(connectConfig, clusterStoreTopic)) {
log.info("try to create cluster store topic: {}!", clusterStoreTopic);
TopicConfig topicConfig = new TopicConfig(clusterStoreTopic, 1, 1, 6);
ConnectUtil.createTopic(connectConfig, topicConfig);
}
} | 3.68 |
framework_Table_setColumnHeader | /**
* Sets the column header for the specified column.
*
* @param propertyId
* the propertyId identifying the column.
* @param header
* the header to set.
*/
public void setColumnHeader(Object propertyId, String header) {
if (header == null) {
columnHeaders.remove(propertyId);
} else {
columnHeaders.put(propertyId, header);
}
markAsDirty();
} | 3.68 |
framework_VaadinService_getCurrentRequest | /**
* Gets the currently processed Vaadin request. The current request is
* automatically defined when the request is started. The current request
* can not be used in e.g. background threads because of the way server
* implementations reuse request instances.
*
* @return the current Vaadin request instance if available, otherwise
* <code>null</code>
*
* @see #setCurrentInstances(VaadinRequest, VaadinResponse)
*/
public static VaadinRequest getCurrentRequest() {
return VaadinRequest.getCurrent();
} | 3.68 |
framework_ExternalResource_setMIMEType | /**
* Sets the MIME type of the resource.
*
* @param mimeType
*            the MIME type to set
*/
public void setMIMEType(String mimeType) {
this.mimeType = mimeType;
} | 3.68 |
hadoop_PartitionResourcesInfo_getUserAmLimit | /**
* @return the userAmLimit
*/
public ResourceInfo getUserAmLimit() {
return userAmLimit;
} | 3.68 |
hadoop_AMRMClientAsyncImpl_serviceStop | /**
* Tells the heartbeat and handler threads to stop and waits for them to
* terminate.
*/
@Override
protected void serviceStop() throws Exception {
keepRunning = false;
heartbeatThread.interrupt();
try {
heartbeatThread.join();
} catch (InterruptedException ex) {
LOG.error("Error joining with heartbeat thread", ex);
}
client.stop();
handlerThread.interrupt();
super.serviceStop();
} | 3.68 |
morf_AbstractSqlDialectTest_expectedLeftTrim | /**
* @return The expected SQL for a Left Trim
*/
protected String expectedLeftTrim() {
return "SELECT LTRIM(field1) FROM " + tableName("schedule");
} | 3.68 |
hudi_FlinkOptions_getPropertiesWithPrefix | /**
* Collects the config options that start with the specified prefix {@code prefix} into a 'key'='value' map.
*/
public static Map<String, String> getPropertiesWithPrefix(Map<String, String> options, String prefix) {
final Map<String, String> hoodieProperties = new HashMap<>();
if (hasPropertyOptions(options, prefix)) {
options.keySet().stream()
.filter(key -> key.startsWith(prefix))
.forEach(key -> {
final String value = options.get(key);
final String subKey = key.substring(prefix.length());
hoodieProperties.put(subKey, value);
});
}
return hoodieProperties;
} | 3.68 |
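A small usage sketch of the helper above, assuming FlinkOptions lives at org.apache.hudi.configuration.FlinkOptions; the option keys are illustrative.

```java
import org.apache.hudi.configuration.FlinkOptions;

import java.util.HashMap;
import java.util.Map;

public class PrefixExample {
    public static void main(String[] args) {
        Map<String, String> options = new HashMap<>();
        options.put("hoodie.datasource.write.recordkey.field", "id");
        options.put("table.type", "MERGE_ON_READ");
        // Only keys starting with "hoodie." are kept; the prefix is stripped from the key.
        Map<String, String> hoodieProps =
                FlinkOptions.getPropertiesWithPrefix(options, "hoodie.");
        // -> {datasource.write.recordkey.field=id}
        System.out.println(hoodieProps);
    }
}
```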
flink_CheckpointConfig_enableApproximateLocalRecovery | /**
* Enables the approximate local recovery mode.
*
* <p>In this recovery mode, when a task fails, the entire downstream of the failed task
* (including the failed task itself) restarts.
*
* <p>Notice that 1. Approximate recovery may lead to data loss. The amount of data which leads
* the failed task from the state of the last completed checkpoint to the state when the task
* fails is lost. 2. In the next version, we will support restarting the set of failed tasks
* only. In this version, we only support downstream restarts when a task fails. 3. It is
* only an internal feature for now.
*
* @param enabled Flag to indicate whether approximate local recovery is enabled.
*/
@Experimental
public void enableApproximateLocalRecovery(boolean enabled) {
configuration.set(ExecutionCheckpointingOptions.APPROXIMATE_LOCAL_RECOVERY, enabled);
} | 3.68 |
pulsar_FunctionRuntimeManager_getFunctionStats | /**
* Get stats of all function instances.
*
* @param tenant the tenant the function belongs to
* @param namespace the namespace the function belongs to
* @param functionName the function name
* @param uri the original request uri, used to redirect the request if this worker is not the owner
* @return the aggregated function stats
* @throws PulsarAdminException
*/
public FunctionStatsImpl getFunctionStats(String tenant, String namespace,
String functionName, URI uri) throws PulsarAdminException {
Collection<Assignment> assignments = this.findFunctionAssignments(tenant, namespace, functionName);
FunctionStatsImpl functionStats = new FunctionStatsImpl();
if (assignments.isEmpty()) {
return functionStats;
}
if (runtimeFactory.externallyManaged()) {
Assignment assignment = assignments.iterator().next();
boolean isOwner = this.workerConfig.getWorkerId().equals(assignment.getWorkerId());
if (isOwner) {
int parallelism = assignment.getInstance().getFunctionMetaData().getFunctionDetails().getParallelism();
for (int i = 0; i < parallelism; ++i) {
FunctionInstanceStatsDataImpl functionInstanceStatsData =
getFunctionInstanceStats(tenant, namespace, functionName, i, null);
FunctionInstanceStatsImpl functionInstanceStats = new FunctionInstanceStatsImpl();
functionInstanceStats.setInstanceId(i);
functionInstanceStats.setMetrics(functionInstanceStatsData);
functionStats.addInstance(functionInstanceStats);
}
} else {
// find the hostname/port of the worker who is the owner
List<WorkerInfo> workerInfoList = this.membershipManager.getCurrentMembership();
WorkerInfo workerInfo = null;
for (WorkerInfo entry : workerInfoList) {
if (assignment.getWorkerId().equals(entry.getWorkerId())) {
workerInfo = entry;
}
}
if (workerInfo == null) {
return functionStats;
}
if (uri == null) {
throw new WebApplicationException(Response.serverError()
.status(Status.INTERNAL_SERVER_ERROR).build());
} else {
URI redirect = UriBuilder.fromUri(uri)
.host(workerInfo.getWorkerHostname()).port(workerInfo.getPort()).build();
throw new WebApplicationException(Response.temporaryRedirect(redirect).build());
}
}
} else {
for (Assignment assignment : assignments) {
boolean isOwner = this.workerConfig.getWorkerId().equals(assignment.getWorkerId());
FunctionInstanceStatsDataImpl functionInstanceStatsData;
if (isOwner) {
functionInstanceStatsData = getFunctionInstanceStats(tenant, namespace, functionName,
assignment.getInstance().getInstanceId(), null);
} else {
functionInstanceStatsData =
(FunctionInstanceStatsDataImpl) this.functionAdmin.functions().getFunctionStats(
assignment.getInstance().getFunctionMetaData().getFunctionDetails().getTenant(),
assignment.getInstance().getFunctionMetaData().getFunctionDetails().getNamespace(),
assignment.getInstance().getFunctionMetaData().getFunctionDetails().getName(),
assignment.getInstance().getInstanceId());
}
FunctionInstanceStatsImpl functionInstanceStats = new FunctionInstanceStatsImpl();
functionInstanceStats.setInstanceId(assignment.getInstance().getInstanceId());
functionInstanceStats.setMetrics(functionInstanceStatsData);
functionStats.addInstance(functionInstanceStats);
}
}
return functionStats.calculateOverall();
} | 3.68 |
hbase_MetaTableAccessor_updateLocation | /**
* Updates the location of the specified region to be the specified server.
* <p>
* Connects to the specified server which should be hosting the specified catalog region name to
* perform the edit.
* @param connection connection we're using
* @param regionInfo region to update location of
* @param sn Server name
* @param openSeqNum the latest sequence number obtained when the region was open
* @param masterSystemTime wall clock time from master if passed in the open region RPC
* @throws IOException In particular could throw {@link java.net.ConnectException} if the server
* is down on other end.
*/
private static void updateLocation(Connection connection, RegionInfo regionInfo, ServerName sn,
long openSeqNum, long masterSystemTime) throws IOException {
// region replicas are kept in the primary region's row
Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), masterSystemTime);
addRegionInfo(put, regionInfo);
addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
putToMetaTable(connection, put);
LOG.info("Updated row {} with server=", regionInfo.getRegionNameAsString(), sn);
} | 3.68 |
druid_SQLCommitStatement_getChain | // mysql
public Boolean getChain() {
return chain;
} | 3.68 |
flink_ProcessorArchitecture_getAddressSize | /** Gets the address size of the memory (32 bit, 64 bit). */
public MemoryAddressSize getAddressSize() {
return addressSize;
} | 3.68 |
dubbo_StringUtils_decodeHexByte | /**
* Decode a 2-digit hex byte from within a string.
*/
public static byte decodeHexByte(CharSequence s, int pos) {
int hi = decodeHexNibble(s.charAt(pos));
int lo = decodeHexNibble(s.charAt(pos + 1));
if (hi == -1 || lo == -1) {
throw new IllegalArgumentException(
String.format("invalid hex byte '%s' at index %d of '%s'", s.subSequence(pos, pos + 2), pos, s));
}
return (byte) ((hi << 4) + lo);
} | 3.68 |
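A quick usage sketch, assuming the Dubbo utility class shown above is org.apache.dubbo.common.utils.StringUtils.

```java
import org.apache.dubbo.common.utils.StringUtils;

public class HexByteExample {
    public static void main(String[] args) {
        // Reads the two hex digits "4A" starting at index 2 -> 0x4A == 74.
        byte b = StringUtils.decodeHexByte("id4A", 2);
        System.out.println(b); // 74
    }
}
```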
hbase_ZKUtil_getPath | /** Returns path to znode where the ZKOp will occur */
public String getPath() {
return path;
} | 3.68 |
hbase_AdvancedScanResultConsumer_onHeartbeat | /**
* Indicates that there is a heartbeat message but we have not accumulated enough cells to call
* {@link #onNext(Result[], ScanController)}.
* <p>
* Note that this method will always be called when the RS returns something to us but we do not
* have enough cells to call {@link #onNext(Result[], ScanController)}. Sometimes it may not be a
* 'heartbeat' message from the RS's point of view; for example, we may have a large row with many
* cells and the size limit is exceeded before all the cells of that row are sent. The RS did send
* some data and the time limit has not been reached, but we cannot return the data to the client
* yet, so we call this method to tell the client that we have already received something.
* <p>
* This method gives you a chance to terminate a slow scan operation.
* @param controller used to suspend or terminate the scan. Notice that the {@code controller}
* instance is only valid within the scope of onHeartbeat method. You can only
* call its method in onHeartbeat, do NOT store it and call it later outside
* onHeartbeat.
*/
default void onHeartbeat(ScanController controller) {
} | 3.68 |
hadoop_AssumedRoleCredentialProvider_buildSessionName | /**
* Build the session name from the current user's shortname.
* @return a string for the session name.
* @throws IOException failure to get the current user
*/
static String buildSessionName() throws IOException {
return sanitize(UserGroupInformation.getCurrentUser()
.getShortUserName());
} | 3.68 |
hmily_NacosConfig_fileName | /**
* File name string.
*
* @return the string
*/
public String fileName() {
return dataId + "." + fileExtension;
} | 3.68 |
hadoop_ZStandardCompressor_getBytesWritten | /**
* Returns the total number of compressed bytes output so far.
*
* @return the total (non-negative) number of compressed bytes output so far
*/
@Override
public long getBytesWritten() {
checkStream();
return bytesWritten;
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableNumRunningFlushes | /** Enables the metric that reports the number of currently running flushes. */
public void enableNumRunningFlushes() {
this.properties.add(RocksDBProperty.NumRunningFlushes.getRocksDBProperty());
} | 3.68 |
hadoop_TimelineReaderWebServicesUtils_parseEventFilters | /**
* Parses a comma-delimited string of event filters and converts it into a
* {@link TimelineFilterList}. For instance, the string should be represented
* as "value1,value2,value3".
* @param expr comma-delimited filter expression.
* @return a {@link TimelineFilterList} built from the expression.
*/
static TimelineFilterList parseEventFilters(String expr)
throws TimelineParseException {
return parseFilters(new TimelineParserForExistFilters(expr,
TimelineParseConstants.COMMA_CHAR));
} | 3.68 |
hadoop_OBSFileSystem_isEnableTrash | /**
* Return a flag that indicates if fast delete is enabled.
*
* @return the flag
*/
boolean isEnableTrash() {
return enableTrash;
} | 3.68 |
open-banking-gateway_FintechConsentAccessImpl_delete | /**
* Deletes consent from the database.
*/
@Override
public void delete(ProtocolFacingConsent consent) {
consents.delete(((ProtocolFacingConsentImpl) consent).getConsent());
} | 3.68 |
hbase_FavoredNodeAssignmentHelper_generateMissingFavoredNode | /*
* Generates a missing favored node based on the input favored nodes. This helps to generate new
* FN when there are already 2 FN and we need a third one, for example while generating new FN for
* split daughters after inheriting 2 FN from the parent. If the cluster has only one rack, it
* generates the new FN from the same rack. If the cluster has multiple racks, it ensures the new FN
* respects the rack constraints similar to HDFS. For example, if there are 3 FN, they will be spread
* across 2 racks.
*/
public ServerName generateMissingFavoredNode(List<ServerName> favoredNodes) throws IOException {
if (this.uniqueRackList.size() == 1) {
return generateMissingFavoredNodeSingleRack(favoredNodes, null);
} else {
return generateMissingFavoredNodeMultiRack(favoredNodes, null);
}
} | 3.68 |
flink_CoGroupOperatorBase_getGroupOrderForInputOne | /**
* Gets the order of elements within a group for the first input. If no such order has been set,
* this method returns null.
*
* @return The group order for the first input.
*/
public Ordering getGroupOrderForInputOne() {
return getGroupOrder(0);
} | 3.68 |
flink_MemorySize_parse | /**
* Parses the given string with a default unit.
*
* @param text The string to parse.
* @param defaultUnit specify the default unit.
* @return The parsed MemorySize.
* @throws IllegalArgumentException Thrown, if the expression cannot be parsed.
*/
public static MemorySize parse(String text, MemoryUnit defaultUnit)
throws IllegalArgumentException {
if (!hasUnit(text)) {
return parse(text + defaultUnit.getUnits()[0]);
}
return parse(text);
} | 3.68 |
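A short usage sketch of the parse overload above, assuming Flink's org.apache.flink.configuration.MemorySize and its nested MemoryUnit enum.

```java
import org.apache.flink.configuration.MemorySize;

public class MemorySizeExample {
    public static void main(String[] args) {
        // "64" has no unit, so the default unit is appended -> 64 megabytes.
        MemorySize withDefault = MemorySize.parse("64", MemorySize.MemoryUnit.MEGA_BYTES);
        // "512kb" already carries a unit, so the default unit is ignored.
        MemorySize explicit = MemorySize.parse("512kb", MemorySize.MemoryUnit.MEGA_BYTES);
        System.out.println(withDefault.getBytes()); // 67108864
        System.out.println(explicit.getBytes());    // 524288
    }
}
```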
pulsar_WebServer_addRestResource | /**
* Add a REST resource to the servlet context.
*
* @param basePath The base path for the resource.
* @param attribute An attribute associated with the resource.
* @param attributeValue The value of the attribute.
* @param resourceClass The class representing the resource.
* @param requireAuthentication A boolean indicating whether authentication is required for this resource.
*/
public void addRestResource(String basePath, String attribute, Object attributeValue,
Class<?> resourceClass, boolean requireAuthentication) {
ResourceConfig config = new ResourceConfig();
config.register(resourceClass);
config.register(JsonMapperProvider.class);
ServletHolder servletHolder = new ServletHolder(new ServletContainer(config));
servletHolder.setAsyncSupported(true);
// This method has not historically checked for existing paths, so we don't check here either. The
// method call is added to reduce code duplication.
addServlet(basePath, servletHolder, Collections.singletonList(Pair.of(attribute, attributeValue)),
requireAuthentication, false);
} | 3.68 |
streampipes_HTMLFetcher_fetch | /**
* Fetches the document at the given URL, using {@link URLConnection}.
*
* @param url the URL to fetch
* @return the fetched {@link HTMLDocument}
* @throws IOException if fetching fails or the content type is not HTML
*/
public static HTMLDocument fetch(final URL url) throws IOException {
final URLConnection conn = url.openConnection();
final String ct = conn.getContentType();
if (ct == null || !(ct.equals("text/html") || ct.startsWith("text/html;"))) {
throw new IOException("Unsupported content type: " + ct);
}
Charset cs = Charset.forName("Cp1252");
if (ct != null) {
Matcher m = PAT_CHARSET.matcher(ct);
if (m.find()) {
final String charset = m.group(1);
try {
cs = Charset.forName(charset);
} catch (UnsupportedCharsetException e) {
// keep default
}
}
}
InputStream in = conn.getInputStream();
final String encoding = conn.getContentEncoding();
if (encoding != null) {
if ("gzip".equalsIgnoreCase(encoding)) {
in = new GZIPInputStream(in);
} else {
System.err.println("WARN: unsupported Content-Encoding: " + encoding);
}
}
ByteArrayOutputStream bos = new ByteArrayOutputStream();
byte[] buf = new byte[4096];
int r;
while ((r = in.read(buf)) != -1) {
bos.write(buf, 0, r);
}
in.close();
final byte[] data = bos.toByteArray();
return new HTMLDocument(data, cs);
} | 3.68 |
hadoop_SnappyDecompressor_setInput | /**
* Sets input data for decompression.
* This should be called if and only if {@link #needsInput()} returns
* <code>true</code> indicating that more input data is required.
* (Both native and non-native versions of various Decompressors require
* that the data passed in via <code>b[]</code> remain unmodified until
* the caller is explicitly notified--via {@link #needsInput()}--that the
* buffer may be safely modified. With this requirement, an extra
* buffer-copy can be avoided.)
*
* @param b Input data
* @param off Start offset
* @param len Length
*/
@Override
public void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
setInputFromSavedData();
// Reinitialize snappy's output direct-buffer
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
} | 3.68 |
morf_SqlDialect_columns | /**
* @see org.alfasoftware.morf.metadata.Table#columns()
*/
@Override
public List<Column> columns() {
List<Column> columns = new ArrayList<>();
columns.add(SchemaUtils.column(ID_INCREMENTOR_TABLE_COLUMN_NAME, DataType.STRING, 132).primaryKey());
columns.add(SchemaUtils.column(ID_INCREMENTOR_TABLE_COLUMN_VALUE, DataType.BIG_INTEGER));
return columns;
} | 3.68 |
dubbo_ClassUtils_resolveClass | /**
* Resolve the {@link Class} by the specified name and {@link ClassLoader}
*
* @param className the name of {@link Class}
* @param classLoader {@link ClassLoader}
* @return the resolved {@link Class}, or <code>null</code> if it cannot be resolved
* @since 2.7.6
*/
public static Class<?> resolveClass(String className, ClassLoader classLoader) {
Class<?> targetClass = null;
try {
targetClass = forName(className, classLoader);
} catch (Exception ignored) { // Ignored
}
return targetClass;
} | 3.68 |
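A brief usage sketch, assuming the class above is org.apache.dubbo.common.utils.ClassUtils; the missing class name is hypothetical.

```java
import org.apache.dubbo.common.utils.ClassUtils;

public class ResolveClassExample {
    public static void main(String[] args) {
        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        // Resolvable class -> the Class object is returned.
        Class<?> found = ClassUtils.resolveClass("java.lang.String", cl);
        // Unknown class -> the exception is swallowed and null is returned.
        Class<?> missing = ClassUtils.resolveClass("com.example.DoesNotExist", cl);
        System.out.println(found);   // class java.lang.String
        System.out.println(missing); // null
    }
}
```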
framework_VScrollTable_cancel | /**
* Cancels the current context touch timeout.
*/
public void cancel() {
if (contextTouchTimeout != null) {
contextTouchTimeout.cancel();
contextTouchTimeout = null;
}
touchStart = null;
} | 3.68 |
hudi_BaseHoodieWriteClient_updateColumnComment | /**
* Updates the column comment for a Hudi table.
*
* @param colName name of the column to be changed; to change a nested field, the full name must be specified
* @param doc the new column comment
*/
public void updateColumnComment(String colName, String doc) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyColumnCommentChange(colName, doc);
commitTableChange(newSchema, pair.getRight());
} | 3.68 |
hbase_PrivateCellUtil_compareQualifier | /**
* Compare cell's qualifier against given comparator
* @param cell the cell to use for comparison
* @param comparator the {@link CellComparator} to use for comparison
* @return result comparing cell's qualifier
*/
public static int compareQualifier(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferExtendedCell) {
return comparator.compareTo(((ByteBufferExtendedCell) cell).getQualifierByteBuffer(),
((ByteBufferExtendedCell) cell).getQualifierPosition(), cell.getQualifierLength());
}
return comparator.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength());
} | 3.68 |
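A hedged usage sketch of the comparison above; PrivateCellUtil is an HBase-internal class, and the KeyValue/BinaryComparator construction below is only illustrative.

```java
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareQualifierExample {
    public static void main(String[] args) {
        Cell cell = new KeyValue(
                Bytes.toBytes("row1"), Bytes.toBytes("cf"),
                Bytes.toBytes("q1"), Bytes.toBytes("value"));
        // 0 means the comparator's value and the cell's qualifier are equal.
        int cmp = PrivateCellUtil.compareQualifier(cell, new BinaryComparator(Bytes.toBytes("q1")));
        System.out.println(cmp); // 0
    }
}
```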
flink_DeletePushDownUtils_prepareFilter | /** Prepares the filter by reducing and simplifying its condition. */
private static Filter prepareFilter(Filter filter) {
// we try to reduce and simplify the filter
ReduceExpressionsRuleProxy reduceExpressionsRuleProxy = ReduceExpressionsRuleProxy.INSTANCE;
SimplifyFilterConditionRule simplifyFilterConditionRule =
SimplifyFilterConditionRule.INSTANCE();
// max iteration num for reducing and simplifying filter,
// we use 5 as the max iteration num which is same with the iteration num in Flink's plan
// optimizing.
int maxIteration = 5;
boolean changed = true;
int iteration = 1;
// iterate until it reaches max iteration num or there's no changes in one iterate
while (changed && iteration <= maxIteration) {
changed = false;
// first apply the rule to reduce condition in filter
RexNode newCondition = filter.getCondition();
List<RexNode> expList = new ArrayList<>();
expList.add(newCondition);
if (reduceExpressionsRuleProxy.reduce(filter, expList)) {
// get the new condition
newCondition = expList.get(0);
changed = true;
}
// create a new filter
filter = filter.copy(filter.getTraitSet(), filter.getInput(), newCondition);
// then apply the rule to simplify filter
Option<Filter> changedFilter =
simplifyFilterConditionRule.simplify(filter, new boolean[] {false});
if (changedFilter.isDefined()) {
filter = changedFilter.get();
changed = true;
}
iteration += 1;
}
return filter;
} | 3.68 |
hbase_AbstractProcedureScheduler_schedLock | // ==========================================================================
// Internal helpers
// ==========================================================================
protected void schedLock() {
schedulerLock.lock();
} | 3.68 |
hadoop_KMSAudit_op | /**
* Logs to the audit service a single operation on the KMS or on a key.
*
* @param opStatus
* The outcome of the audited event
* @param op
* The operation being audited (either {@link KMS.KMSOp} or
* {@link Type} N.B this is passed as an {@link Object} to allow
* either enum to be passed in.
* @param ugi
* The user's security context
* @param key
* The String name of the key if applicable
* @param remoteHost
* The hostname of the requesting service
* @param extraMsg
* Any extra details for auditing
*/
private void op(final OpStatus opStatus, final Object op,
final UserGroupInformation ugi, final String key, final String remoteHost,
final String extraMsg) {
final String user = ugi == null ? null: ugi.getUserName();
if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key)
&& (op != null)
&& AGGREGATE_OPS_WHITELIST.contains(op)) {
String cacheKey = createCacheKey(user, key, op);
if (opStatus == OpStatus.UNAUTHORIZED) {
cache.invalidate(cacheKey);
logEvent(opStatus, new AuditEvent(op, ugi, key, remoteHost, extraMsg));
} else {
try {
AuditEvent event = cache.get(cacheKey, new Callable<AuditEvent>() {
@Override
public AuditEvent call() throws Exception {
return new AuditEvent(op, ugi, key, remoteHost, extraMsg);
}
});
// Log first access (initialized as -1 so
// incrementAndGet() == 0 implies first access)
if (event.getAccessCount().incrementAndGet() == 0) {
event.getAccessCount().incrementAndGet();
logEvent(opStatus, event);
}
} catch (ExecutionException ex) {
throw new RuntimeException(ex);
}
}
} else {
logEvent(opStatus, new AuditEvent(op, ugi, key, remoteHost, extraMsg));
}
} | 3.68 |
hbase_WALKeyImpl_getSequenceId | /**
* SequenceId is only available post WAL-assign. Calls before this will get you a
* {@link SequenceId#NO_SEQUENCE_ID}. See the comment on FSHLog#append and #getWriteNumber in this
* method for more on when this sequenceId comes available.
* @return long the new assigned sequence number
*/
@Override
public long getSequenceId() {
return this.sequenceId;
} | 3.68 |
hbase_EncodedDataBlock_getIterator | /**
* Provides access to compressed value.
* @param headerSize header size of the block.
* @return Forwards sequential iterator.
*/
public Iterator<Cell> getIterator(int headerSize) {
final int rawSize = rawKVs.length;
byte[] encodedDataWithHeader = getEncodedData();
int bytesToSkip = headerSize + Bytes.SIZEOF_SHORT;
ByteArrayInputStream bais = new ByteArrayInputStream(encodedDataWithHeader, bytesToSkip,
encodedDataWithHeader.length - bytesToSkip);
final DataInputStream dis = new DataInputStream(bais);
return new Iterator<Cell>() {
private ByteBuffer decompressedData = null;
private Iterator<Boolean> it = isTagsLenZero.iterator();
@Override
public boolean hasNext() {
if (decompressedData == null) {
return rawSize > 0;
}
return decompressedData.hasRemaining();
}
@Override
public Cell next() {
if (decompressedData == null) {
try {
decompressedData = dataBlockEncoder.decodeKeyValues(dis,
dataBlockEncoder.newDataBlockDecodingContext(conf, meta));
} catch (IOException e) {
throw new RuntimeException("Problem with data block encoder, "
+ "most likely it requested more bytes than are available.", e);
}
decompressedData.rewind();
}
int offset = decompressedData.position();
int klen = decompressedData.getInt();
int vlen = decompressedData.getInt();
int tagsLen = 0;
ByteBufferUtils.skip(decompressedData, klen + vlen);
// Read the tag length in case when stream contain tags
if (meta.isIncludesTags()) {
boolean noTags = true;
if (it.hasNext()) {
noTags = it.next();
}
// ROW_INDEX_V1 will not put tagsLen back in cell if it is zero, there is no need
// to read short here.
if (!(encoding.equals(DataBlockEncoding.ROW_INDEX_V1) && noTags)) {
tagsLen = ((decompressedData.get() & 0xff) << 8) ^ (decompressedData.get() & 0xff);
ByteBufferUtils.skip(decompressedData, tagsLen);
}
}
KeyValue kv =
new KeyValue(decompressedData.array(), decompressedData.arrayOffset() + offset,
(int) KeyValue.getKeyValueDataStructureSize(klen, vlen, tagsLen));
if (meta.isIncludesMvcc()) {
long mvccVersion = ByteBufferUtils.readVLong(decompressedData);
kv.setSequenceId(mvccVersion);
}
return kv;
}
@Override
public void remove() {
throw new NotImplementedException("remove() is not supported!");
}
@Override
public String toString() {
return "Iterator of: " + dataBlockEncoder.getClass().getName();
}
};
} | 3.68 |
framework_CalendarEvent_getCalendarEvent | /**
* @return the {@link com.vaadin.addon.calendar.event.CalendarEvent
* CalendarEvent} that has changed
*/
public CalendarEvent getCalendarEvent() {
return source;
} | 3.68 |
rocketmq-connect_Worker_stopConnectors | /**
* Stops the given connectors.
*
* @param ids names of the connectors to stop
*/
private void stopConnectors(Collection<String> ids) {
for (String connectorName : ids) {
stopConnector(connectorName);
}
} | 3.68 |
open-banking-gateway_ProcessResultEventHandler_add | /**
* Adds the subscriber to the BPMN process. If a subscriber already exists, the old one is removed.
*
* @param processId BPMN process id to subscribe to
* @param subscriber Internal BPMN event handling function
*/
void add(String processId, Consumer<InternalProcessResult> subscriber) {
InternalProcessResult delayedMessage;
synchronized (lock) {
delayedMessage = deadLetterQueue.remove(processId);
if (null == delayedMessage) {
subscribers.put(processId, subscriber);
return;
}
}
subscriber.accept(delayedMessage);
} | 3.68 |
flink_RequestedLocalProperties_filterBySemanticProperties | /**
* Filters these properties by what can be preserved by the given SemanticProperties when
* propagated down to the given input.
*
* @param props The SemanticProperties which define which fields are preserved.
* @param input The index of the operator's input.
* @return The filtered RequestedLocalProperties
*/
public RequestedLocalProperties filterBySemanticProperties(
SemanticProperties props, int input) {
// no semantic properties, all local properties are filtered
if (props == null) {
throw new NullPointerException("SemanticProperties may not be null.");
}
if (this.ordering != null) {
Ordering newOrdering = new Ordering();
for (int i = 0; i < this.ordering.getInvolvedIndexes().size(); i++) {
int targetField = this.ordering.getInvolvedIndexes().get(i);
int sourceField = props.getForwardingSourceField(input, targetField);
if (sourceField >= 0) {
newOrdering.appendOrdering(
sourceField, this.ordering.getType(i), this.ordering.getOrder(i));
} else {
return null;
}
}
return new RequestedLocalProperties(newOrdering);
} else if (this.groupedFields != null) {
FieldSet newGrouping = new FieldSet();
// check, whether the local key grouping is preserved
for (Integer targetField : this.groupedFields) {
int sourceField = props.getForwardingSourceField(input, targetField);
if (sourceField >= 0) {
newGrouping = newGrouping.addField(sourceField);
} else {
return null;
}
}
return new RequestedLocalProperties(newGrouping);
} else {
return null;
}
} | 3.68 |
dubbo_ServiceDiscoveryRegistryDirectory_destroyAllInvokers | /**
* Close all invokers
*/
@Override
protected void destroyAllInvokers() {
Map<ProtocolServiceKeyWithAddress, Invoker<T>> localUrlInvokerMap = this.urlInvokerMap; // local reference
if (localUrlInvokerMap != null) {
for (Invoker<T> invoker : new ArrayList<>(localUrlInvokerMap.values())) {
try {
invoker.destroy();
} catch (Throwable t) {
logger.warn(
PROTOCOL_FAILED_DESTROY_INVOKER,
"",
"",
"Failed to destroy service " + serviceKey + " to provider " + invoker.getUrl(),
t);
}
}
localUrlInvokerMap.clear();
}
this.urlInvokerMap = null;
this.destroyInvokers();
} | 3.68 |
flink_HiveStatsUtil_createTableColumnStats | /** Create Flink ColumnStats from Hive ColumnStatisticsData. */
private static CatalogColumnStatisticsDataBase createTableColumnStats(
DataType colType, ColumnStatisticsData stats, String hiveVersion) {
HiveShim hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
if (stats.isSetBinaryStats()) {
BinaryColumnStatsData binaryStats = stats.getBinaryStats();
return new CatalogColumnStatisticsDataBinary(
binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null,
binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null,
binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null);
} else if (stats.isSetBooleanStats()) {
BooleanColumnStatsData booleanStats = stats.getBooleanStats();
return new CatalogColumnStatisticsDataBoolean(
booleanStats.isSetNumTrues() ? booleanStats.getNumTrues() : null,
booleanStats.isSetNumFalses() ? booleanStats.getNumFalses() : null,
booleanStats.isSetNumNulls() ? booleanStats.getNumNulls() : null);
} else if (hiveShim.isDateStats(stats)) {
return hiveShim.toFlinkDateColStats(stats);
} else if (stats.isSetDoubleStats()) {
DoubleColumnStatsData doubleStats = stats.getDoubleStats();
return new CatalogColumnStatisticsDataDouble(
doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null,
doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null,
doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null,
doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null);
} else if (stats.isSetLongStats()) {
LongColumnStatsData longColStats = stats.getLongStats();
return new CatalogColumnStatisticsDataLong(
longColStats.isSetLowValue() ? longColStats.getLowValue() : null,
longColStats.isSetHighValue() ? longColStats.getHighValue() : null,
longColStats.isSetNumDVs() ? longColStats.getNumDVs() : null,
longColStats.isSetNumNulls() ? longColStats.getNumNulls() : null);
} else if (stats.isSetStringStats()) {
StringColumnStatsData stringStats = stats.getStringStats();
return new CatalogColumnStatisticsDataString(
stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null,
stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null,
stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null,
stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null);
} else if (stats.isSetDecimalStats()) {
DecimalColumnStatsData decimalStats = stats.getDecimalStats();
// for now, just return CatalogColumnStatisticsDataDouble for decimal columns
Double max = null;
if (decimalStats.isSetHighValue()) {
max = toHiveDecimal(decimalStats.getHighValue()).doubleValue();
}
Double min = null;
if (decimalStats.isSetLowValue()) {
min = toHiveDecimal(decimalStats.getLowValue()).doubleValue();
}
Long ndv = decimalStats.isSetNumDVs() ? decimalStats.getNumDVs() : null;
Long nullCount = decimalStats.isSetNumNulls() ? decimalStats.getNumNulls() : null;
return new CatalogColumnStatisticsDataDouble(min, max, ndv, nullCount);
} else {
LOG.warn(
"Flink does not support converting ColumnStatisticsData '{}' for Hive column type '{}' yet.",
stats,
colType);
return null;
}
} | 3.68 |
hadoop_StageConfig_withProgressable | /**
* Optional progress callback.
* @param value new value
* @return this
*/
public StageConfig withProgressable(final Progressable value) {
checkOpen();
progressable = value;
return this;
} | 3.68 |
flink_UploadThrottle_releaseCapacity | /**
* Release previously {@link #seizeCapacity(long) seized} capacity. Called by {@link
* BatchingStateChangeUploadScheduler} (IO thread).
*/
public void releaseCapacity(long bytes) {
inFlightBytesCounter -= bytes;
} | 3.68 |
flink_StreamExecutionEnvironment_executeAsync | /**
* Triggers the program execution asynchronously. The environment will execute all parts of the
* program that have resulted in a "sink" operation. Sink operations are for example printing
* results or forwarding them to a message queue.
*
* @param streamGraph the stream graph representing the transformations
* @return A {@link JobClient} that can be used to communicate with the submitted job, completed
* on submission succeeded.
* @throws Exception which occurs during job execution.
*/
@Internal
public JobClient executeAsync(StreamGraph streamGraph) throws Exception {
checkNotNull(streamGraph, "StreamGraph cannot be null.");
final PipelineExecutor executor = getPipelineExecutor();
CompletableFuture<JobClient> jobClientFuture =
executor.execute(streamGraph, configuration, userClassloader);
try {
JobClient jobClient = jobClientFuture.get();
jobListeners.forEach(jobListener -> jobListener.onJobSubmitted(jobClient, null));
collectIterators.forEach(iterator -> iterator.setJobClient(jobClient));
collectIterators.clear();
return jobClient;
} catch (ExecutionException executionException) {
final Throwable strippedException =
ExceptionUtils.stripExecutionException(executionException);
jobListeners.forEach(
jobListener -> jobListener.onJobSubmitted(null, strippedException));
throw new FlinkException(
String.format("Failed to execute job '%s'.", streamGraph.getJobName()),
strippedException);
}
} | 3.68 |
hbase_HBaseRpcServicesBase_checkOOME | /**
* Check if an OOME occurred and, if so, abort immediately to avoid creating more objects.
* @return True if we OOME'd and are aborting.
*/
@Override
public boolean checkOOME(Throwable e) {
return OOMEChecker.exitIfOOME(e, getClass().getSimpleName());
} | 3.68 |
hbase_Mutation_getRow | /**
* Method for retrieving the delete's row
*/
@Override
public byte[] getRow() {
return this.row;
} | 3.68 |
hbase_MemoryBoundedLogMessageBuffer_dumpTo | /**
* Dump the contents of the buffer to the given stream.
*/
public synchronized void dumpTo(PrintWriter out) {
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
for (LogMessage msg : messages) {
out.write(df.format(new Date(msg.timestamp)));
out.write(" ");
out.println(new String(msg.message, Charsets.UTF_8));
}
} | 3.68 |
querydsl_JPAExpressions_min | /**
* Create a min(col) expression
*
* @param left collection
* @return min(col)
*/
public static <A extends Comparable<? super A>> ComparableExpression<A> min(CollectionExpression<?,A> left) {
return Expressions.comparableOperation((Class) left.getParameter(0), Ops.QuantOps.MIN_IN_COL, (Expression<?>) left);
} | 3.68 |
hadoop_Quota_getEachQuotaUsage | /**
* Get quota usage for the federation path.
* @param path Federation path.
* @return quota usage for each remote location.
* @throws IOException If the quota system is disabled.
*/
Map<RemoteLocation, QuotaUsage> getEachQuotaUsage(String path)
throws IOException {
rpcServer.checkOperation(OperationCategory.READ);
if (!router.isQuotaEnabled()) {
throw new IOException("The quota system is disabled in Router.");
}
final List<RemoteLocation> quotaLocs = getValidQuotaLocations(path);
RemoteMethod method = new RemoteMethod("getQuotaUsage",
new Class<?>[] {String.class}, new RemoteParam());
Map<RemoteLocation, QuotaUsage> results = rpcClient.invokeConcurrent(
quotaLocs, method, true, false, QuotaUsage.class);
return results;
} | 3.68 |
hudi_JdbcSource_validatePropsAndGetDataFrameReader | /**
* Validates all user properties and prepares the {@link DataFrameReader} to read from RDBMS.
*
* @param session The {@link SparkSession}.
* @param properties The JDBC connection properties and data source options.
* @return The {@link DataFrameReader} to read from RDBMS
* @throws HoodieException
*/
private static DataFrameReader validatePropsAndGetDataFrameReader(final SparkSession session,
final TypedProperties properties)
throws HoodieException {
DataFrameReader dataFrameReader;
FSDataInputStream passwordFileStream = null;
try {
dataFrameReader = session.read().format("jdbc");
dataFrameReader = dataFrameReader.option(
Config.URL_PROP, getStringWithAltKeys(properties, JdbcSourceConfig.URL));
dataFrameReader = dataFrameReader.option(
Config.USER_PROP, getStringWithAltKeys(properties, JdbcSourceConfig.USER));
dataFrameReader = dataFrameReader.option(
Config.DRIVER_PROP, getStringWithAltKeys(properties, JdbcSourceConfig.DRIVER_CLASS));
dataFrameReader = dataFrameReader.option(
Config.RDBMS_TABLE_PROP, getStringWithAltKeys(properties, JdbcSourceConfig.RDBMS_TABLE_NAME));
if (containsConfigProperty(properties, JdbcSourceConfig.PASSWORD)) {
LOG.info("Reading JDBC password from properties file....");
dataFrameReader = dataFrameReader.option(Config.PASSWORD_PROP,
getStringWithAltKeys(properties, JdbcSourceConfig.PASSWORD));
} else if (containsConfigProperty(properties, JdbcSourceConfig.PASSWORD_FILE)
&& !StringUtils.isNullOrEmpty(getStringWithAltKeys(properties, JdbcSourceConfig.PASSWORD_FILE))) {
LOG.info(String.format("Reading JDBC password from password file %s", getStringWithAltKeys(properties, JdbcSourceConfig.PASSWORD_FILE)));
FileSystem fileSystem = FileSystem.get(session.sparkContext().hadoopConfiguration());
passwordFileStream = fileSystem.open(new Path(getStringWithAltKeys(properties, JdbcSourceConfig.PASSWORD_FILE)));
byte[] bytes = new byte[passwordFileStream.available()];
passwordFileStream.read(bytes);
dataFrameReader = dataFrameReader.option(Config.PASSWORD_PROP, new String(bytes));
} else {
throw new IllegalArgumentException(String.format("JDBCSource needs either a %s or %s to connect to RDBMS "
+ "datasource", JdbcSourceConfig.PASSWORD_FILE.key(), JdbcSourceConfig.PASSWORD.key()));
}
addExtraJdbcOptions(properties, dataFrameReader);
if (getBooleanWithAltKeys(properties, JdbcSourceConfig.IS_INCREMENTAL)) {
checkRequiredConfigProperties(properties, Collections.singletonList(JdbcSourceConfig.INCREMENTAL_COLUMN));
}
return dataFrameReader;
} catch (Exception e) {
throw new HoodieException("Failed to validate properties", e);
} finally {
IOUtils.closeStream(passwordFileStream);
}
} | 3.68 |
flink_FunctionContext_getCachedFile | /**
* Gets the local temporary file copy of a distributed cache file.
*
* @param name distributed cache file name
* @return local temporary file copy of a distributed cache file.
*/
public File getCachedFile(String name) {
if (context == null) {
throw new TableException(
"Calls to FunctionContext.getCachedFile are not available "
+ "at the current location.");
}
return context.getDistributedCache().getFile(name);
} | 3.68 |
flink_PojoSerializerSnapshot_restoreSerializers | /**
* Transforms a {@link LinkedHashMap} whose values are {@link TypeSerializerSnapshot}s into one
* whose values are {@link TypeSerializer}s by restoring each snapshot.
*/
private static <K> LinkedHashMap<K, TypeSerializer<?>> restoreSerializers(
LinkedHashMap<K, TypeSerializerSnapshot<?>> snapshotsMap) {
final LinkedHashMap<K, TypeSerializer<?>> restoredSerializersMap =
CollectionUtil.newLinkedHashMapWithExpectedSize(snapshotsMap.size());
snapshotsMap.forEach(
(key, snapshot) -> restoredSerializersMap.put(key, snapshot.restoreSerializer()));
return restoredSerializersMap;
} | 3.68 |
dubbo_ThreadLocalCache_get | /**
* Returns the value stored against the given key in the calling thread's store.
* @param key Unique identifier for cache lookup
* @return Return stored object against key
*/
@Override
public Object get(Object key) {
return store.get().get(key);
} | 3.68 |
hadoop_OBSFileSystem_getDefaultPort | /**
* Return the default port for this FileSystem.
*
* @return -1 to indicate the port is undefined, which agrees with the
* contract of {@link URI#getPort()}
*/
@Override
public int getDefaultPort() {
return OBSConstants.OBS_DEFAULT_PORT;
} | 3.68 |
hadoop_MawoConfiguration_getJobQueueStorageEnabled | /**
* Check if Job Queue Storage is Enabled.
* @return True if Job queue storage is enabled otherwise False
*/
public boolean getJobQueueStorageEnabled() {
return Boolean.parseBoolean(configsMap.get(JOB_QUEUE_STORAGE_ENABLED));
} | 3.68 |
querydsl_StringExpression_notLike | /**
* Create a {@code this not like str} expression
*
* @param str string
* @param escape escape character
* @return this not like str
*/
public BooleanExpression notLike(Expression<String> str, char escape) {
return like(str, escape).not();
} | 3.68 |
AreaShop_RegionGroup_getSettings | /**
* Get the configuration section with the settings of this group.
* @return The ConfigurationSection with the settings of the group
*/
public ConfigurationSection getSettings() {
ConfigurationSection result = plugin.getFileManager().getGroupSettings(name);
if(result != null) {
return result;
} else {
return new YamlConfiguration();
}
} | 3.68 |
flink_KvStateRegistry_createTaskRegistry | /**
* Creates a {@link TaskKvStateRegistry} facade for the {@link Task} identified by the given
* JobID and JobVertexID instance.
*
* @param jobId JobID of the task
* @param jobVertexId JobVertexID of the task
* @return A {@link TaskKvStateRegistry} facade for the task
*/
public TaskKvStateRegistry createTaskRegistry(JobID jobId, JobVertexID jobVertexId) {
return new TaskKvStateRegistry(this, jobId, jobVertexId);
} | 3.68 |
hadoop_InputSplit_getLocationInfo | /**
* Gets info about which nodes the input split is stored on and how it is
* stored at each location.
*
* @return list of <code>SplitLocationInfo</code>s describing how the split
* data is stored at each location. A null value indicates that all the
* locations have the data stored on disk.
* @throws IOException
*/
@Evolving
public SplitLocationInfo[] getLocationInfo() throws IOException {
return null;
} | 3.68 |
flink_FineGrainedSlotManager_declareNeededResources | /** DO NOT call this method directly. Use {@link #declareNeededResourcesWithDelay()} instead. */
private void declareNeededResources() {
Map<InstanceID, WorkerResourceSpec> unWantedTaskManagers =
taskManagerTracker.getUnWantedTaskManager();
Map<WorkerResourceSpec, Set<InstanceID>> unWantedTaskManagerBySpec =
unWantedTaskManagers.entrySet().stream()
.collect(
Collectors.groupingBy(
Map.Entry::getValue,
Collectors.mapping(Map.Entry::getKey, Collectors.toSet())));
// registered TaskManagers except unwanted worker.
Stream<WorkerResourceSpec> registeredTaskManagerStream =
taskManagerTracker.getRegisteredTaskManagers().stream()
.filter(t -> !unWantedTaskManagers.containsKey(t.getInstanceId()))
.map(
t ->
WorkerResourceSpec.fromTotalResourceProfile(
t.getTotalResource(), t.getDefaultNumSlots()));
// pending TaskManagers.
Stream<WorkerResourceSpec> pendingTaskManagerStream =
taskManagerTracker.getPendingTaskManagers().stream()
.map(
t ->
WorkerResourceSpec.fromTotalResourceProfile(
t.getTotalResourceProfile(), t.getNumSlots()));
Map<WorkerResourceSpec, Integer> requiredWorkers =
Stream.concat(registeredTaskManagerStream, pendingTaskManagerStream)
.collect(
Collectors.groupingBy(
Function.identity(), Collectors.summingInt(e -> 1)));
Set<WorkerResourceSpec> workerResourceSpecs = new HashSet<>(requiredWorkers.keySet());
workerResourceSpecs.addAll(unWantedTaskManagerBySpec.keySet());
List<ResourceDeclaration> resourceDeclarations = new ArrayList<>();
workerResourceSpecs.forEach(
spec ->
resourceDeclarations.add(
new ResourceDeclaration(
spec,
requiredWorkers.getOrDefault(spec, 0),
unWantedTaskManagerBySpec.getOrDefault(
spec, Collections.emptySet()))));
resourceAllocator.declareResourceNeeded(resourceDeclarations);
} | 3.68 |
hadoop_ContainerReapContext_build | /**
* Builds the context with the attributes set.
*
* @return the context.
*/
public ContainerReapContext build() {
return new ContainerReapContext(this);
} | 3.68 |
flink_LinkedOptionalMap_absentKeysOrValues | /** Returns the key names of any keys or values that are absent. */
public Set<String> absentKeysOrValues() {
return underlyingMap.entrySet().stream()
.filter(LinkedOptionalMap::keyOrValueIsAbsent)
.map(Entry::getKey)
.collect(Collectors.toCollection(LinkedHashSet::new));
} | 3.68 |
pulsar_AbstractHierarchicalLedgerManager_isLedgerParentNode | /**
* Whether the child of ledgersRootPath is a top-level parent znode for
* ledgers (in HierarchicalLedgerManager) or the znode of a ledger (in
* FlatLedgerManager).
*/
public boolean isLedgerParentNode(String path) {
return path.matches(getLedgerParentNodeRegex());
} | 3.68 |
hudi_HoodieWriteCommitPulsarCallback_createProducer | /**
* Helper method to create a {@link Producer}.
*
* @param hoodieConfig Pulsar configs
* @return A {@link Producer}
*/
public Producer<String> createProducer(HoodieConfig hoodieConfig) throws PulsarClientException {
MessageRoutingMode routeMode = Enum.valueOf(MessageRoutingMode.class,
PRODUCER_ROUTE_MODE.defaultValue());
Duration sendTimeout =
parseDuration(hoodieConfig.getString(PRODUCER_SEND_TIMEOUT));
int pendingQueueSize =
hoodieConfig.getInt(PRODUCER_PENDING_QUEUE_SIZE);
int pendingSize =
hoodieConfig.getInt(PRODUCER_PENDING_SIZE);
boolean blockIfQueueFull =
hoodieConfig.getBoolean(PRODUCER_BLOCK_QUEUE_FULL);
return client
.newProducer(Schema.STRING)
.topic(topic)
.messageRoutingMode(routeMode)
.sendTimeout((int) sendTimeout.toMillis(), TimeUnit.MILLISECONDS)
.maxPendingMessages(pendingQueueSize)
.maxPendingMessagesAcrossPartitions(pendingSize)
.blockIfQueueFull(blockIfQueueFull)
.create();
} | 3.68 |
morf_AbstractConnectionResources_getConnection | /**
* @see javax.sql.DataSource#getConnection(java.lang.String, java.lang.String)
*/
@Override
public Connection getConnection(String username, String password) throws SQLException {
log.info("Opening new database connection to [" + AbstractConnectionResources.this.getJdbcUrl() + "] with username [" + username + "] for schema [" + AbstractConnectionResources.this.getSchemaName() + "]");
loadJdbcDriver();
Connection connection = openConnection(username, password);
return log.isDebugEnabled() ? new LoggingConnection(connection) : connection;
} | 3.68 |
framework_Profiler_getOwnTime | /**
* Gets the total time spent in this node, excluding time spent in sub
* nodes.
*
* @return the total time spent, in milliseconds
*/
public double getOwnTime() {
double time = getTimeSpent();
for (Node node : children.values()) {
time -= node.getTimeSpent();
}
return time;
} | 3.68 |
flink_PartitionedFileWriter_releaseQuietly | /** Used to close and delete the failed {@link PartitionedFile} when any exception occurs. */
public void releaseQuietly() {
IOUtils.closeQuietly(this);
IOUtils.deleteFileQuietly(dataFilePath);
IOUtils.deleteFileQuietly(indexFilePath);
} | 3.68 |
hibernate-validator_ReflectionHelper_getIndexedValue | /**
* Tries to retrieve the indexed value from the specified object.
*
* @param value The object from which to retrieve the indexed value. The object has to be non {@code null} and
* either a collection or array.
* @param index The index.
*
* @return The indexed value or {@code null} if {@code value} is {@code null} or not a collection or array.
* {@code null} is also returned in case the index does not exist.
*/
public static Object getIndexedValue(Object value, int index) {
if ( value == null ) {
return null;
}
Iterable<?> iterable;
Type type = value.getClass();
if ( isIterable( type ) ) {
iterable = ( (Iterable<?>) value );
}
else if ( TypeHelper.isArray( type ) ) {
iterable = CollectionHelper.iterableFromArray( value );
}
else {
return null;
}
int i = 0;
for ( Object o : iterable ) {
if ( i == index ) {
return o;
}
i++;
}
return null;
} | 3.68 |
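A small usage sketch of the lookup above; ReflectionHelper is a Hibernate Validator internal utility, so the package name below is an assumption.

```java
import java.util.Arrays;
import java.util.List;

// Internal Hibernate Validator utility; package name assumed here.
import org.hibernate.validator.internal.util.ReflectionHelper;

public class IndexedValueExample {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alice", "bob", "carol");
        System.out.println(ReflectionHelper.getIndexedValue(names, 1));  // bob
        System.out.println(ReflectionHelper.getIndexedValue(names, 5));  // null (index does not exist)
        System.out.println(ReflectionHelper.getIndexedValue("text", 0)); // null (not a collection or array)
    }
}
```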
hbase_ArrayBackedTag_getValueArray | /** Returns The byte array backing this Tag. */
@Override
public byte[] getValueArray() {
return this.bytes;
} | 3.68 |
hadoop_ReplicaInfo_setVolume | /**
* Set the volume where this replica is located on disk.
*/
void setVolume(FsVolumeSpi vol) {
this.volume = vol;
} | 3.68 |
hibernate-validator_MetaConstraints_getWrappedValueType | /**
* Returns the sub-types binding for the single type parameter of the super-type. E.g. for {@code IntegerProperty}
* and {@code Property<T>}, {@code Integer} would be returned.
*/
private static Class<?> getWrappedValueType(TypeResolutionHelper typeResolutionHelper, Type declaredType, ValueExtractorDescriptor valueExtractorDescriptor) {
ResolvedType resolvedType = typeResolutionHelper.getTypeResolver().resolve( declaredType );
List<ResolvedType> resolvedTypeParameters = resolvedType.typeParametersFor( valueExtractorDescriptor.getContainerType() );
if ( resolvedTypeParameters == null || resolvedTypeParameters.isEmpty() ) {
throw LOG.getNoValueExtractorFoundForUnwrapException( declaredType );
}
return resolvedTypeParameters.get( TypeVariables.getTypeParameterIndex( valueExtractorDescriptor.getExtractedTypeParameter() ) ).getErasedType();
} | 3.68 |
flink_PrioritizedDeque_clear | /** Removes all priority and non-priority elements. */
public void clear() {
deque.clear();
numPriorityElements = 0;
} | 3.68 |
pulsar_MultiTopicsConsumerImpl_removeConsumerAsync | // Remove a consumer for a topic
public CompletableFuture<Void> removeConsumerAsync(String topicName) {
checkArgument(TopicName.isValid(topicName), "Invalid topic name:" + topicName);
if (getState() == State.Closing || getState() == State.Closed) {
return FutureUtil.failedFuture(
new PulsarClientException.AlreadyClosedException("Topics Consumer was already closed"));
}
CompletableFuture<Void> unsubscribeFuture = new CompletableFuture<>();
String topicPartName = TopicName.get(topicName).getPartitionedTopicName();
List<ConsumerImpl<T>> consumersToClose = consumers.values().stream()
.filter(consumer -> {
String consumerTopicName = consumer.getTopic();
return TopicName.get(consumerTopicName).getPartitionedTopicName().equals(topicPartName);
}).collect(Collectors.toList());
List<CompletableFuture<Void>> futureList = consumersToClose.stream()
.map(ConsumerImpl::closeAsync).collect(Collectors.toList());
FutureUtil.waitForAll(futureList)
.whenComplete((r, ex) -> {
if (ex == null) {
consumersToClose.forEach(consumer1 -> {
consumers.remove(consumer1.getTopic());
pausedConsumers.remove(consumer1);
allTopicPartitionsNumber.decrementAndGet();
});
removeTopic(topicName);
if (unAckedMessageTracker instanceof UnAckedTopicMessageTracker) {
((UnAckedTopicMessageTracker) unAckedMessageTracker).removeTopicMessages(topicName);
}
unsubscribeFuture.complete(null);
log.info("[{}] [{}] [{}] Removed Topics Consumer, allTopicPartitionsNumber: {}",
topicName, subscription, consumerName, allTopicPartitionsNumber);
} else {
unsubscribeFuture.completeExceptionally(ex);
setState(State.Failed);
log.error("[{}] [{}] [{}] Could not remove Topics Consumer",
topicName, subscription, consumerName, ex.getCause());
}
});
return unsubscribeFuture;
} | 3.68 |
hadoop_Container_setAllocationRequestId | /**
   * Set the optional <em>ID</em> corresponding to the original {@code
   * ResourceRequest}s ({@link #setAllocationRequestId(long)}) which is
   * satisfied by this allocated {@code Container}.
* <p>
* The scheduler may return multiple {@code AllocateResponse}s corresponding
* to the same ID as and when scheduler allocates {@code Container}s.
* <b>Applications</b> can continue to completely ignore the returned ID in
* the response and use the allocation for any of their outstanding requests.
* If the ID is not set, scheduler will continue to work as previously and all
* allocated {@code Container}(s) will have the default ID, -1.
* <p>
*
* @param allocationRequestID the <em>ID</em> corresponding to the original
* allocation request which is satisfied by this
* allocation.
*/
@Private
@Unstable
public void setAllocationRequestId(long allocationRequestID) {
throw new UnsupportedOperationException();
} | 3.68 |
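Since the snippet above only documents the contract (the base record throws UnsupportedOperationException), here is a hedged client-side sketch of how the ID can be used; it assumes the public YARN records API in which Container exposes getAllocationRequestId(), and the class and method names are illustrative only.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.Container;

// Sketch: group allocated containers by the allocation request ID described above,
// so an application can match each container back to the outstanding request it
// satisfies (containers allocated without an ID carry the default value -1).
public final class AllocationMatcher {

    private AllocationMatcher() {
    }

    public static Map<Long, List<Container>> groupByAllocationRequestId(List<Container> allocated) {
        Map<Long, List<Container>> byRequestId = new HashMap<>();
        for (Container container : allocated) {
            byRequestId
                    .computeIfAbsent(container.getAllocationRequestId(), id -> new ArrayList<>())
                    .add(container);
        }
        return byRequestId;
    }
}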
hbase_HMaster_getNamespace | /**
* Get a Namespace
* @param name Name of the Namespace
* @return Namespace descriptor for <code>name</code>
*/
NamespaceDescriptor getNamespace(String name) throws IOException {
checkInitialized();
if (this.cpHost != null) this.cpHost.preGetNamespaceDescriptor(name);
NamespaceDescriptor nsd = this.clusterSchemaService.getNamespace(name);
if (this.cpHost != null) this.cpHost.postGetNamespaceDescriptor(nsd);
return nsd;
} | 3.68 |
framework_VAbstractSplitPanel_setLocked | /**
* For internal use only. May be removed or replaced in the future.
*
* @param newValue
* {@code true} if split position should be locked, {@code false}
* otherwise
*/
public void setLocked(boolean newValue) {
if (locked != newValue) {
locked = newValue;
splitterSize = -1;
setStylenames();
}
} | 3.68 |
hadoop_IOStatisticsStoreImpl_lookupQuietly | /**
   * Get a reference to the value for a specific key in the
   * given map, returning null if it is not found.
* @param <T> type of map/return type.
* @param map map to look up
* @param key statistic name
* @return the value
*/
private static <T> T lookupQuietly(final Map<String, T> map, String key) {
return map.get(key);
} | 3.68 |
framework_FileUploadHandler_streamToReceiver | /**
     * @param session
     *            the Vaadin session that owns the stream variable
     * @param in
     *            the input stream to read the uploaded content from
     * @param streamVariable
     *            the stream variable that receives the content
     * @param filename
     *            the name of the uploaded file
     * @param type
     *            the MIME type of the uploaded content
     * @param contentLength
     *            the length of the content in bytes
     * @return true if the stream variable has informed that the terminal can
     *         forget this variable
     * @throws UploadException
     *             if streaming the content to the receiver fails
     */
protected final boolean streamToReceiver(VaadinSession session,
final InputStream in, StreamVariable streamVariable,
String filename, String type, long contentLength)
throws UploadException {
if (streamVariable == null) {
throw new IllegalStateException(
"StreamVariable for the post not found");
}
OutputStream out = null;
long totalBytes = 0;
StreamingStartEventImpl startedEvent = new StreamingStartEventImpl(
filename, type, contentLength);
try {
boolean listenProgress;
session.lock();
try {
streamVariable.streamingStarted(startedEvent);
out = streamVariable.getOutputStream();
listenProgress = streamVariable.listenProgress();
} finally {
session.unlock();
}
// Gets the output target stream
if (out == null) {
throw new NoOutputStreamException();
}
if (null == in) {
// No file, for instance non-existent filename in html upload
throw new NoInputStreamException();
}
final byte[] buffer = new byte[MAX_UPLOAD_BUFFER_SIZE];
long lastStreamingEvent = 0;
int bytesReadToBuffer = 0;
do {
bytesReadToBuffer = in.read(buffer);
if (bytesReadToBuffer > 0) {
out.write(buffer, 0, bytesReadToBuffer);
totalBytes += bytesReadToBuffer;
}
if (listenProgress) {
long now = System.currentTimeMillis();
// to avoid excessive session locking and event storms,
// events are sent in intervals, or at the end of the file.
if (lastStreamingEvent + getProgressEventInterval() <= now
|| bytesReadToBuffer <= 0) {
lastStreamingEvent = now;
session.lock();
try {
StreamingProgressEventImpl progressEvent = new StreamingProgressEventImpl(
filename, type, contentLength, totalBytes);
streamVariable.onProgress(progressEvent);
} finally {
session.unlock();
}
}
}
if (streamVariable.isInterrupted()) {
throw new UploadInterruptedException();
}
} while (bytesReadToBuffer > 0);
// upload successful
out.close();
StreamingEndEvent event = new StreamingEndEventImpl(filename, type,
totalBytes);
session.lock();
try {
streamVariable.streamingFinished(event);
} finally {
session.unlock();
}
} catch (UploadInterruptedException e) {
            // Upload interrupted by application code
tryToCloseStream(out);
StreamingErrorEvent event = new StreamingErrorEventImpl(filename,
type, contentLength, totalBytes, e);
session.lock();
try {
streamVariable.streamingFailed(event);
} finally {
session.unlock();
}
return true;
            // Note, we are not throwing the interrupted exception forward as it
            // is not a terminal level error like all the other exceptions.
} catch (final Exception e) {
tryToCloseStream(out);
session.lock();
try {
StreamingErrorEvent event = new StreamingErrorEventImpl(
filename, type, contentLength, totalBytes, e);
streamVariable.streamingFailed(event);
// throw exception for terminal to be handled (to be passed to
// terminalErrorHandler)
throw new UploadException(e);
} finally {
session.unlock();
}
}
return startedEvent.isDisposed();
} | 3.68 |
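For the receiving side of this loop, a minimal StreamVariable sketch is shown below; it assumes Vaadin's com.vaadin.server.StreamVariable interface (the same type the handler above drives), and the in-memory buffering is purely for illustration, not a recommended production approach.

import java.io.ByteArrayOutputStream;
import java.io.OutputStream;

import com.vaadin.server.StreamVariable;

// Sketch of a receiver for the upload loop above: bytes are written to the stream
// returned by getOutputStream(), and onProgress() is invoked at intervals because
// listenProgress() returns true. Buffering in memory is for illustration only.
public class InMemoryStreamVariable implements StreamVariable {
    private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();

    @Override
    public OutputStream getOutputStream() {
        return buffer;
    }

    @Override
    public boolean listenProgress() {
        return true;
    }

    @Override
    public void onProgress(StreamingProgressEvent event) {
        System.out.println("Received " + event.getBytesReceived() + " of "
                + event.getContentLength() + " bytes");
    }

    @Override
    public void streamingStarted(StreamingStartEvent event) {
        System.out.println("Upload of " + event.getFileName() + " started");
    }

    @Override
    public void streamingFinished(StreamingEndEvent event) {
        System.out.println("Upload finished, " + buffer.size() + " bytes buffered");
    }

    @Override
    public void streamingFailed(StreamingErrorEvent event) {
        System.err.println("Upload failed: " + event.getException());
    }

    @Override
    public boolean isInterrupted() {
        return false; // returning true here would trigger the UploadInterruptedException path above
    }
}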
flink_TypeTransformation_transform | /**
* Transforms the given data type to a different data type.
*
* <p>This method provides a {@link DataTypeFactory} if available.
*/
default DataType transform(@Nullable DataTypeFactory factory, DataType typeToTransform) {
return transform(typeToTransform);
} | 3.68 |
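A minimal usage sketch, assuming the interface is org.apache.flink.table.types.inference.TypeTransformation and that transform(DataType) is its only abstract method (so it can be written as a lambda); only public DataTypes/DataType calls are used, and the printed output comment is indicative rather than exact.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.inference.TypeTransformation;

// Sketch: the single abstract transform(DataType) method makes the interface usable
// as a lambda; the default transform(factory, type) overload simply delegates to it.
public class TypeTransformationExample {
    public static void main(String[] args) {
        TypeTransformation toNullable = dataType -> dataType.nullable();
        DataType result = toNullable.transform(DataTypes.INT().notNull());
        System.out.println(result); // prints the nullable variant of INT
    }
}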
dubbo_DubboConfigBeanDefinitionConflictApplicationListener_resolveUniqueApplicationConfigBean | /**
* Resolve the unique {@link ApplicationConfig} Bean
*
* @param registry {@link BeanDefinitionRegistry} instance
* @param beanFactory {@link ConfigurableListableBeanFactory} instance
* @see EnableDubboConfig
*/
private void resolveUniqueApplicationConfigBean(BeanDefinitionRegistry registry, ListableBeanFactory beanFactory) {
String[] beansNames = beanNamesForTypeIncludingAncestors(beanFactory, ApplicationConfig.class);
if (beansNames.length < 2) { // If the number of ApplicationConfig beans is less than two, return immediately.
return;
}
Environment environment = beanFactory.getBean(ENVIRONMENT_BEAN_NAME, Environment.class);
// Remove ApplicationConfig Beans that are configured by "dubbo.application.*"
Stream.of(beansNames)
.filter(beansName -> isConfiguredApplicationConfigBeanName(environment, beansName))
.forEach(registry::removeBeanDefinition);
beansNames = beanNamesForTypeIncludingAncestors(beanFactory, ApplicationConfig.class);
if (beansNames.length > 1) {
throw new IllegalStateException(String.format(
"There are more than one instances of %s, whose bean definitions : %s",
ApplicationConfig.class.getSimpleName(),
Stream.of(beansNames).map(registry::getBeanDefinition).collect(Collectors.toList())));
}
} | 3.68 |
dubbo_ServiceInvokeRestFilter_executeResponseIntercepts | /**
     * Executes the response interceptors in order, stopping as soon as the context is complete.
     *
     * @param restFilterContext the REST interception context shared by the interceptors
     * @throws Exception if an interceptor fails
*/
public void executeResponseIntercepts(RestInterceptContext restFilterContext) throws Exception {
for (RestResponseInterceptor restResponseInterceptor : restResponseInterceptors) {
restResponseInterceptor.intercept(restFilterContext);
if (restFilterContext.complete()) {
break;
}
}
} | 3.68 |
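The early-exit loop above follows a common interceptor-chain pattern; the self-contained sketch below illustrates that pattern with hypothetical Interceptor and InterceptContext types, not Dubbo's actual classes.

import java.util.List;

// Generic sketch of the interceptor-chain pattern above: run interceptors in order
// and stop as soon as one marks the context complete. All types are hypothetical.
public class InterceptorChain {

    interface Interceptor {
        void intercept(InterceptContext context) throws Exception;
    }

    static class InterceptContext {
        private boolean complete;

        void markComplete() {
            this.complete = true;
        }

        boolean complete() {
            return complete;
        }
    }

    private final List<Interceptor> interceptors;

    InterceptorChain(List<Interceptor> interceptors) {
        this.interceptors = interceptors;
    }

    void execute(InterceptContext context) throws Exception {
        for (Interceptor interceptor : interceptors) {
            interceptor.intercept(context);
            if (context.complete()) {
                break; // later interceptors never run once the response is complete
            }
        }
    }
}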