name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, range 3.26-3.68) |
---|---|---|
hadoop_DefaultOBSClientFactory_createHuaweiObsClient | /**
* Creates an {@link ObsClient} from the established configuration.
*
* @param conf Hadoop configuration
* @param obsConf ObsConfiguration
* @param name URL
* @return ObsClient client
* @throws IOException on any failure to create Huawei OBS client
*/
private static ObsClient createHuaweiObsClient(final Configuration conf,
final ObsConfiguration obsConf, final URI name)
throws IOException {
Class<?> credentialsProviderClass;
BasicSessionCredential credentialsProvider;
ObsClient obsClient;
try {
credentialsProviderClass = conf.getClass(
OBSConstants.OBS_CREDENTIALS_PROVIDER, null);
} catch (RuntimeException e) {
Throwable c = e.getCause() != null ? e.getCause() : e;
throw new IOException(
"From option " + OBSConstants.OBS_CREDENTIALS_PROVIDER + ' '
+ c, c);
}
if (credentialsProviderClass == null) {
return createObsClientWithoutCredentialsProvider(conf, obsConf,
name);
}
try {
Constructor<?> cons =
credentialsProviderClass.getDeclaredConstructor(URI.class,
Configuration.class);
credentialsProvider = (BasicSessionCredential) cons.newInstance(
name, conf);
} catch (NoSuchMethodException
| SecurityException
| IllegalAccessException
| InstantiationException
| InvocationTargetException e) {
Throwable c = e.getCause() != null ? e.getCause() : e;
throw new IOException(
"From option " + OBSConstants.OBS_CREDENTIALS_PROVIDER + ' '
+ c, c);
}
String sessionToken = credentialsProvider.getSessionToken();
String ak = credentialsProvider.getOBSAccessKeyId();
String sk = credentialsProvider.getOBSSecretKey();
String endPoint = conf.getTrimmed(OBSConstants.ENDPOINT, "");
obsConf.setEndPoint(endPoint);
if (sessionToken != null && sessionToken.length() != 0) {
obsClient = new ObsClient(ak, sk, sessionToken, obsConf);
} else {
obsClient = new ObsClient(ak, sk, obsConf);
}
return obsClient;
} | 3.68 |
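As a side note, here is a stripped-down, self-contained sketch of the reflection pattern used above: resolve a provider class named in configuration and invoke its two-argument constructor. The interface, class name, and constructor signature below are placeholders for illustration, not part of the OBS connector.

```java
import java.lang.reflect.Constructor;
import java.net.URI;

public class ProviderReflectionSketch {

    /** Placeholder contract for a credentials provider; not the real OBS interface. */
    public interface CredentialProvider {
        String accessKey();
    }

    /** A provider exposing the (URI, String) constructor this sketch expects. */
    public static class StaticProvider implements CredentialProvider {
        private final URI name;

        public StaticProvider(URI name, String conf) {
            this.name = name;
        }

        @Override
        public String accessKey() {
            return "ak-for-" + name.getHost();
        }
    }

    public static void main(String[] args) throws Exception {
        // Resolve the provider class by name, much like conf.getClass(...) in the snippet above.
        Class<?> cls = Class.forName(StaticProvider.class.getName());
        Constructor<?> cons = cls.getDeclaredConstructor(URI.class, String.class);
        CredentialProvider provider =
                (CredentialProvider) cons.newInstance(URI.create("obs://bucket"), "dummy-conf");
        System.out.println(provider.accessKey()); // prints: ak-for-bucket
    }
}
```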
morf_OracleDialect_renameTableStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#renameTableStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Table)
*/
@Override
public Collection<String> renameTableStatements(Table fromTable, Table toTable) {
String from = truncatedTableName(fromTable.getName());
String fromConstraint = primaryKeyConstraintName(fromTable.getName());
String to = truncatedTableName(toTable.getName());
String toConstraint = primaryKeyConstraintName(toTable.getName());
ArrayList<String> statements = new ArrayList<>();
if (!primaryKeysForTable(fromTable).isEmpty()) {
// Rename the PK constraint
statements.add("ALTER TABLE " + schemaNamePrefix() + from + " RENAME CONSTRAINT " + fromConstraint + " TO " + toConstraint);
// Rename the index that Oracle uses to manage the PK constraint
statements.add("ALTER INDEX " + schemaNamePrefix() + fromConstraint + " RENAME TO " + toConstraint);
}
// Rename the table itself
statements.add("ALTER TABLE " + schemaNamePrefix() + from + " RENAME TO " + to);
statements.add(commentOnTable(to));
return statements;
} | 3.68 |
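For illustration only, the following sketch prints the statement shapes this method emits for a hypothetical rename of SCHEMA.OLD_TBL (which has a primary key) to SCHEMA.NEW_TBL; all names are assumptions and the trailing table-comment statement is omitted.

```java
import java.util.ArrayList;
import java.util.List;

public class OracleRenameExample {
    public static void main(String[] args) {
        String schemaPrefix = "SCHEMA.";
        String from = "OLD_TBL", to = "NEW_TBL";
        String fromConstraint = "OLD_TBL_PK", toConstraint = "NEW_TBL_PK";

        List<String> statements = new ArrayList<>();
        // PK constraint and its backing index are renamed first, then the table itself.
        statements.add("ALTER TABLE " + schemaPrefix + from + " RENAME CONSTRAINT " + fromConstraint + " TO " + toConstraint);
        statements.add("ALTER INDEX " + schemaPrefix + fromConstraint + " RENAME TO " + toConstraint);
        statements.add("ALTER TABLE " + schemaPrefix + from + " RENAME TO " + to);
        statements.forEach(System.out::println);
    }
}
```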
hudi_SqlQueryBuilder_where | /**
* Appends a WHERE clause to a query.
*
* @param predicate The predicate for WHERE clause.
* @return The {@link SqlQueryBuilder} instance.
*/
public SqlQueryBuilder where(String predicate) {
if (StringUtils.isNullOrEmpty(predicate)) {
throw new IllegalArgumentException("No predicate provided with WHERE clause. Please provide a predicate to filter records.");
}
sqlBuilder.append(" where ");
sqlBuilder.append(predicate);
return this;
} | 3.68 |
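A minimal sketch (not the Hudi class) of how the guard above fits into a fluent builder; the select/from helper and the table name are assumptions made for the example.

```java
public class MiniSqlQueryBuilder {
    private final StringBuilder sqlBuilder = new StringBuilder();

    // Assumed entry point for the sketch: SELECT <columns> FROM <table>.
    public static MiniSqlQueryBuilder select(String table, String... columns) {
        MiniSqlQueryBuilder b = new MiniSqlQueryBuilder();
        b.sqlBuilder.append("select ").append(String.join(", ", columns))
                    .append(" from ").append(table);
        return b;
    }

    // Mirrors the guard in the snippet: an empty predicate is rejected up front.
    public MiniSqlQueryBuilder where(String predicate) {
        if (predicate == null || predicate.isEmpty()) {
            throw new IllegalArgumentException("No predicate provided with WHERE clause.");
        }
        sqlBuilder.append(" where ").append(predicate);
        return this;
    }

    @Override
    public String toString() {
        return sqlBuilder.toString();
    }

    public static void main(String[] args) {
        // Prints: select id, ts from trips where ts > 1700000000
        System.out.println(MiniSqlQueryBuilder.select("trips", "id", "ts").where("ts > 1700000000"));
    }
}
```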
framework_VAbstractPopupCalendar_setFocusedDate | /**
* Sets the content of a special field for assistive devices, so that they
* can recognize the change and inform the user (reading out in case of
* screen reader).
*
* @param selectedDate
* Date that is currently selected
*/
public void setFocusedDate(Date selectedDate) {
this.selectedDate.setText(DateTimeFormat.getFormat("dd, MMMM, yyyy")
.format(selectedDate));
} | 3.68 |
hadoop_HamletImpl_subView | /**
* Sub-classes should override this to do something interesting.
* @param cls the sub-view class
*/
protected void subView(Class<? extends SubView> cls) {
indent(of(ENDTAG)); // not an inline view
sb.setLength(0);
out.print(sb.append('[').append(cls.getName()).append(']').toString());
out.println();
} | 3.68 |
hbase_MemStoreFlusher_getMemStoreHeapSize | /** Returns the memstore heap size, or 0 if <code>r</code> is null. */
private static long getMemStoreHeapSize(HRegion r) {
return r == null ? 0 : r.getMemStoreHeapSize();
} | 3.68 |
framework_VaadinSession_setConverterFactory | /**
* Sets the {@code ConverterFactory} used to locate a suitable
* {@code Converter} for fields in the session.
* <p>
* The {@code ConverterFactory} is used to find a suitable converter when
* binding data to a UI component and the data type does not match the UI
* component type, e.g. binding a Double to a TextField (which is based on a
* String).
* <p>
* Note that this method and {@code #getConverterFactory()} use Object and not
* {@code ConverterFactory} in Vaadin 8 to avoid a core dependency on the
* compatibility packages.
* <p>
* The converter factory must never be set to null.
*
* @param converterFactory
* The converter factory used in the session
* @since 8.0
*/
@Deprecated
public void setConverterFactory(Object converterFactory) {
assert hasLock();
this.converterFactory = converterFactory;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_isAuthenticatedAccess | /**
* Private method to check for authenticated access.
*
* @return true if access is credentialed and authenticated, false otherwise.
*/
private boolean isAuthenticatedAccess() throws AzureException {
if (isAnonymousCredentials) {
// Access to this storage account is unauthenticated.
return false;
}
// Access is authenticated.
return true;
} | 3.68 |
flink_BoundedBlockingSubpartition_isFinished | /**
* Checks if writing is finished. Readers cannot be created until writing is finished, and no
* further writes can happen after that.
*/
public boolean isFinished() {
return isFinished;
} | 3.68 |
flink_KvStateLocation_registerKvState | /**
* Registers a KvState instance for the given key group index.
*
* @param keyGroupRange Key group range to register
* @param kvStateId ID of the KvState instance at the key group index.
* @param kvStateAddress Server address of the KvState instance at the key group index.
* @throws IndexOutOfBoundsException If key group range start < 0 or key group range end >=
* Number of key groups
*/
public void registerKvState(
KeyGroupRange keyGroupRange, KvStateID kvStateId, InetSocketAddress kvStateAddress) {
if (keyGroupRange.getStartKeyGroup() < 0
|| keyGroupRange.getEndKeyGroup() >= numKeyGroups) {
throw new IndexOutOfBoundsException("Key group index");
}
for (int kgIdx = keyGroupRange.getStartKeyGroup();
kgIdx <= keyGroupRange.getEndKeyGroup();
++kgIdx) {
if (kvStateIds[kgIdx] == null && kvStateAddresses[kgIdx] == null) {
numRegisteredKeyGroups++;
}
kvStateIds[kgIdx] = kvStateId;
kvStateAddresses[kgIdx] = kvStateAddress;
}
} | 3.68 |
hadoop_FlowRunColumn_getColumnQualifier | /**
* @return the column name value
*/
private String getColumnQualifier() {
return columnQualifier;
} | 3.68 |
flink_ScalarFunction_getParameterTypes | /**
* Returns {@link TypeInformation} about the operands of the evaluation method with a given
* signature.
*
* @deprecated This method uses the old type system and is based on the old reflective
* extraction logic. The method will be removed in future versions and is only called when
* using the deprecated {@code TableEnvironment.registerFunction(...)} method. The new
* reflective extraction logic (possibly enriched with {@link DataTypeHint} and {@link
* FunctionHint}) should be powerful enough to cover most use cases. For advanced users, it
* is possible to override {@link UserDefinedFunction#getTypeInference(DataTypeFactory)}.
*/
@Deprecated
public TypeInformation<?>[] getParameterTypes(Class<?>[] signature) {
final TypeInformation<?>[] types = new TypeInformation<?>[signature.length];
for (int i = 0; i < signature.length; i++) {
try {
types[i] = TypeExtractor.getForClass(signature[i]);
} catch (InvalidTypesException e) {
throw new ValidationException(
"Parameter types of scalar function "
+ this.getClass().getCanonicalName()
+ " cannot be automatically determined. Please provide type information manually.");
}
}
return types;
} | 3.68 |
hadoop_CallableSupplier_get | /**
* Activate any span and then call the supplied callable.
* @return the result.
*/
@Override
public T get() {
try {
if (auditSpan != null) {
auditSpan.activate();
}
return call.call();
} catch (RuntimeException e) {
throw e;
} catch (IOException e) {
throw new UncheckedIOException(e);
} catch (Exception e) {
throw new UncheckedIOException(new IOException(e));
}
} | 3.68 |
hudi_HDFSParquetImporterUtils_parseSchema | /**
* Parse Schema from file.
*
* @param fs File System
* @param schemaFile Schema File
*/
public static String parseSchema(FileSystem fs, String schemaFile) throws Exception {
// Read schema file.
Path p = new Path(schemaFile);
if (!fs.exists(p)) {
throw new Exception(String.format("Could not find - %s - schema file.", schemaFile));
}
long len = fs.getFileStatus(p).getLen();
ByteBuffer buf = ByteBuffer.allocate((int) len);
try (FSDataInputStream inputStream = fs.open(p)) {
inputStream.readFully(0, buf.array(), 0, buf.array().length);
}
return new String(buf.array(), StandardCharsets.UTF_8);
} | 3.68 |
open-banking-gateway_HbciRestorePreValidationContext_lastRedirectionTarget | // FIXME SerializerUtil does not support nestedness
private LastRedirectionTarget lastRedirectionTarget(BaseContext current) {
if (null == current.getLastRedirection()) {
return null;
}
LastRedirectionTarget target = current.getLastRedirection();
target.setRequestScoped(current.getRequestScoped());
return target;
} | 3.68 |
hadoop_ManifestCommitter_exitStage | /**
* Remove stage from common audit context.
* @param stage stage exited.
*/
@Override
public void exitStage(String stage) {
AuditingIntegration.exitStage();
} | 3.68 |
hadoop_NMContainerStatus_getExecutionType | /**
* Get the <code>ExecutionType</code> of the container.
* @return <code>ExecutionType</code> of the container
*/
public ExecutionType getExecutionType() {
return ExecutionType.GUARANTEED;
} | 3.68 |
hudi_FlinkCreateHandle_deleteInvalidDataFile | /**
* Flink checkpoints start in sequence and asynchronously. When one write task finishes checkpoint (A)
* (so the fs view sees the written data files, some of which may be invalid),
* it immediately proceeds with the next round of checkpoint (B) writes.
* If it tries to reuse the last small data bucket (small file) of an invalid data file,
* then, once the coordinator receives the success event of checkpoint (A),
* the invalid data file is cleaned up,
* and this merger gets a FileNotFoundException when it closes the write file handle.
*
* <p>To solve this, the invalid data file is deleted eagerly
* so that its small-file bucket is never reused.
*
* @param lastAttemptId The last attempt ID
*/
private void deleteInvalidDataFile(long lastAttemptId) {
final String lastWriteToken = FSUtils.makeWriteToken(getPartitionId(), getStageId(), lastAttemptId);
final String lastDataFileName = FSUtils.makeBaseFileName(instantTime,
lastWriteToken, this.fileId, hoodieTable.getBaseFileExtension());
final Path path = makeNewFilePath(partitionPath, lastDataFileName);
try {
if (fs.exists(path)) {
LOG.info("Deleting invalid INSERT file due to task retry: " + lastDataFileName);
fs.delete(path, false);
}
} catch (IOException e) {
throw new HoodieException("Error while deleting the INSERT file due to task retry: " + lastDataFileName, e);
}
} | 3.68 |
framework_AbstractSingleSelect_setValue | /**
* Sets the value of this object which is an item to select. If the new
* value is not equal to {@code getValue()}, fires a value change event. If
* value is {@code null} then it deselects currently selected item.
* <p>
* The call is delegated to {@link #setSelectedItem(Object)}.
*
* @see #setSelectedItem(Object)
* @see Single#setSelectedItem(Object)
*
* @param value
* the item to select or {@code null} to clear selection
*/
@Override
public void setValue(T value) {
setSelectedItem(value);
} | 3.68 |
hudi_HoodieTimelineArchiver_archiveIfRequired | /**
* Check if commits need to be archived. If yes, archive commits.
*/
public int archiveIfRequired(HoodieEngineContext context, boolean acquireLock) throws IOException {
try {
if (acquireLock) {
// there is no owner or instant time per se for archival.
txnManager.beginTransaction(Option.empty(), Option.empty());
}
// Sort again because the cleaning and rollback instants could break the sequence.
List<ActiveAction> instantsToArchive = getInstantsToArchive().sorted().collect(Collectors.toList());
if (!instantsToArchive.isEmpty()) {
LOG.info("Archiving instants " + instantsToArchive);
Consumer<Exception> exceptionHandler = e -> {
if (this.config.isFailOnTimelineArchivingEnabled()) {
throw new HoodieException(e);
}
};
this.timelineWriter.write(instantsToArchive, Option.of(action -> deleteAnyLeftOverMarkers(context, action)), Option.of(exceptionHandler));
LOG.info("Deleting archived instants " + instantsToArchive);
deleteArchivedInstants(instantsToArchive, context);
// triggers compaction and cleaning only after archiving action
this.timelineWriter.compactAndClean(context);
} else {
LOG.info("No Instants to archive");
}
return instantsToArchive.size();
} finally {
if (acquireLock) {
txnManager.endTransaction(Option.empty());
}
}
} | 3.68 |
hbase_LogLevel_connect | /**
* Connect to the URL. Supports HTTP and supports SPNEGO authentication. It falls back to simple
* authentication if it fails to initiate SPNEGO.
* @param url the URL address of the daemon servlet
* @return a connected connection
* @throws Exception if it can not establish a connection.
*/
private HttpURLConnection connect(URL url) throws Exception {
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
AuthenticatedURL aUrl;
SSLFactory clientSslFactory;
HttpURLConnection connection;
// If https is chosen, configures SSL client.
if (PROTOCOL_HTTPS.equals(url.getProtocol())) {
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, this.getConf());
clientSslFactory.init();
SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
aUrl = new AuthenticatedURL(new KerberosAuthenticator(), clientSslFactory);
connection = aUrl.openConnection(url, token);
HttpsURLConnection httpsConn = (HttpsURLConnection) connection;
httpsConn.setSSLSocketFactory(sslSocketF);
} else {
aUrl = new AuthenticatedURL(new KerberosAuthenticator());
connection = aUrl.openConnection(url, token);
}
connection.connect();
return connection;
} | 3.68 |
framework_CalendarWeekDropHandler_isLocationValid | /**
* Checks if the location is a valid drop location
*
* @param elementOver
* The element to check
* @return true if the location is a valid drop location, false otherwise
*/
private boolean isLocationValid(Element elementOver) {
Element weekGridElement = calendarConnector.getWidget().getWeekGrid()
.getElement();
Element timeBarElement = calendarConnector.getWidget().getWeekGrid()
.getTimeBar().getElement();
Element todayBarElement = null;
if (calendarConnector.getWidget().getWeekGrid().hasToday()) {
todayBarElement = calendarConnector.getWidget().getWeekGrid()
.getDateCellOfToday().getTodaybarElement();
}
// drops are not allowed in:
// - weekday header
// - allday event list
// - todaybar
// - timebar
// - events
return DOM.isOrHasChild(weekGridElement, elementOver)
&& !DOM.isOrHasChild(timeBarElement, elementOver)
&& todayBarElement != elementOver
&& (WidgetUtil.findWidget(elementOver,
DateCellDayEvent.class) == null);
} | 3.68 |
pulsar_NamespaceName_getTopicName | /**
* Compose the topic name from namespace + topic.
*
* @param domain the topic domain
* @param topic the local topic name
* @return the fully qualified topic name
*/
String getTopicName(TopicDomain domain, String topic) {
if (domain == null) {
throw new IllegalArgumentException("invalid null domain");
}
NamedEntity.checkName(topic);
return String.format("%s://%s/%s", domain.toString(), namespace, topic);
} | 3.68 |
framework_AccordionConnector_onConnectorHierarchyChange | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ConnectorHierarchyChangeEvent.
* ConnectorHierarchyChangeHandler
* #onConnectorHierarchyChange(com.vaadin.client
* .ConnectorHierarchyChangeEvent)
*/
@Override
public void onConnectorHierarchyChange(
ConnectorHierarchyChangeEvent connectorHierarchyChangeEvent) {
} | 3.68 |
hbase_MasterObserver_preCreateTable | /**
* Called before a new table is created by {@link org.apache.hadoop.hbase.master.HMaster}. Called
* as part of create table RPC call.
* @param ctx the environment to interact with the framework and master
* @param desc the TableDescriptor for the table
* @param regions the initial regions created for the table
*/
default void preCreateTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
TableDescriptor desc, RegionInfo[] regions) throws IOException {
} | 3.68 |
pulsar_ConsumerBase_trackUnAckedMsgIfNoListener | // if listener is not null, we will track unAcked msg in callMessageListener
protected void trackUnAckedMsgIfNoListener(MessageId messageId, int redeliveryCount) {
if (listener == null) {
unAckedMessageTracker.add(messageId, redeliveryCount);
}
} | 3.68 |
hudi_BaseHoodieWriteClient_scheduleClustering | /**
* Schedules a new clustering instant.
* @param extraMetadata Extra Metadata to be stored
*/
public Option<String> scheduleClustering(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleClusteringAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
} | 3.68 |
framework_Label_setPropertyDataSource | /**
* Sets the property as data-source for viewing. Since Vaadin 7.2 a
* ValueChangeEvent is fired if the new value is different from previous.
*
* @param newDataSource
* the new data source Property
* @see Property.Viewer#setPropertyDataSource(Property)
*/
@Override
public void setPropertyDataSource(Property newDataSource) {
// Stops listening the old data source changes
if (dataSource != null && Property.ValueChangeNotifier.class
.isAssignableFrom(dataSource.getClass())) {
((Property.ValueChangeNotifier) dataSource).removeListener(this);
}
// Check if the current converter is compatible.
if (newDataSource != null
&& !ConverterUtil.canConverterPossiblyHandle(getConverter(),
getType(), newDataSource.getType())) {
// There is no converter set or there is no way the current
// converter can be compatible.
Converter<String, ?> c = ConverterUtil.getConverter(String.class,
newDataSource.getType(), getSession());
setConverter(c);
}
dataSource = newDataSource;
if (dataSource != null) {
// Update the value from the data source. If data source was set to
// null, retain the old value
updateValueFromDataSource();
}
// Listens the new data source if possible
if (dataSource != null && Property.ValueChangeNotifier.class
.isAssignableFrom(dataSource.getClass())) {
((Property.ValueChangeNotifier) dataSource).addListener(this);
}
markAsDirty();
} | 3.68 |
hadoop_LightWeightLinkedSet_clear | /**
* Clear the set. Resize it to the original capacity.
*/
@Override
public void clear() {
super.clear();
this.head = null;
this.tail = null;
this.resetBookmark();
} | 3.68 |
framework_AbstractContainer_removeListener | /**
* @deprecated As of 7.0, replaced by
* {@link #addItemSetChangeListener(Container.ItemSetChangeListener)}
*/
@Deprecated
protected void removeListener(Container.ItemSetChangeListener listener) {
removeItemSetChangeListener(listener);
} | 3.68 |
pulsar_ManagedLedgerConfig_setEnsembleSize | /**
* @param ensembleSize
* the ensembleSize to set
*/
public ManagedLedgerConfig setEnsembleSize(int ensembleSize) {
this.ensembleSize = ensembleSize;
return this;
} | 3.68 |
flink_StreamExecutionEnvironment_getParallelism | /**
* Gets the parallelism with which operation are executed by default. Operations can
* individually override this value to use a specific parallelism.
*
* @return The parallelism used by operations, unless they override that value.
*/
public int getParallelism() {
return config.getParallelism();
} | 3.68 |
open-banking-gateway_FintechConsentAccessImpl_getAvailableConsentsForCurrentPsu | /**
* Returns available consents to the PSU (not FinTech).
*/
@Override
public Collection<ProtocolFacingConsent> getAvailableConsentsForCurrentPsu() {
return Collections.emptyList();
} | 3.68 |
flink_KvStateInfo_getStateValueSerializer | /** @return The serializer for the values kept in the state. */
public TypeSerializer<V> getStateValueSerializer() {
return stateValueSerializer;
} | 3.68 |
AreaShop_GeneralRegion_getName | /**
* Get the name of the region.
* @return The region name
*/
@Override
public String getName() {
return config.getString("general.name");
} | 3.68 |
flink_SavepointReader_readKeyedState | /**
* Read keyed state from an operator in a {@code Savepoint}.
*
* @param identifier The identifier of the operator.
* @param function The {@link KeyedStateReaderFunction} that is called for each key in state.
* @param keyTypeInfo The type information of the key in state.
* @param outTypeInfo The type information of the output of the transform reader function.
* @param <K> The type of the key in state.
* @param <OUT> The output type of the transform function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException If the savepoint does not contain operator state with the given uid.
*/
public <K, OUT> DataStream<OUT> readKeyedState(
OperatorIdentifier identifier,
KeyedStateReaderFunction<K, OUT> function,
TypeInformation<K> keyTypeInfo,
TypeInformation<OUT> outTypeInfo)
throws IOException {
OperatorState operatorState = metadata.getOperatorState(identifier);
KeyedStateInputFormat<K, VoidNamespace, OUT> inputFormat =
new KeyedStateInputFormat<>(
operatorState,
stateBackend,
MutableConfig.of(env.getConfiguration()),
new KeyedStateReaderOperator<>(function, keyTypeInfo));
return SourceBuilder.fromFormat(env, inputFormat, outTypeInfo);
} | 3.68 |
hbase_RegistryEndpointsRefresher_create | /**
* Create a {@link RegistryEndpointsRefresher}. If the interval secs configured via
* {@code intervalSecsConfigName} is less than or equal to zero, this returns null, which means
* refreshing of endpoints is disabled.
*/
static RegistryEndpointsRefresher create(Configuration conf, String initialDelaySecsConfigName,
String intervalSecsConfigName, String minIntervalSecsConfigName, Refresher refresher) {
long periodicRefreshMs = TimeUnit.SECONDS
.toMillis(conf.getLong(intervalSecsConfigName, PERIODIC_REFRESH_INTERVAL_SECS_DEFAULT));
if (periodicRefreshMs <= 0) {
return null;
}
long initialDelayMs = Math.max(1,
TimeUnit.SECONDS.toMillis(conf.getLong(initialDelaySecsConfigName, periodicRefreshMs / 10)));
long minTimeBetweenRefreshesMs = TimeUnit.SECONDS
.toMillis(conf.getLong(minIntervalSecsConfigName, MIN_SECS_BETWEEN_REFRESHES_DEFAULT));
Preconditions.checkArgument(minTimeBetweenRefreshesMs < periodicRefreshMs);
return new RegistryEndpointsRefresher(initialDelayMs, periodicRefreshMs,
minTimeBetweenRefreshesMs, refresher);
} | 3.68 |
hadoop_ProtocolProxy_isMethodSupported | /**
* Check if a method is supported by the server or not.
*
* @param methodName a method's name in String format
* @param parameterTypes a method's parameter types
* @return true if the method is supported by the server
* @throws IOException raised on errors performing I/O.
*/
public synchronized boolean isMethodSupported(String methodName,
Class<?>... parameterTypes)
throws IOException {
if (!supportServerMethodCheck) {
return true;
}
Method method;
try {
method = protocol.getDeclaredMethod(methodName, parameterTypes);
} catch (SecurityException e) {
throw new IOException(e);
} catch (NoSuchMethodException e) {
throw new IOException(e);
}
if (!serverMethodsFetched) {
fetchServerMethods(method);
}
if (serverMethods == null) { // client & server have the same protocol
return true;
}
return serverMethods.contains(
Integer.valueOf(ProtocolSignature.getFingerprint(method)));
} | 3.68 |
streampipes_SpOpcUaClient_createListSubscription | /***
* Register subscriptions for given OPC UA nodes
* @param nodes List of {@link org.eclipse.milo.opcua.stack.core.types.builtin.NodeId}
* @param opcUaAdapter current instance of {@link OpcUaAdapter}
* @throws Exception if the subscription cannot be created
*/
public void createListSubscription(List<NodeId> nodes,
OpcUaAdapter opcUaAdapter) throws Exception {
client.getSubscriptionManager().addSubscriptionListener(new UaSubscriptionManager.SubscriptionListener() {
@Override
public void onSubscriptionTransferFailed(UaSubscription subscription, StatusCode statusCode) {
LOG.warn("Transfer for subscriptionId={} failed: {}", subscription.getSubscriptionId(), statusCode);
try {
initSubscription(nodes, opcUaAdapter);
} catch (Exception e) {
LOG.error("Re-creating the subscription failed", e);
}
}
});
initSubscription(nodes, opcUaAdapter);
} | 3.68 |
hbase_QualifierFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof QualifierFilter)) {
return false;
}
return super.areSerializedFieldsEqual(o);
} | 3.68 |
morf_AbstractSqlDialectTest_testDropViewStatements | /**
* Tests SQL for dropping a view.
*/
@SuppressWarnings("unchecked")
@Test
public void testDropViewStatements() {
compareStatements(
expectedDropViewStatements(),
testDialect.dropStatements(testView));
} | 3.68 |
hadoop_SinglePendingCommit_getTaskId | /** @return Task ID, if known. */
public String getTaskId() {
return taskId;
} | 3.68 |
hadoop_HAState_getLastHATransitionTime | /**
* Gets the most recent HA transition time in milliseconds from the epoch.
*
* @return the most recent HA transition time in milliseconds from the epoch.
*/
public long getLastHATransitionTime() {
return lastHATransitionTime;
} | 3.68 |
flink_StreamProjection_projectTuple12 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>
SingleOutputStreamOperator<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>
projectTuple12() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> tType =
new TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(
fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<IN, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
framework_AbstractProperty_isReadOnly | /**
* {@inheritDoc}
*
* Override for additional restrictions on what is considered a read-only
* property.
*/
@Override
public boolean isReadOnly() {
return readOnly;
} | 3.68 |
flink_TaskManagerLocation_getHostName | /**
* Returns the textual representation of the TaskManager's IP address as host name.
*
* @return The textual representation of the TaskManager's IP address.
*/
@Override
public String getHostName() {
return inetAddress.getHostAddress();
} | 3.68 |
flink_AbstractOrcColumnVector_createFlinkVectorFromConstant | /** Create flink vector by hive vector from constant. */
public static org.apache.flink.table.data.columnar.vector.ColumnVector
createFlinkVectorFromConstant(LogicalType type, Object value, int batchSize) {
return createFlinkVector(createHiveVectorFromConstant(type, value, batchSize), type);
} | 3.68 |
hbase_Append_isReturnResults | /** Returns current setting for returnResults */
// This method makes public the superclass's protected method.
@Override
public boolean isReturnResults() {
return super.isReturnResults();
} | 3.68 |
hbase_WALEdit_isReplicationMarkerEdit | /**
* Checks whether this edit is a replication marker edit.
* @param edit edit
* @return true if the cell within an edit has column = METAFAMILY and qualifier =
* REPLICATION_MARKER, false otherwise
*/
public static boolean isReplicationMarkerEdit(WALEdit edit) {
// Check just the first cell from the edit. ReplicationMarker edit will have only 1 cell.
return edit.getCells().size() == 1
&& CellUtil.matchingColumn(edit.getCells().get(0), METAFAMILY, REPLICATION_MARKER);
} | 3.68 |
hadoop_TrashProcedure_moveToTrash | /**
* Move the source path to trash, delete it, or skip it, depending on the trash option.
*/
void moveToTrash() throws IOException {
Path src = context.getSrc();
if (srcFs.exists(src)) {
TrashOption trashOption = context.getTrashOpt();
switch (trashOption) {
case TRASH:
conf.setFloat(FS_TRASH_INTERVAL_KEY, 60);
if (!Trash.moveToAppropriateTrash(srcFs, src, conf)) {
throw new IOException("Failed move " + src + " to trash.");
}
break;
case DELETE:
if (!srcFs.delete(src, true)) {
throw new IOException("Failed delete " + src);
}
LOG.info("{} is deleted.", src);
break;
case SKIP:
break;
default:
throw new IOException("Unexpected trash option=" + trashOption);
}
}
} | 3.68 |
morf_JdbcUrlElements_withHost | /**
* Sets the host. Defaults to null (no host specified).
*
* @param host The host name.
* @return this
*/
public Builder withHost(String host) {
this.host = host;
return this;
} | 3.68 |
framework_LoginForm_setLoginButtonCaption | /**
* Sets the caption of the login button. Note that the caption can only be
* set with this method before the login form has been initialized
* (attached).
* <p>
* As an alternative to calling this method, the method
* {@link #createLoginButton()} can be overridden.
*
* @param loginButtonCaption
* new caption
*/
public void setLoginButtonCaption(String loginButtonCaption) {
this.loginButtonCaption = loginButtonCaption;
} | 3.68 |
framework_PropertysetItem_addItemProperty | /**
* Tries to add a new Property into the Item.
*
* @param id
* the ID of the new Property.
* @param property
* the Property to be added and associated with the id.
* @return <code>true</code> if the operation succeeded, <code>false</code>
* if not
*/
@Override
public boolean addItemProperty(Object id, Property property) {
// Null ids are not accepted
if (id == null) {
throw new NullPointerException("Item property id can not be null");
}
// Can't add a property twice
if (map.containsKey(id)) {
return false;
}
// Put the property to map
map.put(id, property);
list.add(id);
// Send event
fireItemPropertySetChange();
return true;
} | 3.68 |
flink_HadoopInputFormatCommonBase_getCredentialsFromUGI | /**
* @param ugi The user information
* @return new credentials object from the user information.
*/
public static Credentials getCredentialsFromUGI(UserGroupInformation ugi) {
return ugi.getCredentials();
} | 3.68 |
flink_ConnectedStreams_getType2 | /**
* Gets the type of the second input.
*
* @return The type of the second input
*/
public TypeInformation<IN2> getType2() {
return inputStream2.getType();
} | 3.68 |
hudi_CloudObjectsSelector_createAmazonSqsClient | /**
* Amazon SQS Client Builder.
*/
public SqsClient createAmazonSqsClient() {
return SqsClient.builder().region(Region.of(regionName)).build();
} | 3.68 |
hadoop_ImageVisitor_visit | // Convenience methods to automatically convert numeric value types to strings
void visit(ImageElement element, int value) throws IOException {
visit(element, Integer.toString(value));
} | 3.68 |
flink_FactoryUtil_getFormatPrefix | /** Returns the required option prefix for options of the given format. */
public static String getFormatPrefix(
ConfigOption<String> formatOption, String formatIdentifier) {
final String formatOptionKey = formatOption.key();
if (formatOptionKey.equals(FORMAT.key())) {
return formatIdentifier + ".";
} else if (formatOptionKey.endsWith(FORMAT_SUFFIX)) {
// extract the key prefix, e.g. extract 'key' from 'key.format'
String keyPrefix =
formatOptionKey.substring(0, formatOptionKey.length() - FORMAT_SUFFIX.length());
return keyPrefix + "." + formatIdentifier + ".";
} else {
throw new ValidationException(
"Format identifier key should be 'format' or suffix with '.format', "
+ "don't support format identifier key '"
+ formatOptionKey
+ "'.");
}
} | 3.68 |
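The prefix rule described above, restated as a plain-string sketch with no Flink types; the option keys and format identifier in main are illustrative.

```java
public class FormatPrefixSketch {
    // 'format' -> "<identifier>." ; '<prefix>.format' -> "<prefix>.<identifier>."
    static String getFormatPrefix(String formatOptionKey, String formatIdentifier) {
        if (formatOptionKey.equals("format")) {
            return formatIdentifier + ".";
        } else if (formatOptionKey.endsWith(".format")) {
            String keyPrefix =
                    formatOptionKey.substring(0, formatOptionKey.length() - ".format".length());
            return keyPrefix + "." + formatIdentifier + ".";
        }
        throw new IllegalArgumentException("Unsupported format key: " + formatOptionKey);
    }

    public static void main(String[] args) {
        System.out.println(getFormatPrefix("format", "json"));     // json.
        System.out.println(getFormatPrefix("key.format", "json")); // key.json.
    }
}
```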
querydsl_SQLExpressions_groupConcat | /**
* Get a group_concat(expr, separator) expression
*
* @param expr expression to be aggregated
* @param separator separator string
* @return group_concat(expr, separator)
*/
public static StringExpression groupConcat(Expression<String> expr, String separator) {
return Expressions.stringOperation(SQLOps.GROUP_CONCAT2, expr, Expressions.constant(separator));
} | 3.68 |
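A hedged usage sketch, assuming the querydsl-sql dependency is on the classpath; the column path is made up for the example.

```java
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.StringExpression;
import com.querydsl.core.types.dsl.StringPath;
import com.querydsl.sql.SQLExpressions;

public class GroupConcatExample {
    public static void main(String[] args) {
        // Hypothetical string column to aggregate.
        StringPath name = Expressions.stringPath("name");
        // Builds an expression that renders roughly as group_concat(name, ', ') in generated SQL.
        StringExpression names = SQLExpressions.groupConcat(name, ", ");
        System.out.println(names);
    }
}
```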
flink_MemorySegment_copyToUnsafe | /**
* Bulk copy method. Copies {@code numBytes} bytes to target unsafe object and pointer. NOTE:
* This is an unsafe method, no check here, please be careful.
*
* @param offset The position where the bytes are started to be read from in this memory
* segment.
* @param target The unsafe memory to copy the bytes to.
* @param targetPointer The position in the target unsafe memory to copy the chunk to.
* @param numBytes The number of bytes to copy.
* @throws IndexOutOfBoundsException If the source segment does not contain the given number of
* bytes (starting from offset).
*/
public void copyToUnsafe(int offset, Object target, int targetPointer, int numBytes) {
final long thisPointer = this.address + offset;
if (thisPointer + numBytes > addressLimit) {
throw new IndexOutOfBoundsException(
String.format(
"offset=%d, numBytes=%d, address=%d", offset, numBytes, this.address));
}
UNSAFE.copyMemory(this.heapMemory, thisPointer, target, targetPointer, numBytes);
} | 3.68 |
hbase_RSGroupInfoManagerImpl_getOnlineServers | /** Returns Set of online Servers named for their hostname and port (not ServerName). */
private Set<Address> getOnlineServers() {
return masterServices.getServerManager().getOnlineServers().keySet().stream()
.map(ServerName::getAddress).collect(Collectors.toSet());
} | 3.68 |
framework_WeekGrid_getDateCellWidths | /**
* @return an int-array containing the widths of the cells (days)
*/
public int[] getDateCellWidths() {
return cellWidths;
} | 3.68 |
hudi_CloudObjectsSelector_createListPartitions | /**
* Create partitions of list using specific batch size. we can't use third party API for this
* functionality, due to https://github.com/apache/hudi/blob/master/style/checkstyle.xml#L270
*/
protected List<List<Message>> createListPartitions(List<Message> singleList, int eachBatchSize) {
List<List<Message>> listPartitions = new ArrayList<>();
if (singleList.size() == 0 || eachBatchSize < 1) {
return listPartitions;
}
for (int start = 0; start < singleList.size(); start += eachBatchSize) {
int end = Math.min(start + eachBatchSize, singleList.size());
if (start > end) {
throw new IndexOutOfBoundsException(
"Index " + start + " is out of the list range <0," + (singleList.size() - 1) + ">");
}
listPartitions.add(new ArrayList<>(singleList.subList(start, end)));
}
return listPartitions;
} | 3.68 |
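The same batching logic, sketched as a standalone generic method so it can be run without the SQS Message type; the sample input is arbitrary.

```java
import java.util.ArrayList;
import java.util.List;

public class ListPartitionSketch {
    // Split a list into consecutive batches of at most eachBatchSize elements.
    static <T> List<List<T>> partition(List<T> singleList, int eachBatchSize) {
        List<List<T>> partitions = new ArrayList<>();
        if (singleList.isEmpty() || eachBatchSize < 1) {
            return partitions;
        }
        for (int start = 0; start < singleList.size(); start += eachBatchSize) {
            int end = Math.min(start + eachBatchSize, singleList.size());
            partitions.add(new ArrayList<>(singleList.subList(start, end)));
        }
        return partitions;
    }

    public static void main(String[] args) {
        // Prints: [[1, 2], [3, 4], [5]]
        System.out.println(partition(List.of(1, 2, 3, 4, 5), 2));
    }
}
```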
hadoop_AbfsConfiguration_get | /**
* Returns the account-specific value if it exists, then looks for an
* account-agnostic value.
* @param key Account-agnostic configuration key
* @return value if one exists, else null
*/
public String get(String key) {
return rawConfig.get(accountConf(key), rawConfig.get(key));
} | 3.68 |
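A sketch of the lookup order, under the assumption that the account-qualified key is simply the plain key with the account name appended; the configuration keys and account name are invented for the example.

```java
import java.util.HashMap;
import java.util.Map;

public class AccountConfigLookup {
    // Account-specific value wins; otherwise fall back to the account-agnostic key.
    static String get(Map<String, String> conf, String key, String accountName) {
        String accountSpecific = conf.get(key + "." + accountName);
        return accountSpecific != null ? accountSpecific : conf.get(key);
    }

    public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        conf.put("fs.azure.example.buffer.size", "4194304");
        conf.put("fs.azure.example.buffer.size.myaccount.dfs.core.windows.net", "8388608");
        // Prints 8388608: the account-specific value takes precedence.
        System.out.println(get(conf, "fs.azure.example.buffer.size",
                "myaccount.dfs.core.windows.net"));
    }
}
```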
flink_TaskEventDispatcher_publish | /**
* Publishes the event to the registered {@link EventListener} instances.
*
* <p>This method is either called directly from a {@link LocalInputChannel} or the network I/O
* thread on behalf of a {@link RemoteInputChannel}.
*
* @return whether the event was published to a registered event handler (initiated via {@link
* #registerPartition(ResultPartitionID)}) or not
*/
@Override
public boolean publish(ResultPartitionID partitionId, TaskEvent event) {
checkNotNull(partitionId);
checkNotNull(event);
TaskEventHandler taskEventHandler;
synchronized (registeredHandlers) {
taskEventHandler = registeredHandlers.get(partitionId);
}
if (taskEventHandler != null) {
taskEventHandler.publish(event);
return true;
}
return false;
} | 3.68 |
flink_JobEdge_setDownstreamSubtaskStateMapper | /**
* Sets the channel state rescaler used for rescaling persisted data on downstream side of this
* JobEdge.
*
* @param downstreamSubtaskStateMapper The channel state rescaler selector to use.
*/
public void setDownstreamSubtaskStateMapper(SubtaskStateMapper downstreamSubtaskStateMapper) {
this.downstreamSubtaskStateMapper = checkNotNull(downstreamSubtaskStateMapper);
} | 3.68 |
querydsl_ComparableExpression_loeAll | /**
* Create a {@code this <= all right} expression
*
* @param right rhs of the comparison
* @return this <= all right
*/
public BooleanExpression loeAll(SubQueryExpression<? extends T> right) {
return loe(ExpressionUtils.all(right));
} | 3.68 |
AreaShop_AreaShop_getBukkitHandler | /**
* Get the BukkitHandler, for sign interactions.
* @return BukkitHandler
*/
public BukkitInterface getBukkitHandler() {
return this.bukkitInterface;
} | 3.68 |
hudi_HoodieBaseFile_getFileIdAndCommitTimeFromFileName | /**
* Parses the file ID and commit time from the fileName.
* @param fileName Name of the file
* @return String array of size 2 with fileId as the first and commitTime as the second element.
*/
private static String[] getFileIdAndCommitTimeFromFileName(String fileName) {
return ExternalFilePathUtil.isExternallyCreatedFile(fileName) ? handleExternallyGeneratedFile(fileName) : handleHudiGeneratedFile(fileName);
} | 3.68 |
hbase_Call_setResponse | /**
* Set the return value when there is no error. Notify the caller the call is done.
* @param response return value of the call.
* @param cells Can be null
*/
public void setResponse(Message response, final CellScanner cells) {
synchronized (this) {
if (done) {
return;
}
this.done = true;
this.response = response;
this.cells = cells;
}
callComplete();
} | 3.68 |
flink_CanalJsonFormatFactory_validateEncodingFormatOptions | /** Validator for canal encoding format. */
private static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateEncodingFormatOptions(tableOptions);
} | 3.68 |
morf_XmlDataSetProducer_getSchema | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#getSchema()
*/
@Override
public Schema getSchema() {
return new PullProcessorMetaDataProvider(xmlStreamProvider);
} | 3.68 |
querydsl_AbstractMySQLQuery_ignoreIndex | /**
* The alternative syntax IGNORE INDEX (index_list) can be used to tell MySQL to not use some
* particular index or indexes.
*
* @param indexes index names
* @return the current object
*/
public C ignoreIndex(String... indexes) {
return addJoinFlag(" ignore index (" + String.join(", ", indexes) + ")", JoinFlag.Position.END);
} | 3.68 |
hbase_BloomFilterUtil_computeMaxKeys | /**
* The maximum number of keys we can put into a Bloom filter of a certain size to get the given
* error rate, with the given number of hash functions.
* @return the maximum number of keys that can be inserted in a Bloom filter to maintain the
* target error rate, if the number of hash functions is provided.
*/
public static long computeMaxKeys(long bitSize, double errorRate, int hashCount) {
return (long) (-bitSize * 1.0 / hashCount
* Math.log(1 - Math.exp(Math.log(errorRate) / hashCount)));
} | 3.68 |
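A hedged worked example of the formula, with an assumed 1 MiB filter, 1% error rate, and 3 hash functions.

```java
public class BloomMaxKeysExample {

    // Same formula as in the snippet above, reproduced for a standalone run.
    static long computeMaxKeys(long bitSize, double errorRate, int hashCount) {
        return (long) (-bitSize * 1.0 / hashCount
            * Math.log(1 - Math.exp(Math.log(errorRate) / hashCount)));
    }

    public static void main(String[] args) {
        // A 1 MiB filter (8,388,608 bits), 1% target error rate, 3 hash functions:
        // prints roughly 678,000 keys.
        System.out.println(computeMaxKeys(8L * 1024 * 1024, 0.01, 3));
    }
}
```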
querydsl_GenericExporter_export | /**
* Export the given classes
*
* @param classes classes to be scanned
*/
public void export(Class<?>...classes) {
for (Class<?> cl : classes) {
handleClass(cl);
}
innerExport();
} | 3.68 |
hbase_RegionState_isReadyToOffline | /**
* Check if a region state can transition to offline
*/
public boolean isReadyToOffline() {
return isMerged() || isSplit() || isOffline() || isSplittingNew() || isMergingNew();
} | 3.68 |
framework_ServerRpcHandler_handleRpc | /**
* Reads JSON containing zero or more serialized RPC calls (including legacy
* variable changes) and executes the calls.
*
* @param ui
* The {@link UI} receiving the calls. Cannot be null.
* @param reader
* The {@link Reader} used to read the JSON.
* @param request
* The {@link VaadinRequest} to handle.
* @throws IOException
* If reading the message fails.
* @throws InvalidUIDLSecurityKeyException
* If the received security key does not match the one stored in
* the session.
*/
public void handleRpc(UI ui, Reader reader, VaadinRequest request)
throws IOException, InvalidUIDLSecurityKeyException {
ui.getSession().setLastRequestTimestamp(System.currentTimeMillis());
String changeMessage = getMessage(reader);
if (changeMessage == null || changeMessage.isEmpty()) {
// The client sometimes sends empty messages, this is probably a bug
return;
}
RpcRequest rpcRequest = new RpcRequest(changeMessage, request);
// Security: double cookie submission pattern unless disabled by
// property
if (!VaadinService.isCsrfTokenValid(ui.getSession(),
rpcRequest.getCsrfToken())) {
throw new InvalidUIDLSecurityKeyException("");
}
checkWidgetsetVersion(rpcRequest.getWidgetsetVersion());
int expectedId = ui.getLastProcessedClientToServerId() + 1;
if (rpcRequest.getClientToServerId() != -1
&& rpcRequest.getClientToServerId() != expectedId) {
// Invalid message id, skip RPC processing but force a full
// re-synchronization of the client as it might have not received
// the previous response (e.g. due to a bad connection)
// Must resync also for duplicate messages because the server might
// have generated a response for the first message but the response
// did not reach the client. When the client re-sends the message,
// it would only get an empty response (because the dirty flags have
// been cleared on the server) and would be out of sync
ui.getSession().getCommunicationManager().repaintAll(ui);
if (rpcRequest.getClientToServerId() < expectedId) {
// Just a duplicate message due to a bad connection or similar
// It has already been handled by the server so it is safe to
// ignore
getLogger()
.fine("Ignoring old message from the client. Expected: "
+ expectedId + ", got: "
+ rpcRequest.getClientToServerId());
} else {
getLogger().warning(
"Unexpected message id from the client. Expected: "
+ expectedId + ", got: "
+ rpcRequest.getClientToServerId());
}
} else {
// Message id ok, process RPCs
ui.setLastProcessedClientToServerId(expectedId);
handleInvocations(ui, rpcRequest.getSyncId(),
rpcRequest.getRpcInvocationsData());
}
if (rpcRequest.isResynchronize()) {
ui.getSession().getCommunicationManager().repaintAll(ui);
}
} | 3.68 |
framework_LayoutManager_getPaddingTop | /**
* Gets the top padding of the given element, provided that it has been
* measured. These elements are guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured size for
* @return the measured top padding of the element in pixels.
*/
public int getPaddingTop(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getPaddingTop();
} | 3.68 |
flink_CastRuleProvider_resolve | /**
* Resolve a {@link CastRule} for the provided input type and target type. Returns {@code null}
* if no rule can be resolved.
*/
public static @Nullable CastRule<?, ?> resolve(LogicalType inputType, LogicalType targetType) {
return INSTANCE.internalResolve(inputType, targetType);
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_getNode | /**
* Find the node containing the given key.
*
* @param keySegment memory segment storing the key.
* @param keyOffset offset of the key.
* @param keyLen length of the key.
* @return the state. Null will be returned if key does not exist.
*/
@VisibleForTesting
@Nullable
S getNode(MemorySegment keySegment, int keyOffset, int keyLen) {
SkipListIterateAndProcessResult result =
iterateAndProcess(
keySegment,
keyOffset,
keyLen,
(pointers, isRemoved) -> {
long currentNode = pointers.currentNode;
return isRemoved ? null : getNodeStateHelper(currentNode);
});
return result.isKeyFound ? result.state : null;
} | 3.68 |
flink_DataSetUtils_zipWithUniqueId | /**
* Method that assigns a unique {@link Long} value to all elements in the input data set as
* described below.
*
* <ul>
* <li>a map function is applied to the input data set
* <li>each map task holds a counter c which is increased for each record
* <li>c is shifted by n bits where n = log2(number of parallel tasks)
* <li>to create a unique ID among all tasks, the task id is added to the counter
* <li>for each record, the resulting counter is collected
* </ul>
*
* @param input the input data set
* @return a data set of tuple 2 consisting of ids and initial values.
*/
public static <T> DataSet<Tuple2<Long, T>> zipWithUniqueId(DataSet<T> input) {
return input.mapPartition(
new RichMapPartitionFunction<T, Tuple2<Long, T>>() {
long maxBitSize = getBitSize(Long.MAX_VALUE);
long shifter = 0;
long start = 0;
long taskId = 0;
long label = 0;
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
shifter = getBitSize(getRuntimeContext().getNumberOfParallelSubtasks() - 1);
taskId = getRuntimeContext().getIndexOfThisSubtask();
}
@Override
public void mapPartition(Iterable<T> values, Collector<Tuple2<Long, T>> out)
throws Exception {
for (T value : values) {
label = (start << shifter) + taskId;
if (getBitSize(start) + shifter < maxBitSize) {
out.collect(new Tuple2<>(label, value));
start++;
} else {
throw new Exception(
"Exceeded Long value range while generating labels");
}
}
}
});
} | 3.68 |
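A hypothetical illustration (not Flink code) of the labeling scheme listed above: each parallel task t emits labels (c << shifter) + t, which cannot collide across tasks.

```java
public class UniqueIdLabelSketch {
    // Number of bits needed to represent the value.
    static long getBitSize(long value) {
        return 64 - Long.numberOfLeadingZeros(value);
    }

    public static void main(String[] args) {
        int parallelism = 4;                         // assumed number of parallel subtasks
        long shifter = getBitSize(parallelism - 1L); // bits reserved for the task id
        for (long taskId = 0; taskId < parallelism; taskId++) {
            for (long counter = 0; counter < 3; counter++) {
                long label = (counter << shifter) + taskId;
                // task 0 emits 0, 4, 8; task 1 emits 1, 5, 9; and so on -- no overlaps.
                System.out.printf("task=%d counter=%d -> label=%d%n", taskId, counter, label);
            }
        }
    }
}
```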
framework_VCalendarPanel_isAcceptedByRangeEnd | /**
* Accepts dates earlier than or equal to rangeEnd, depending on the
* resolution. If the resolution is set to DAY, the range will compare on a
* day-basis. If the resolution is set to YEAR, only years are compared. So
* even if the range is set to one millisecond in next year, also next year
* will be included.
*
* @param date the date to check
* @param minResolution the minimum resolution to use in the comparison
* @return true if the date is accepted by the range end, false otherwise
*/
private boolean isAcceptedByRangeEnd(Date date, Resolution minResolution) {
assert (date != null);
// rangeEnd == null means that we accept all values above rangeStart
if (rangeEnd == null) {
return true;
}
Date valueDuplicate = (Date) date.clone();
Date rangeEndDuplicate = (Date) rangeEnd.clone();
if (minResolution == Resolution.YEAR) {
return valueDuplicate.getYear() <= rangeEndDuplicate.getYear();
}
if (minResolution == Resolution.MONTH) {
valueDuplicate = clearDateBelowMonth(valueDuplicate);
rangeEndDuplicate = clearDateBelowMonth(rangeEndDuplicate);
} else {
valueDuplicate = clearDateBelowDay(valueDuplicate);
rangeEndDuplicate = clearDateBelowDay(rangeEndDuplicate);
}
return !rangeEndDuplicate.before(valueDuplicate);
} | 3.68 |
hudi_HoodieConsistentBucketLayout_layoutPartitionerClass | /**
* Consistent hashing will tag all incoming records, so we can go ahead and reuse an existing Partitioner.
*/
@Override
public Option<String> layoutPartitionerClass() {
return Option.empty();
} | 3.68 |
hadoop_ColumnHeader_getSelector | /**
* Get the selector field for the TH.
* @return Selector.
*/
public String getSelector() {
return this.selector;
} | 3.68 |
zxing_EdifactEncoder_handleEOD | /**
* Handle "end of data" situations
*
* @param context the encoder context
* @param buffer the buffer with the remaining encoded characters
*/
private static void handleEOD(EncoderContext context, CharSequence buffer) {
try {
int count = buffer.length();
if (count == 0) {
return; //Already finished
}
if (count == 1) {
//Only an unlatch at the end
context.updateSymbolInfo();
int available = context.getSymbolInfo().getDataCapacity() - context.getCodewordCount();
int remaining = context.getRemainingCharacters();
// The following two lines are a hack inspired by the 'fix' from https://sourceforge.net/p/barcode4j/svn/221/
if (remaining > available) {
context.updateSymbolInfo(context.getCodewordCount() + 1);
available = context.getSymbolInfo().getDataCapacity() - context.getCodewordCount();
}
if (remaining <= available && available <= 2) {
return; //No unlatch
}
}
if (count > 4) {
throw new IllegalStateException("Count must not exceed 4");
}
int restChars = count - 1;
String encoded = encodeToCodewords(buffer);
boolean endOfSymbolReached = !context.hasMoreCharacters();
boolean restInAscii = endOfSymbolReached && restChars <= 2;
if (restChars <= 2) {
context.updateSymbolInfo(context.getCodewordCount() + restChars);
int available = context.getSymbolInfo().getDataCapacity() - context.getCodewordCount();
if (available >= 3) {
restInAscii = false;
context.updateSymbolInfo(context.getCodewordCount() + encoded.length());
//available = context.symbolInfo.dataCapacity - context.getCodewordCount();
}
}
if (restInAscii) {
context.resetSymbolInfo();
context.pos -= restChars;
} else {
context.writeCodewords(encoded);
}
} finally {
context.signalEncoderChange(HighLevelEncoder.ASCII_ENCODATION);
}
} | 3.68 |
flink_AbstractAggregatingMetricsHandler_getAggregatedMetricValues | /**
* Extracts and aggregates all requested metrics from the given metric stores, and maps the
* result to a JSON string.
*
* @param stores available metrics
* @param requestedMetrics ids of requested metrics
* @param requestedAggregationsFactories requested aggregations
* @return JSON string containing the requested metrics
*/
private AggregatedMetricsResponseBody getAggregatedMetricValues(
Collection<? extends MetricStore.ComponentMetricStore> stores,
List<String> requestedMetrics,
MetricAccumulatorFactory requestedAggregationsFactories) {
Collection<AggregatedMetric> aggregatedMetrics = new ArrayList<>(requestedMetrics.size());
for (String requestedMetric : requestedMetrics) {
final Collection<Double> values = new ArrayList<>(stores.size());
try {
for (MetricStore.ComponentMetricStore store : stores) {
String stringValue = store.metrics.get(requestedMetric);
if (stringValue != null) {
values.add(Double.valueOf(stringValue));
}
}
} catch (NumberFormatException nfe) {
log.warn(
"The metric {} is not numeric and can't be aggregated.",
requestedMetric,
nfe);
// metric is not numeric so we can't perform aggregations => ignore it
continue;
}
if (!values.isEmpty()) {
Iterator<Double> valuesIterator = values.iterator();
MetricAccumulator acc =
requestedAggregationsFactories.get(requestedMetric, valuesIterator.next());
valuesIterator.forEachRemaining(acc::add);
aggregatedMetrics.add(acc.get());
} else {
return new AggregatedMetricsResponseBody(Collections.emptyList());
}
}
return new AggregatedMetricsResponseBody(aggregatedMetrics);
} | 3.68 |
hbase_MasterObserver_preIsRpcThrottleEnabled | /**
* Called before checking whether RPC throttle is enabled.
* @param ctx the coprocessor instance's environment
*/
default void preIsRpcThrottleEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
flink_BackgroundTask_runAfter | /**
* Runs the given task after this background task has completed (normally or exceptionally).
*
* @param task task to run after this background task has completed
* @param executor executor to run the task
* @param <V> type of the result
* @return new {@link BackgroundTask} representing the new task to execute
*/
<V> BackgroundTask<V> runAfter(
SupplierWithException<? extends V, ? extends Exception> task, Executor executor) {
return new BackgroundTask<>(terminationFuture, task, executor);
} | 3.68 |
querydsl_Expressions_path | /**
* Create a new Path expression
*
* @param type type of expression
* @param metadata path metadata
* @param <T> type of expression
* @return path expression
*/
public static <T> SimplePath<T> path(Class<? extends T> type, PathMetadata metadata) {
return simplePath(type, metadata);
} | 3.68 |
hbase_MasterObserver_preDeleteNamespace | /**
* Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a namespace
* @param ctx the environment to interact with the framework and master
* @param namespace the name of the namespace
*/
default void preDeleteNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String namespace) throws IOException {
} | 3.68 |
hbase_RawFloat_decodeFloat | /**
* Read a {@code float} value from the buffer {@code buff}.
*/
public float decodeFloat(byte[] buff, int offset) {
return Bytes.toFloat(buff, offset);
} | 3.68 |
hadoop_ServiceLauncher_launchServiceAndExit | /**
* Launch the service and exit.
*
* <ol>
* <li>Parse the command line.</li>
* <li>Build the service configuration from it.</li>
* <li>Start the service.</li>
* <li>If it is a {@link LaunchableService}: execute it</li>
* <li>Otherwise: wait for it to finish.</li>
* <li>Exit passing the status code to the {@link #exit(int, String)}
* method.</li>
* </ol>
* @param args arguments to the service. {@code arg[0]} is
* assumed to be the service classname.
*/
public void launchServiceAndExit(List<String> args) {
StringBuilder builder = new StringBuilder();
for (String arg : args) {
builder.append('"').append(arg).append("\" ");
}
String argumentString = builder.toString();
if (LOG.isDebugEnabled()) {
LOG.debug(startupShutdownMessage(serviceName, args));
LOG.debug(argumentString);
}
registerFailureHandling();
// set up the configs, using reflection to push in the -site.xml files
loadConfigurationClasses();
Configuration conf = createConfiguration();
for (URL resourceUrl : confResourceUrls) {
conf.addResource(resourceUrl);
}
bindCommandOptions();
ExitUtil.ExitException exitException;
try {
List<String> processedArgs = extractCommandOptions(conf, args);
exitException = launchService(conf, processedArgs, true, true);
} catch (ExitUtil.ExitException e) {
exitException = e;
noteException(exitException);
}
if (exitException.getExitCode() == LauncherExitCodes.EXIT_USAGE) {
// something went wrong. Print the usage and commands
System.err.println(getUsageMessage());
System.err.println("Command: " + argumentString);
}
System.out.flush();
System.err.flush();
exit(exitException);
} | 3.68 |
framework_AbstractOrderedLayout_setMargin | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Layout.MarginHandler#setMargin(MarginInfo)
*/
@Override
public void setMargin(MarginInfo marginInfo) {
getState().marginsBitmask = marginInfo.getBitMask();
} | 3.68 |
hudi_AvroSchemaCompatibility_getCompatibility | /**
* Returns the SchemaCompatibilityType, always non-null.
*
* @return a SchemaCompatibilityType instance, always non-null
*/
public SchemaCompatibilityType getCompatibility() {
return mCompatibilityType;
} | 3.68 |
flink_CheckpointConfig_setForceUnalignedCheckpoints | /**
* Sets whether unaligned checkpoints are forced, despite currently non-checkpointable
* iteration feedback or custom partitioners.
*
* @param forceUnalignedCheckpoints The flag to force unaligned checkpoints.
*/
@PublicEvolving
public void setForceUnalignedCheckpoints(boolean forceUnalignedCheckpoints) {
configuration.set(ExecutionCheckpointingOptions.FORCE_UNALIGNED, forceUnalignedCheckpoints);
} | 3.68 |
hudi_KafkaOffsetGen_isValidTimestampCheckpointType | /**
* Check if the checkpoint is a timestamp.
* @param lastCheckpointStr the last checkpoint string, if any
* @return true if the checkpoint is an epoch timestamp in seconds or milliseconds
*/
private Boolean isValidTimestampCheckpointType(Option<String> lastCheckpointStr) {
if (!lastCheckpointStr.isPresent()) {
return false;
}
Pattern pattern = Pattern.compile("[-+]?[0-9]+(\\.[0-9]+)?");
Matcher isNum = pattern.matcher(lastCheckpointStr.get());
return isNum.matches() && (lastCheckpointStr.get().length() == 13 || lastCheckpointStr.get().length() == 10);
} | 3.68 |
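A quick standalone illustration of the check, using made-up checkpoint strings.

```java
import java.util.regex.Pattern;

public class TimestampCheckpointCheck {
    // A numeric string of length 10 (epoch seconds) or 13 (epoch millis) counts as a timestamp.
    static boolean isTimestamp(String s) {
        boolean numeric = Pattern.matches("[-+]?[0-9]+(\\.[0-9]+)?", s);
        return numeric && (s.length() == 13 || s.length() == 10);
    }

    public static void main(String[] args) {
        System.out.println(isTimestamp("1700000000"));    // true  (epoch seconds)
        System.out.println(isTimestamp("1700000000000")); // true  (epoch millis)
        System.out.println(isTimestamp("topic,0:42"));    // false (offset-style checkpoint)
    }
}
```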
hmily_HmilyRepositoryFacade_createHmilyParticipant | /**
* Create hmily participant.
*
* @param hmilyParticipant the hmily participant
*/
public void createHmilyParticipant(final HmilyParticipant hmilyParticipant) {
checkRows(hmilyRepository.createHmilyParticipant(hmilyParticipant));
} | 3.68 |
flink_FactoryUtil_checkFormatIdentifierMatchesWithEnrichingOptions | /**
* This function assumes that the format config is used only and only if the original
* configuration contains the format config option. It will fail if there is a mismatch of
* the identifier between the format in the plan table map and the one in enriching table
* map.
*/
private void checkFormatIdentifierMatchesWithEnrichingOptions(
ConfigOption<String> formatOption, String identifierFromPlan) {
Optional<String> identifierFromEnrichingOptions =
enrichingOptions.getOptional(formatOption);
if (!identifierFromEnrichingOptions.isPresent()) {
return;
}
if (identifierFromPlan == null) {
throw new ValidationException(
String.format(
"The persisted plan has no format option '%s' specified, while the catalog table has it with value '%s'. "
+ "This is invalid, as either only the persisted plan table defines the format, "
+ "or both the persisted plan table and the catalog table defines the same format.",
formatOption, identifierFromEnrichingOptions.get()));
}
if (!Objects.equals(identifierFromPlan, identifierFromEnrichingOptions.get())) {
throw new ValidationException(
String.format(
"Both persisted plan table and catalog table define the format option '%s', "
+ "but they mismatch: '%s' != '%s'.",
formatOption,
identifierFromPlan,
identifierFromEnrichingOptions.get()));
}
} | 3.68 |
rocketmq-connect_FilterTransform_start | /**
* Start the component
*
* @param config component context
*/
@Override
public void start(KeyValue config) {
this.keyValue = config;
log.info("transform config {}", this.keyValue);
} | 3.68 |
hbase_HRegionServer_getCopyOfOnlineRegionsSortedByOnHeapSize | /**
* @return A new Map of online regions sorted by region heap size with the first entry being the
* biggest.
*/
SortedMap<Long, Collection<HRegion>> getCopyOfOnlineRegionsSortedByOnHeapSize() {
// we'll sort the regions in reverse
SortedMap<Long, Collection<HRegion>> sortedRegions = new TreeMap<>(Comparator.reverseOrder());
// Copy over all regions. Regions are sorted by size with biggest first.
for (HRegion region : this.onlineRegions.values()) {
addRegion(sortedRegions, region, region.getMemStoreHeapSize());
}
return sortedRegions;
} | 3.68 |