name | code_snippet | score |
---|---|---|
rocketmq-connect_JdbcSourceTask_buildAndAddQuerier_rdh | /**
* build and add querier
*
* @param loadMode
* @param querySuffix
* @param incrementingColumn
* @param timestampColumns
* @param timestampDelayInterval
* @param timeZone
* @param tableOrQuery
* @param offset
 */
private void buildAndAddQuerier(TableLoadMode loadMode, String querySuffix, String incrementingColumn, List<String> timestampColumns, Long timestampDelayInterval, TimeZone timeZone, String tableOrQuery, Map<String, Object> offset) {
String topicPrefix = config.getTopicPrefix();
QueryMode queryMode = (!StringUtils.isEmpty(config.getQuery())) ? QueryMode.QUERY : QueryMode.TABLE;
Querier querier;
switch (loadMode) {
case MODE_BULK :
querier = new BulkQuerier(dialect, getContext(querySuffix, tableOrQuery, topicPrefix, queryMode));
tableQueue.add(querier);
break;
case MODE_INCREMENTING :
querier = new TimestampIncrementingQuerier(dialect, this.getIncrementContext(querySuffix, tableOrQuery, topicPrefix, queryMode, null, incrementingColumn, offset, timestampDelayInterval, timeZone));
tableQueue.add(querier);
break;
        case MODE_TIMESTAMP :
            querier = new TimestampIncrementingQuerier(dialect, this.getIncrementContext(querySuffix, tableOrQuery, topicPrefix, queryMode, timestampColumns, null, offset, timestampDelayInterval, timeZone));
tableQueue.add(querier);
break;
case MODE_TIMESTAMP_INCREMENTING :
querier = new TimestampIncrementingQuerier(dialect, this.getIncrementContext(querySuffix, tableOrQuery, topicPrefix, queryMode, timestampColumns, incrementingColumn, offset, timestampDelayInterval, timeZone));
tableQueue.add(querier);
break;
}
} | 3.26 |
rocketmq-connect_JdbcSourceTask_validate_rdh | /**
 * Should be invoked before starting the connector.
 *
 * @param config
 *     the configuration to validate
 */
@Override
public void validate(KeyValue config) {
} | 3.26 |
rocketmq-connect_JdbcSourceTask_sleepIfNeed_rdh | /**
 * Sleep if needed before the next poll.
 *
 * @param querier
 *     the querier whose last update time is checked
 * @return true if the task slept, false otherwise
 */
private boolean sleepIfNeed(Querier querier) {
if (!querier.querying()) {
final long nextUpdate = querier.getLastUpdate()
+ config.getPollIntervalMs();
final long now = System.currentTimeMillis();
final long sleepMs = Math.min(nextUpdate - now, 100);
if (sleepMs > 0) {
log.trace("Waiting {} ms to poll {} next", nextUpdate - now, querier);
try {
Thread.sleep(sleepMs);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
return true;
}
}
return false;
} | 3.26 |
rocketmq-connect_JdbcSourceTask_start_rdh | /**
* start jdbc task
*/
@Override
public void start(KeyValue props) {
// init config
config = new JdbcSourceTaskConfig(props);
this.dialect = DatabaseDialectLoader.getDatabaseDialect(config);
cachedConnectionProvider = connectionProvider(config.getAttempts(), config.getBackoffMs());
log.info("Using JDBC dialect {}", dialect.name());
// compute table offset
Map<String, Map<String, Object>> offsetValues = SourceOffsetCompute.initOffset(config, sourceTaskContext, dialect, cachedConnectionProvider);
    for (String tableOrQuery : offsetValues.keySet()) {
        this.buildAndAddQuerier(
            TableLoadMode.findTableLoadModeByName(this.config.getMode()),
            this.config.getQuerySuffix(),
            this.config.getIncrementingColumnName(),
            this.config.getTimestampColumnNames(),
            this.config.getTimestampDelayIntervalMs(),
            this.config.getTimeZone(),
            tableOrQuery,
            offsetValues.get(tableOrQuery));
    }
running.set(true);
log.info("Started JDBC source task");
} | 3.26 |
rocketmq-connect_JdbcSourceTask_getIncrementContext_rdh | // Increment context
private IncrementContext getIncrementContext(String querySuffix, String tableOrQuery, String topicPrefix, QueryMode queryMode, List<String> timestampColumnNames, String incrementingColumnName, Map<String, Object> offsetMap, Long timestampDelay, TimeZone timeZone) {
    IncrementContext context = new IncrementContext(queryMode, queryMode == QueryMode.TABLE ? dialect.parseTableNameToTableId(tableOrQuery) : null, queryMode == QueryMode.QUERY ? tableOrQuery : null, topicPrefix, this.config.getOffsetSuffix(), querySuffix, config.getBatchMaxRows(), timestampColumnNames != null ? timestampColumnNames : Collections.emptyList(), incrementingColumnName, offsetMap, timestampDelay, timeZone);
return context;
} | 3.26 |
rocketmq-connect_TaskClassSetter_setTaskClass_rdh | /**
 * Set the task class on the given configuration.
 *
 * @param config
 *     the task configuration to update
 */
default void setTaskClass(KeyValue config) {
    config.put(TaskConfig.TASK_CLASS_CONFIG, getTaskClass());
} | 3.26 |
rocketmq-connect_ConnectorConfig_originalConfig_rdh | /**
* original config
*
 * @return the original configuration properties
 */
public Map<String, String> originalConfig() {
return config.getProperties();
} | 3.26 |
rocketmq-connect_JsonSchemaUtils_validate_rdh | /**
* validate object
*
* @param schema
* @param value
* @throws JsonProcessingException
* @throws ValidationException
*/
public static void validate(Schema schema, Object value) throws JsonProcessingException, ValidationException {
Object primitiveValue = NONE_MARKER;
if (isPrimitive(value)) {
primitiveValue = value;
} else if (value instanceof BinaryNode) {
primitiveValue = ((BinaryNode) (value)).asText();
} else if (value instanceof BooleanNode) {
        primitiveValue = ((BooleanNode) (value)).asBoolean();
} else if (value instanceof NullNode) {
primitiveValue = null;
} else if (value instanceof NumericNode) {
primitiveValue = ((NumericNode) (value)).numberValue();
} else if (value instanceof TextNode) {
primitiveValue = ((TextNode) (value)).asText();
}
if (primitiveValue != NONE_MARKER) {
schema.validate(primitiveValue);
} else {
Object jsonObject;
if (value instanceof ArrayNode) {
            jsonObject = OBJECT_MAPPER.treeToValue(((ArrayNode) (value)), JSONArray.class);
        } else if (value instanceof JsonNode) {
            jsonObject = new JSONObject(OBJECT_MAPPER.writeValueAsString(value));
        } else if (value.getClass().isArray()) {
            jsonObject = OBJECT_MAPPER.convertValue(value, JSONArray.class);
} else {
jsonObject = OBJECT_MAPPER.convertValue(value, JSONObject.class);
}
schema.validate(jsonObject);
}
} | 3.26 |
rocketmq-connect_WorkerDirectTask_m1_rdh | /**
* Get the Task Name of connector.
*
* @return task name
*/
@Override
public String m1() {
return id().task() + "";
} | 3.26 |
rocketmq-connect_WorkerDirectTask_configs_rdh | /**
* Get the configurations of current task.
*
* @return the configuration of current task.
*/
@Override
public KeyValue configs() {
return taskConfig;
} | 3.26 |
rocketmq-connect_WorkerDirectTask_resetOffset_rdh | /**
* Reset the consumer offset for the given queue.
*
* @param recordPartition
* the partition to reset offset.
* @param recordOffset
* the offset to reset to.
*/
@Override
public void resetOffset(RecordPartition recordPartition, RecordOffset recordOffset) {
// no-op
} | 3.26 |
rocketmq-connect_WorkerDirectTask_getTaskName_rdh | /**
 * Get the task name of the connector.
*
* @return task name
*/
@Override
public String getTaskName() {
return id().task() + "";
} | 3.26 |
rocketmq-connect_WorkerDirectTask_m2_rdh | /**
* Current task assignment processing partition
*
* @return the partition list
*/
@Override
public Set<RecordPartition> m2() {
return null;
} | 3.26 |
rocketmq-connect_WorkerDirectTask_execute_rdh | /**
* execute poll and send record
*/
@Override
protected void execute() {
while (isRunning()) {
        updateCommittableOffsets();
        if (shouldPause()) {
onPause();
try {
// wait unpause
                if (awaitUnpause()) {
                    onResume();
}
continue;
} catch (InterruptedException e) {
                // swallow the interrupt and re-check the running state
            }
        }
try {
Collection<ConnectRecord> toSendEntries = sourceTask.poll();
if (!toSendEntries.isEmpty()) {
sendRecord(toSendEntries);
}
} catch (Exception e) {
log.error("Direct task runtime exception", e);
finalOffsetCommit(true);
onFailure(e);
            }
        }
    }
} | 3.26 |
rocketmq-connect_WorkerDirectTask_pause_rdh | /**
* Pause consumption of messages from the specified partition.
*
* @param partitions
* the partition list to be reset offset.
*/
@Override
public void pause(List<RecordPartition> partitions) {
// no-op
} | 3.26 |
rocketmq-connect_WorkerDirectTask_initializeAndStart_rdh | /**
 * Initialize and start the task.
*/
@Override
protected void initializeAndStart() {
m0();
startSourceTask();
log.info("Direct task start, config:{}", JSON.toJSONString(taskConfig));
} | 3.26 |
rocketmq-connect_WorkerDirectTask_resume_rdh | /**
 * Resume consumption of messages from previously paused partitions.
 *
 * @param partitions
 *     the partition list to be resumed.
*/
@Override
public void resume(List<RecordPartition> partitions) {
// no-op
} | 3.26 |
rocketmq-connect_JsonConverter_convertToJson_rdh | /**
* Convert this object, in the org.apache.kafka.connect.data format, into a JSON object, returning both the schema
* and the converted object.
*/
private Object convertToJson(Schema schema, Object value) {
    if (value == null) {
        if (schema == null) {
            return null;
        }
        if (schema.getDefaultValue() != null) {
return convertToJson(schema, schema.getDefaultValue());
}
if (schema.isOptional()) {
return null;
}
throw new ConnectException("Conversion error: null value for field that is required and has no default value");
}
if ((schema != null) && (schema.getName() != null)) {
LogicalTypeConverter logicalConverter = LOGICAL_CONVERTERS.get(schema.getName());
if (logicalConverter != null) {
if (value == null) {
return null;
} else {
                return logicalConverter.toJson(schema, value, converterConfig);
            }
}
}
try {
final FieldType schemaType;
if (schema == null) {
schemaType = Schema.schemaType(value.getClass());
if (schemaType == null) {
throw new ConnectException(("Java class " + value.getClass()) + " does not have corresponding schema type.");
}
} else {
schemaType = schema.getFieldType();
}
switch (schemaType) {
case INT8 :
case INT16 :
case INT32 :
case INT64 :
case FLOAT32 :
case FLOAT64 :
case BOOLEAN :
case STRING :
                return value;
            case BYTES :
if (value instanceof byte[]) {
return ((byte[]) (value));
} else if (value instanceof ByteBuffer) {
return ((ByteBuffer) (value)).array();
                } else {
throw new ConnectException("Invalid type for bytes type: " + value.getClass());
}
case ARRAY :
{
Collection collection = ((Collection) (value));
List list = new ArrayList();
for (Object elem : collection) {
Schema valueSchema = (schema == null) ? null : schema.getValueSchema();
Object fieldValue = convertToJson(valueSchema, elem);
list.add(fieldValue);
}
return list;
}
case MAP :
{
Map<?, ?> map = ((Map<?, ?>) (value));
boolean objectMode;
if (schema == null) {
objectMode = true;
for (Map.Entry<?, ?> entry : map.entrySet()) {
if (!(entry.getKey() instanceof String)) {
objectMode = false;
break;
}
                    }
                } else {
                    objectMode = schema.getKeySchema().getFieldType() == FieldType.STRING;
                }
JSONArray resultArray = new JSONArray();
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<?, ?> entry : map.entrySet()) {
Schema keySchema = (schema == null) ? null : schema.getKeySchema();
                    Schema valueSchema = (schema == null) ? null : schema.getValueSchema();
                    Object v44 = convertToJson(keySchema, entry.getKey());
                    Object mapValue = convertToJson(valueSchema, entry.getValue());
                    if (objectMode) {
                        resultMap.put(((String) (v44)), mapValue);
} else {
JSONArray entryArray = new JSONArray();
entryArray.add(0, v44);
entryArray.add(1, mapValue);
resultArray.add(entryArray);
}
}
return objectMode ? resultMap : resultArray;
}
case STRUCT :
{
Struct struct = ((Struct) (value));
if (!struct.schema().equals(schema)) {
throw new ConnectException("Mismatching schema.");
}
                JSONObject obj = new JSONObject(new LinkedHashMap());
for (Field field : struct.schema().getFields()) {
obj.put(field.getName(), convertToJson(field.getSchema(), struct.get(field)));
}
return obj;
}
}
throw new ConnectException(("Couldn't convert " + value) + " to JSON.");
} catch (ClassCastException e) {
String schemaTypeStr = (schema != null) ? schema.getFieldType().toString() : "unknown schema";
        throw new ConnectException((("Invalid type for " + schemaTypeStr) + ": ") + value.getClass());
    }
} | 3.26 |
rocketmq-connect_JsonConverter_toConnectData_rdh | /**
* Convert a native object to a Rocketmq Connect data object.
*
* @param topic
* the topic associated with the data
* @param value
* the value to convert
* @return an object containing the {@link Schema} and the converted value
 */
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
// This handles a tombstone message
if (value == null) {
return SchemaAndValue.NULL;
}
Object jsonValue;
try {
jsonValue = deserializer.deserialize(topic, value);
} catch (Exception e) {
throw new ConnectException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
}
JSONObject newJsonValue;
if (!converterConfig.schemasEnabled()) {
// schema disabled
        JSONObject envelope = new JSONObject();
envelope.put(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
envelope.put(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
newJsonValue = envelope;
} else {
// schema enabled
newJsonValue = ((JSONObject) (jsonValue));
}
Object jsonSchema = newJsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME);
Schema schema = asConnectSchema(jsonSchema == null ? null : ((JSONObject) (jsonSchema)));
return new SchemaAndValue(schema, convertToConnect(schema, newJsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)));
} | 3.26 |
rocketmq-connect_JsonConverter_fromConnectData_rdh | /**
* Convert a rocketmq Connect data object to a native object for serialization.
*
* @param topic
* the topic associated with the data
* @param schema
* the schema for the value
* @param value
* the value to convert
* @return the serialized value
*/
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if ((schema == null) && (value == null)) {
        return null;
}
Object jsonValue = (converterConfig.schemasEnabled()) ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value);
try {
return serializer.serialize(topic, jsonValue);
} catch (Exception e) {
throw new ConnectException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
}
} | 3.26 |
rocketmq-connect_JsonConverter_convertToJsonWithoutEnvelope_rdh | /**
 * Convert to JSON without an envelope.
 *
 * @param schema
 *     the schema of the value
 * @param value
 *     the value to convert
 * @return the converted JSON value
 */
private Object convertToJsonWithoutEnvelope(Schema schema, Object value) {
return convertToJson(schema, value);
} | 3.26 |
rocketmq-connect_JsonConverter_asConnectSchema_rdh | /**
 * Convert a JSON schema to a Connect schema, if not empty.
 *
 * @param jsonSchema
 *     the JSON representation of the schema
 * @return the Connect schema, or null if the input is null
 */
public Schema asConnectSchema(JSONObject jsonSchema) {
// schema null
    if (jsonSchema == null) {
        return null;
    }
Schema cached = toConnectSchemaCache.get(jsonSchema);
if (cached != null) {
return cached;
}
String schemaType = String.valueOf(jsonSchema.get(JsonSchema.SCHEMA_TYPE_FIELD_NAME));
if (StringUtils.isEmpty(schemaType)) {
throw new ConnectException("Schema must contain 'type' field");
}
final SchemaBuilder builder;
switch (schemaType) {
case JsonSchema.BOOLEAN_TYPE_NAME :
builder = SchemaBuilder.bool();
break;
case JsonSchema.INT8_TYPE_NAME :
builder = SchemaBuilder.int8();
break;
case JsonSchema.INT16_TYPE_NAME :
builder = SchemaBuilder.int16();
break;
case JsonSchema.INT32_TYPE_NAME :
builder = SchemaBuilder.int32();
            break;
        case JsonSchema.INT64_TYPE_NAME :
builder = SchemaBuilder.int64();
break;
case JsonSchema.FLOAT_TYPE_NAME :
builder = SchemaBuilder.float32();
break;
case JsonSchema.DOUBLE_TYPE_NAME :
builder = SchemaBuilder.float64();
break;
case JsonSchema.BYTES_TYPE_NAME :
builder = SchemaBuilder.bytes();
break;
case JsonSchema.STRING_TYPE_NAME :
builder = SchemaBuilder.string();
break;
case JsonSchema.ARRAY_TYPE_NAME :
            JSONObject elemSchema = ((JSONObject) (jsonSchema.get(JsonSchema.ARRAY_ITEMS_FIELD_NAME)));
if (Objects.isNull(elemSchema)) {
throw new ConnectException("Array schema did not specify the element type");
}
builder = SchemaBuilder.array(asConnectSchema(elemSchema));
break;
case JsonSchema.MAP_TYPE_NAME :
JSONObject keySchema = ((JSONObject) (jsonSchema.get(JsonSchema.MAP_KEY_FIELD_NAME)));
if (keySchema == null) {
                throw new ConnectException("Map schema did not specify the key type");
}
JSONObject valueSchema = ((JSONObject) (jsonSchema.get(JsonSchema.MAP_VALUE_FIELD_NAME)));
if (valueSchema == null) {
throw new ConnectException("Map schema did not specify the value type");
}
builder = SchemaBuilder.map(asConnectSchema(keySchema), asConnectSchema(valueSchema));
break;
        case JsonSchema.STRUCT_TYPE_NAME :
            builder = SchemaBuilder.struct();
            List<JSONObject> v57 = ((List<JSONObject>) (jsonSchema.get(JsonSchema.STRUCT_FIELDS_FIELD_NAME)));
            if (Objects.isNull(v57)) {
                throw new ConnectException("Struct schema's \"fields\" argument is not an array.");
            }
            for (JSONObject field : v57) {
String jsonFieldName = field.getString(JsonSchema.STRUCT_FIELD_NAME_FIELD_NAME);
if (jsonFieldName == null) {
throw new ConnectException("Struct schema's field name not specified properly");
}
builder.field(jsonFieldName, asConnectSchema(field));
}
break;
        default :
            throw new ConnectException("Unknown schema type: " + schemaType);
}
// optional
Boolean isOptional = jsonSchema.getBoolean(JsonSchema.SCHEMA_OPTIONAL_FIELD_NAME);
if ((isOptional != null) && isOptional) {
builder.optional();
}
// schema name
String schemaName = jsonSchema.getString(JsonSchema.SCHEMA_NAME_FIELD_NAME);
    builder.name(schemaName);
    // schema version
Object version = jsonSchema.get(JsonSchema.SCHEMA_VERSION_FIELD_NAME);
if ((version != null) && (version instanceof Integer)) {
builder.version(Integer.parseInt(version.toString()));
}
// schema doc
String doc = jsonSchema.getString(JsonSchema.SCHEMA_DOC_FIELD_NAME);
if (StringUtils.isNotEmpty(doc)) {
builder.doc(doc);
}
// schema parameter
JSONObject schemaParams = ((JSONObject) (jsonSchema.get(JsonSchema.SCHEMA_PARAMETERS_FIELD_NAME)));
if (schemaParams != null) {
        Map<String, Object> paramsIt = schemaParams.getInnerMap();
paramsIt.forEach((k, v) -> {
builder.parameter(k, String.valueOf(v));
});
}
Object schemaDefaultNode = jsonSchema.get(JsonSchema.SCHEMA_DEFAULT_FIELD_NAME);
if (schemaDefaultNode != null) {
builder.defaultValue(convertToConnect(builder.build(), schemaDefaultNode));
}
Schema result = builder.build();
toConnectSchemaCache.put(jsonSchema, result);
return result;
} | 3.26 |
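A hedged round-trip sketch for the schema conversion pair in this table (asConnectSchema above, asJsonSchema and configure in later rows). It uses only members visible in these snippets; the no-arg JsonConverter constructor, the empty config map, and the schema/field names are illustrative assumptions rather than project code.

// Illustrative fragment, not project code; the no-arg constructor and default-config
// behaviour are assumptions, everything else appears in the surrounding rows.
JsonConverter converter = new JsonConverter();
converter.configure(new java.util.HashMap<String, Object>()); // rely on converter defaults

SchemaBuilder structBuilder = SchemaBuilder.struct();
structBuilder.name("example.Person");                         // hypothetical schema name
structBuilder.field("name", SchemaBuilder.string().build());
Schema original = structBuilder.build();

JSONObject jsonSchema = converter.asJsonSchema(original);     // Connect schema -> JSON schema (cached)
Schema restored = converter.asConnectSchema(jsonSchema);      // JSON schema -> Connect schema (cached)
assert restored.getFieldType() == original.getFieldType();    // both should report STRUCT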
rocketmq-connect_JsonConverter_configure_rdh | /**
* Configure this class.
*
* @param configs
* configs in key/value pairs
*/
@Override
public void configure(Map<String, ?> configs) {
converterConfig = new JsonConverterConfig(configs);
fromConnectSchemaCache = new LRUCache<>(converterConfig.cacheSize());
    toConnectSchemaCache = new LRUCache<>(converterConfig.cacheSize());
} | 3.26 |
rocketmq-connect_JsonConverter_convertToJsonWithEnvelope_rdh | /**
* convert to json with envelope
*
* @param schema
* @param value
 * @return the envelope JSON object containing both schema and payload
 */
private JSONObject convertToJsonWithEnvelope(Schema schema, Object value) {
return new JsonSchema.Envelope(asJsonSchema(schema), convertToJson(schema, value)).toJsonNode();
} | 3.26 |
rocketmq-connect_JsonConverter_convertToConnect_rdh | /**
* convert to connect
*
* @param schema
* @param value
 * @return the converted Connect value
 */
private Object convertToConnect(Schema schema, Object value) {
final FieldType schemaType;
if (schema != null) {
schemaType = schema.getFieldType();
if (value == null) {
if (schema.getDefaultValue() != null) {
return schema.getDefaultValue();// any logical type conversions should already have been applied
}
if (schema.isOptional()) {
return null;
}
throw new ConnectException(("Invalid null value for required " + schemaType) + " field");
}
} else if (value == null) {
return null;
} else if (value instanceof String) {
schemaType = FieldType.STRING;
} else if (value instanceof Integer) {
        schemaType = FieldType.INT32;
    } else if (value instanceof Long) {
        schemaType = FieldType.INT64;
    } else if (value instanceof Float) {
        schemaType = FieldType.FLOAT32;
    } else if (value instanceof Double) {
        schemaType = FieldType.FLOAT64;
    } else if (value instanceof BigDecimal) {
        schemaType = FieldType.FLOAT64;
    } else if (value instanceof Boolean) {
        schemaType = FieldType.BOOLEAN;
    } else if (value instanceof List) {
        schemaType = FieldType.ARRAY;
    } else if (value instanceof Map) {
        schemaType = FieldType.MAP;
    } else {
        schemaType = null;
    }
    final JsonToConnectTypeConverter typeConverter = TO_CONNECT_CONVERTERS.get(schemaType);
    if (typeConverter == null) {
        throw new ConnectException("Unknown schema type: " + schemaType);
    }
    if ((schema != null) && (schema.getName() != null)) {
LogicalTypeConverter logicalConverter = LOGICAL_CONVERTERS.get(schema.getName());
if (logicalConverter != null) {
return logicalConverter.toConnect(schema, value);
}
}
return typeConverter.convert(schema, value);
} | 3.26 |
rocketmq-connect_JsonConverter_asJsonSchema_rdh | /**
* convert ConnectRecord schema to json schema
*
* @param schema
 * @return the JSON representation of the schema
 */
public JSONObject asJsonSchema(Schema schema) {
    if (schema == null) {
        return null;
}
// from cached
JSONObject cached = fromConnectSchemaCache.get(schema);
if (cached != null) {
return cached.clone();
}
JSONObject jsonSchema;
// convert field type name
switch (schema.getFieldType()) {
case BOOLEAN :
jsonSchema = JsonSchema.BOOLEAN_SCHEMA();
break;
case BYTES :
jsonSchema = JsonSchema.BYTES_SCHEMA();
break;
case FLOAT64 :
jsonSchema = JsonSchema.DOUBLE_SCHEMA();
break;
case FLOAT32 :
jsonSchema = JsonSchema.FLOAT_SCHEMA();
break;
case INT8 :
jsonSchema = JsonSchema.INT8_SCHEMA();
break;
case INT16 :
jsonSchema = JsonSchema.INT16_SCHEMA();
break;
case INT32 :
jsonSchema = JsonSchema.INT32_SCHEMA();
break;
case INT64 :
jsonSchema = JsonSchema.INT64_SCHEMA();
break;
case STRING :
jsonSchema = JsonSchema.STRING_SCHEMA();
break;
case ARRAY :
            jsonSchema = new JSONObject();
jsonSchema.put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.ARRAY_TYPE_NAME);
jsonSchema.put(JsonSchema.ARRAY_ITEMS_FIELD_NAME, asJsonSchema(schema.getValueSchema()));
break;
case MAP :
jsonSchema = new JSONObject();
            jsonSchema.put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.MAP_TYPE_NAME);
jsonSchema.put(JsonSchema.MAP_KEY_FIELD_NAME, asJsonSchema(schema.getKeySchema()));
jsonSchema.put(JsonSchema.MAP_VALUE_FIELD_NAME, asJsonSchema(schema.getValueSchema()));
break;
case STRUCT :
jsonSchema = new JSONObject(new ConcurrentHashMap<>());
jsonSchema.put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.STRUCT_TYPE_NAME);
// field list
JSONArray fields = new JSONArray();
for (Field field : schema.getFields()) {
                String fieldName = field.getName();
JSONObject fieldJsonSchema = asJsonSchema(field.getSchema());
fieldJsonSchema.put(JsonSchema.STRUCT_FIELD_NAME_FIELD_NAME, fieldName);
fields.add(fieldJsonSchema);
}
            jsonSchema.put(JsonSchema.STRUCT_FIELDS_FIELD_NAME, fields);
            break;
default :
throw new ConnectException(("Couldn't translate unsupported schema type " + schema) + ".");
}
// optional
jsonSchema.put(JsonSchema.SCHEMA_OPTIONAL_FIELD_NAME, schema.isOptional());
// name
if (schema.getName() != null) {
jsonSchema.put(JsonSchema.SCHEMA_NAME_FIELD_NAME, schema.getName());
}
// version
if (schema.getVersion() != null) {
jsonSchema.put(JsonSchema.SCHEMA_VERSION_FIELD_NAME, schema.getVersion());
}
// doc
if (schema.getDoc() != null) {
jsonSchema.put(JsonSchema.SCHEMA_DOC_FIELD_NAME, schema.getDoc());
}
// parameters
if (schema.getParameters() != null) {
        JSONObject jsonSchemaParams = new JSONObject();
        for (Map.Entry<String, String> prop : schema.getParameters().entrySet()) {
            jsonSchemaParams.put(prop.getKey(), prop.getValue());
        }
        jsonSchema.put(JsonSchema.SCHEMA_PARAMETERS_FIELD_NAME, jsonSchemaParams);
    }
    // default value
if (schema.getDefaultValue() != null) {
jsonSchema.put(JsonSchema.SCHEMA_DEFAULT_FIELD_NAME, convertToJson(schema, schema.getDefaultValue()));
}
// add cache
fromConnectSchemaCache.put(schema, jsonSchema);
return jsonSchema;
} | 3.26 |
rocketmq-connect_ByteArrayConverter_m0_rdh | /**
* Configure this class.
*
* @param configs
* configs in key/value pairs
*/ @Override
public void m0(Map configs) {
// config
} | 3.26 |
rocketmq-connect_Base64Util_base64Decode_rdh | /**
* decode
*
* @param in
 * @return the decoded bytes, or null if the input is empty
 */
public static byte[] base64Decode(String in) {
if (StringUtils.isEmpty(in)) {
return null;
}
return Base64.getDecoder().decode(in);
} | 3.26 |
rocketmq-connect_RmqSourceReplicator_ensureTargetTopic_rdh | /**
 * Ensure the target topic exists. If the target topic does not exist, ensureTopic will create it on the
 * target cluster with the same TopicConfig but using the target topic name. Any exception will be caught
 * and rethrown as an IllegalStateException.
*
* @param srcTopic
* @param targetTopic
* @throws RemotingException
* @throws MQClientException
* @throws InterruptedException
*/
public void ensureTargetTopic(String srcTopic, String targetTopic) throws RemotingException, MQClientException, InterruptedException {
String srcCluster = this.replicatorConfig.getSrcCluster();
String targetCluster = this.replicatorConfig.getTargetCluster();
List<BrokerData> brokerList = Utils.examineBrokerData(this.srcMQAdminExt, srcTopic, srcCluster);
if (brokerList.size() == 0) {
throw new IllegalStateException(String.format("no broker found for srcTopic: %s srcCluster: %s", srcTopic, srcCluster));
        }
        final TopicRouteData topicRouteData = this.srcMQAdminExt.examineTopicRouteInfo(srcTopic);
final TopicConfig topicConfig = new TopicConfig();
        final List<QueueData> queueDatas = topicRouteData.getQueueDatas();
        QueueData queueData = queueDatas.get(0);
topicConfig.setPerm(queueData.getPerm());
topicConfig.setReadQueueNums(queueData.getReadQueueNums());
topicConfig.setWriteQueueNums(queueData.getWriteQueueNums());
topicConfig.setTopicSysFlag(queueData.getTopicSysFlag());
topicConfig.setTopicName(targetTopic);
Utils.createTopic(this.targetMQAdminExt, topicConfig, targetCluster);
} | 3.26 |
rocketmq-connect_ReporterManagerUtil_m0_rdh | /**
* create worker error record reporter
*
* @param connConfig
* @param retryWithToleranceOperator
* @param converter
 * @return the worker error record reporter, or null if the dead letter queue is not enabled
 */
public static WorkerErrorRecordReporter m0(ConnectKeyValue connConfig, RetryWithToleranceOperator retryWithToleranceOperator, RecordConverter converter) {
DeadLetterQueueConfig deadLetterQueueConfig = new DeadLetterQueueConfig(connConfig);
if (deadLetterQueueConfig.enableErrantRecordReporter()) {
return new WorkerErrorRecordReporter(retryWithToleranceOperator, converter);
    }
    return null;
} | 3.26 |
rocketmq-connect_ReporterManagerUtil_sourceTaskReporters_rdh | /**
* build source task reporter
*
* @param connectorTaskId
* @param connConfig
 * @return the list of error reporters for the source task
 */
public static List<ErrorReporter> sourceTaskReporters(ConnectorTaskId connectorTaskId, ConnectKeyValue connConfig, ErrorMetricsGroup errorMetricsGroup) {
List<ErrorReporter> reporters = new ArrayList<>();
LogReporter logReporter = new LogReporter(connectorTaskId, connConfig, errorMetricsGroup);
reporters.add(logReporter);
return reporters;
} | 3.26 |
rocketmq-connect_ReporterManagerUtil_m1_rdh | /**
* build sink task reporter
*
* @param connectorTaskId
* @param connConfig
* @param workerConfig
 * @return the list of error reporters for the sink task
 */
public static List<ErrorReporter> m1(ConnectorTaskId connectorTaskId, ConnectKeyValue connConfig, WorkerConfig workerConfig, ErrorMetricsGroup errorMetricsGroup) {
// ensure reporter order
ArrayList<ErrorReporter> reporters = new ArrayList<>();
LogReporter logReporter = new LogReporter(connectorTaskId, connConfig, errorMetricsGroup);
reporters.add(logReporter);
// dead letter queue reporter
DeadLetterQueueReporter reporter = DeadLetterQueueReporter.build(connectorTaskId, connConfig, workerConfig, errorMetricsGroup);
if (reporter != null) {
reporters.add(reporter);
}
return reporters;
} | 3.26 |
rocketmq-connect_ReporterManagerUtil_createRetryWithToleranceOperator_rdh | /**
* create retry operator
*
* @param connConfig
 * @return the retry-with-tolerance operator
 */
public static RetryWithToleranceOperator createRetryWithToleranceOperator(ConnectKeyValue connConfig, ErrorMetricsGroup errorMetricsGroup) {
DeadLetterQueueConfig deadLetterQueueConfig = new DeadLetterQueueConfig(connConfig);
return new RetryWithToleranceOperator(deadLetterQueueConfig.errorRetryTimeout(), deadLetterQueueConfig.errorMaxDelayInMillis(), deadLetterQueueConfig.errorToleranceType(), errorMetricsGroup);
} | 3.26 |
hadoop_TimelineEvent_getInfoJAXB_rdh | // required by JAXB
@InterfaceAudience.Private
@XmlElement(name = "info")
public HashMap<String, Object> getInfoJAXB() {
return info;
} | 3.26 |
hadoop_SubClusterState_fromString_rdh | /**
* Convert a string into {@code SubClusterState}.
*
* @param state
* the string to convert in SubClusterState
* @return the respective {@code SubClusterState}
*/
public static SubClusterState fromString(String state) {
try {
return SubClusterState.valueOf(state);
} catch (Exception e) {
LOG.error("Invalid SubCluster State value({}) in the StateStore does not" + " match with the YARN Federation standard.", state);
      return null;
}
} | 3.26 |
hadoop_SubClusterState_isUsable_rdh | /**
* Subcluster has unregistered.
*/
  SC_UNREGISTERED;

  public boolean isUsable() {
return (this == SC_RUNNING) || (this == SC_NEW);
} | 3.26 |
hadoop_RolloverSignerSecretProvider_rollSecret_rdh | /**
* Rolls the secret. It is called automatically at the rollover interval.
*/
protected synchronized void rollSecret() {
if (!isDestroyed) {
LOG.debug("rolling secret");
      byte[] newSecret = generateNewSecret();
secrets = new byte[][]{ newSecret, secrets[0] };
}
} | 3.26 |
hadoop_RolloverSignerSecretProvider_init_rdh | /**
* Initialize the SignerSecretProvider. It initializes the current secret
* and starts the scheduler for the rollover to run at an interval of
* tokenValidity.
*
* @param config
* configuration properties
* @param servletContext
* servlet context
* @param tokenValidity
* The amount of time a token is valid for
* @throws Exception
* thrown if an error occurred
*/
@Override
  public void init(Properties config, ServletContext servletContext, long tokenValidity) throws Exception {
initSecrets(generateNewSecret(), null);
startScheduler(tokenValidity, tokenValidity);
} | 3.26 |
hadoop_RolloverSignerSecretProvider_initSecrets_rdh | /**
* Initializes the secrets array. This should typically be called only once,
* during init but some implementations may wish to call it other times.
* previousSecret can be null if there isn't a previous secret, but
* currentSecret should never be null.
*
* @param currentSecret
* The current secret
* @param previousSecret
* The previous secret
*/
protected void initSecrets(byte[] currentSecret, byte[] previousSecret) {
secrets = new byte[][]{ currentSecret, previousSecret };
} | 3.26 |
hadoop_RolloverSignerSecretProvider_startScheduler_rdh | /**
* Starts the scheduler for the rollover to run at an interval.
*
* @param initialDelay
* The initial delay in the rollover in milliseconds
* @param period
* The interval for the rollover in milliseconds
*/
protected synchronized void startScheduler(long initialDelay, long period) {
if (!schedulerRunning) {
schedulerRunning = true;
      scheduler = Executors.newSingleThreadScheduledExecutor();
      scheduler.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
rollSecret();
}
}, initialDelay, period, TimeUnit.MILLISECONDS);
    }
  } | 3.26 |
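The RolloverSignerSecretProvider rows above (init, initSecrets, rollSecret, startScheduler) describe the rollover lifecycle but never show a concrete secret source. A minimal illustrative subclass follows, assuming generateNewSecret() is the abstract hook those methods call; the class name is hypothetical.

// Hypothetical subclass for illustration; generateNewSecret() is assumed to be the
// abstract method invoked by init() and rollSecret() in the rows above.
public class RandomSignerSecretProviderSketch extends RolloverSignerSecretProvider {
  private final java.security.SecureRandom random = new java.security.SecureRandom();

  @Override
  protected byte[] generateNewSecret() {
    byte[] secret = new byte[32];
    random.nextBytes(secret); // fresh 256-bit secret on every rollover
    return secret;
  }
}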
hadoop_FederationRMFailoverProxyProvider_close_rdh | /**
* Close all the proxy objects which have been opened over the lifetime of
* this proxy provider.
*/
@Override
public synchronized void close() throws IOException {
closeInternal(current);
} | 3.26 |
hadoop_ServiceShutdownHook_run_rdh | /**
* Shutdown handler.
* Query the service hook reference -if it is still valid the
* {@link Service#stop()} operation is invoked.
*/
@Override
public void run() {
shutdown();
} | 3.26 |
hadoop_ServiceShutdownHook_shutdown_rdh | /**
* Shutdown operation.
* <p>
* Subclasses may extend it, but it is primarily
* made available for testing.
*
* @return true if the service was stopped and no exception was raised.
*/
protected boolean shutdown() {
Service service;
boolean result = false;
synchronized(this) {
service = serviceRef.get();
serviceRef.clear();
}
if (service != null) {
try {
// Stop the Service
service.stop();
result = true;
} catch (Throwable t) {
LOG.info("Error stopping {}", service.getName(), t);
}
}
return result;
} | 3.26 |
hadoop_ServiceShutdownHook_unregister_rdh | /**
* Unregister the hook.
*/
  public synchronized void unregister() {
try {
ShutdownHookManager.get().removeShutdownHook(this);
} catch (IllegalStateException e) {
LOG.info("Failed to unregister shutdown hook: {}", e, e);
    }
  } | 3.26 |
hadoop_ServiceShutdownHook_register_rdh | /**
* Register the service for shutdown with Hadoop's
* {@link ShutdownHookManager}.
*
* @param priority
* shutdown hook priority
*/
  public synchronized void register(int priority) {
unregister();
ShutdownHookManager.get().addShutdownHook(this, priority);
} | 3.26 |
hadoop_NodeName_anonymize_rdh | // TODO There is no caching for saving memory.
private static String anonymize(String data, WordList wordList) {
if (data == null) {
return null;
}
if (!wordList.contains(data)) {
wordList.add(data);
}
return wordList.getName() + wordList.indexOf(data);
} | 3.26 |
hadoop_JavaCommandLineBuilder_addConfOptionToCLI_rdh | /**
 * Add a configuration option to the command line of the application
*
* @param conf
* configuration
* @param key
* key
* @param defVal
* default value
* @return the resolved configuration option
* @throws IllegalArgumentException
* if key is null or the looked up value
 * is null (that is: the argument is missing and defVal was null).
*/
public String addConfOptionToCLI(Configuration conf, String key, String defVal) {
    Preconditions.checkArgument(key != null, "null key");
    String val = conf.get(key, defVal);
define(key, val);
return val;
} | 3.26 |
hadoop_JavaCommandLineBuilder_define_rdh | /**
* Add a <code>-D key=val</code> command to the CLI. This is very Hadoop API
*
* @param key
* key
* @param val
* value
* @throws IllegalArgumentException
* if either argument is null
*/
  public void define(String key, String val) {
    Preconditions.checkArgument(key != null, "null key");
Preconditions.checkArgument(val != null, "null value");
add("-D", (key + "=") + val);
} | 3.26 |
hadoop_JavaCommandLineBuilder_addPrefixedConfOptions_rdh | /**
* Add all configuration options which match the prefix
*
* @param conf
* configuration
* @param prefix
* prefix, e.g {@code "slider."}
* @return the number of entries copied
*/
public int addPrefixedConfOptions(Configuration conf, String prefix) {
int copied = 0;
for (Map.Entry<String, String> entry : conf) {
if (entry.getKey().startsWith(prefix)) {
define(entry.getKey(), entry.getValue());
copied++;
}
    }
    return copied;
} | 3.26 |
hadoop_JavaCommandLineBuilder_getJavaBinary_rdh | /**
* Get the java binary. This is called in the constructor so don't try and
* do anything other than return a constant.
*
* @return the path to the Java binary
*/
protected String getJavaBinary() {
    return Environment.JAVA_HOME.$$() + "/bin/java";
  } | 3.26 |
hadoop_JavaCommandLineBuilder_setJVMOpts_rdh | /**
* Set JVM opts.
*
* @param jvmOpts
* JVM opts
*/
public void setJVMOpts(String jvmOpts) {
if (ServiceUtils.isSet(jvmOpts)) {
add(jvmOpts);
}
} | 3.26 |
hadoop_JavaCommandLineBuilder_enableJavaAssertions_rdh | /**
* Turn Java assertions on
*/
public void enableJavaAssertions() {
add("-ea");
add("-esa");
} | 3.26 |
hadoop_JavaCommandLineBuilder_sysprop_rdh | /**
* Add a system property definition -must be used before setting the main entry point
*
* @param property
* @param value
*/
public void sysprop(String property, String value) {
    Preconditions.checkArgument(property != null, "null property name");
    Preconditions.checkArgument(value != null, "null value");
add((("-D" + property) + "=") + value);
} | 3.26 |
hadoop_JavaCommandLineBuilder_defineIfSet_rdh | /**
* Add a <code>-D key=val</code> command to the CLI if <code>val</code>
* is not null
*
* @param key
* key
* @param val
* value
*/
public boolean defineIfSet(String key, String val) {
    Preconditions.checkArgument(key != null, "null key");
    if (val != null) {
      define(key, val);
      return true;
    } else {
      return false;
}
} | 3.26 |
hadoop_JavaCommandLineBuilder_addMandatoryConfOption_rdh | /**
* Add a mandatory config option
*
* @param conf
* configuration
* @param key
* key
* @throws BadConfigException
* if the key is missing
*/
  public void addMandatoryConfOption(Configuration conf, String key) throws BadConfigException {
if (!addConfOption(conf, key)) {
throw new BadConfigException("Missing configuration option: " + key);
}
} | 3.26 |
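Taken together, the JavaCommandLineBuilder rows above form a small builder API for assembling a java command line. A short usage sketch; the no-arg constructor and the final build() call are assumed to come from the parent CommandLineBuilder and are not shown in this table.

// Hypothetical usage fragment; only the methods shown above are guaranteed by these snippets.
JavaCommandLineBuilder cli = new JavaCommandLineBuilder();
cli.enableJavaAssertions();                                    // adds -ea and -esa
cli.setJVMOpts("-Xmx512m");                                    // only added when non-empty
cli.sysprop("log4j.configuration", "file:/etc/app/log4j.properties");
cli.define("fs.defaultFS", "hdfs://nn:8020");                  // renders as -D fs.defaultFS=hdfs://nn:8020
String command = cli.build();                                  // assumed rendering method from the parent builder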
hadoop_DiffList_unmodifiableList_rdh | /**
* Returns an unmodifiable diffList.
*
* @param diffs
* DiffList
* @param <T>
* Type of the object in the the diffList
* @return Unmodifiable diffList
*/
  static <T extends Comparable<Integer>> DiffList<T> unmodifiableList(DiffList<T> diffs) {
return new DiffList<T>() {
@Override
      public T get(int i) {
return diffs.get(i);
}
@Override
public boolean isEmpty() {
return diffs.isEmpty();
}
@Override
public int size() {
return diffs.size();
}
@Override
public T remove(int i) {
throw new UnsupportedOperationException("This list is unmodifiable.");
}
@Override
public boolean addLast(T t) {
throw new UnsupportedOperationException("This list is unmodifiable.");
}
@Override
      public void addFirst(T t) {
        throw new UnsupportedOperationException("This list is unmodifiable.");
}
@Override
public int binarySearch(int i) {
return diffs.binarySearch(i);
}
@Override
public Iterator<T> iterator() {
return diffs.iterator();
}
@Override
public List<T> getMinListForRange(int startIndex, int endIndex, INodeDirectory dir) {
return diffs.getMinListForRange(startIndex, endIndex, dir);
}
};
} | 3.26 |
hadoop_DiffList_m0_rdh | /**
* Returns an empty DiffList.
*/
static <T extends Comparable<Integer>> DiffList<T> m0() {
    return EMPTY_LIST;
  } | 3.26 |
hadoop_SequentialBlockGroupIdGenerator_nextValue_rdh | // NumberGenerator
@Override
public long nextValue() {
skipTo((getCurrentValue() & (~BLOCK_GROUP_INDEX_MASK)) + MAX_BLOCKS_IN_GROUP);
// Make sure there's no conflict with existing random block IDs
final Block b = new Block(getCurrentValue());
while (hasValidBlockInRange(b)) {
skipTo(getCurrentValue() + MAX_BLOCKS_IN_GROUP);
b.setBlockId(getCurrentValue());
}
if (b.getBlockId() >= 0) {
throw new IllegalStateException(("All negative block group IDs are used, " + "growing into positive IDs, ") + "which might conflict with non-erasure coded blocks.");
}
return getCurrentValue();
}
/**
*
* @param b
* A block object whose id is set to the starting point for check
 * @return true if any ID in the range
 *     {id, id+HdfsConstants.MAX_BLOCKS_IN_GROUP} | 3.26 |
hadoop_RetriableCommand_setRetryPolicy_rdh | /**
* Fluent-interface to change the RetryHandler.
*
* @param retryHandler
* The new RetryHandler instance to be used.
* @return Self.
*/
public RetriableCommand setRetryPolicy(RetryPolicy retryHandler) {
this.retryPolicy = retryHandler;
return this;
} | 3.26 |
hadoop_RetriableCommand_execute_rdh | /**
* The execute() method invokes doExecute() until either:
* 1. doExecute() succeeds, or
* 2. the command may no longer be retried (e.g. runs out of retry-attempts).
*
* @param arguments
* The list of arguments for the command.
* @return Generic "Object" from doExecute(), on success.
* @throws Exception
*/
public Object execute(Object... arguments) throws Exception {
Exception latestException;
int counter = 0;
while (true) {
try {
        return doExecute(arguments);
      } catch (Exception exception) {
LOG.error("Failure in Retriable command: " + description, exception);
latestException = exception;
}
counter++;
RetryAction action = retryPolicy.shouldRetry(latestException, counter, 0, true);
if (action.action == RetryDecision.RETRY) {
ThreadUtil.sleepAtLeastIgnoreInterrupts(action.delayMillis);
      } else {
        break;
      }
    }
throw new IOException("Couldn't run retriable-command: " + description, latestException);
} | 3.26 |
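The execute() loop above keeps calling doExecute() until the RetryPolicy stops retrying. A minimal sketch of a concrete command follows; the one-argument constructor and the protected doExecute(Object...) signature are assumptions inferred from how execute() and the description field are used above.

// Illustrative subclass; the super(description) constructor and doExecute signature are assumptions.
public class CreateDirCommandSketch extends RetriableCommand {
  public CreateDirCommandSketch(String description) {
    super(description);
  }

  @Override
  protected Object doExecute(Object... arguments) throws Exception {
    java.nio.file.Path dir = java.nio.file.Paths.get((String) arguments[0]);
    java.nio.file.Files.createDirectories(dir); // the operation that execute() will retry on failure
    return dir;
  }
}
// Caller side: new CreateDirCommandSketch("create staging dir").execute("/tmp/staging");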
hadoop_ZKClient_registerService_rdh | /**
* register the service to a specific path.
*
* @param path
* the path in zookeeper namespace to register to
* @param data
* the data that is part of this registration
* @throws IOException
* if there are I/O errors.
* @throws InterruptedException
* if any thread has interrupted.
*/
public void registerService(String path, String data) throws IOException, InterruptedException {
try {
zkClient.create(path, data.getBytes(StandardCharsets.UTF_8), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
} catch (KeeperException ke) {
throw new IOException(ke);
}
} | 3.26 |
hadoop_ZKClient_listServices_rdh | /**
* list the services registered under a path.
*
* @param path
* the path under which services are
* registered
* @return the list of names of services registered
* @throws IOException
* if there are I/O errors.
* @throws InterruptedException
* if any thread has interrupted.
*/
  public List<String> listServices(String path) throws IOException, InterruptedException {
    List<String> children = null;
try {
children = zkClient.getChildren(path, false);
    } catch (KeeperException ke) {
      throw new IOException(ke);
    }
return children;
} | 3.26 |
hadoop_ZKClient_getServiceData_rdh | /**
* get data published by the service at the registration address.
*
* @param path
* the path where the service is registered
* @return the data of the registered service
* @throws IOException
* if there are I/O errors.
* @throws InterruptedException
* if any thread has interrupted.
*/
public String getServiceData(String path) throws IOException, InterruptedException {
String data;
try {
Stat stat = new Stat();
byte[] byteData = zkClient.getData(path, false, stat);
data = new String(byteData, StandardCharsets.UTF_8);
} catch (KeeperException ke) {
throw new IOException(ke);
}
return data;
} | 3.26 |
hadoop_ZKClient_unregisterService_rdh | /**
* unregister the service.
*
* @param path
* the path at which the service was registered
* @throws IOException
* if there are I/O errors.
* @throws InterruptedException
* if any thread has interrupted.
*/
public void unregisterService(String path) throws IOException, InterruptedException {
try {
zkClient.delete(path, -1);
} catch (KeeperException ke) {
throw new IOException(ke);
}
} | 3.26 |
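The four ZKClient rows above cover the register/list/read/unregister lifecycle. A hedged end-to-end fragment; the constructor taking a ZooKeeper connection string is an assumption, since only instance methods appear in this table, and the paths are illustrative.

// Hypothetical lifecycle walk-through; ZKClient("host:port") is an assumed constructor.
ZKClient zk = new ZKClient("localhost:2181");
zk.registerService("/services/worker-1", "host-a:8080");        // ephemeral registration
for (String child : zk.listServices("/services")) {
  System.out.println(child + " -> " + zk.getServiceData("/services/" + child));
}
zk.unregisterService("/services/worker-1");                     // deletes the znode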
hadoop_FlowActivitySubDoc_equals_rdh | // Only check if type and id are equal
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof FlowActivitySubDoc)) {
return false;
}
    FlowActivitySubDoc m = ((FlowActivitySubDoc) (o));
if (!flowVersion.equalsIgnoreCase(m.getFlowVersion())) {
return false;
}
return flowRunId == m.getFlowRunId();
} | 3.26 |
hadoop_FederationRouterRMTokenInputValidator_validate_rdh | /**
 * We will check the {@link RouterMasterKeyRequest}
 * to ensure that the request object is not empty and that the RouterMasterKey is not empty.
*
* @param request
* RouterMasterKey Request.
* @throws FederationStateStoreInvalidInputException
* if the request is invalid.
*/
public static void validate(RouterMasterKeyRequest request) throws FederationStateStoreInvalidInputException {
// Verify the request to ensure that the request is not empty,
// if the request is found to be empty, an exception will be thrown.
if (request == null) {
String message = "Missing RouterMasterKey Request." + " Please try again by specifying a router master key request information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// Check whether the masterKey is empty,
// if the masterKey is empty, throw an exception message.
RouterMasterKey v6 = request.getRouterMasterKey();
if (v6 == null) {
String message = "Missing RouterMasterKey." + " Please try again by specifying a router master key information.";
      LOG.warn(message);
      throw new FederationStateStoreInvalidInputException(message);
}
} | 3.26 |
hadoop_MetricsCache_getTag_rdh | /**
* Lookup a tag value
*
* @param key
* name of the tag
* @return the tag value
   */
  public String getTag(String key) {
return tags.get(key);
} | 3.26 |
hadoop_MetricsCache_get_rdh | /**
* Get the cached record
*
* @param name
* of the record
* @param tags
* of the record
* @return the cached record or null
*/
  public Record get(String name, Collection<MetricsTag> tags) {
    RecordCache rc = map.get(name);
if (rc == null)
return null;
return rc.get(tags);
} | 3.26 |
hadoop_MetricsCache_getMetricInstance_rdh | /**
* Lookup a metric instance
*
* @param key
* name of the metric
* @return the metric instance
*/
public AbstractMetric getMetricInstance(String key) {
    return metrics.get(key);
  } | 3.26 |
hadoop_MetricsCache_update_rdh | /**
* Update the cache and return the current cache record
*
* @param mr
* the update record
* @return the updated cache record
*/
public Record update(MetricsRecord mr) {
return update(mr, false);
} | 3.26 |
hadoop_MetricsCache_getMetric_rdh | /**
* Lookup a metric value
*
* @param key
* name of the metric
* @return the metric value
*/
public Number getMetric(String key) {
AbstractMetric metric = metrics.get(key);
return metric != null ? metric.value() : null;
} | 3.26 |
hadoop_MetricsCache_metrics_rdh | /**
*
* @deprecated use metricsEntrySet() instead
* @return entry set of metrics
*/
@Deprecated
public Set<Map.Entry<String, Number>> metrics() {
Map<String, Number> v2 = new LinkedHashMap<String, Number>(metrics.size());
for (Map.Entry<String, AbstractMetric> mapEntry : metrics.entrySet()) {
v2.put(mapEntry.getKey(), mapEntry.getValue().value());
}
return v2.entrySet();
} | 3.26 |
hadoop_MetricsCache_metricsEntrySet_rdh | /**
*
* @return entry set of metrics
*/
public Set<Map.Entry<String, AbstractMetric>> metricsEntrySet() {
return metrics.entrySet();
} | 3.26 |
hadoop_MetricsCache_tags_rdh | /**
*
* @return the entry set of the tags of the record
*/
  public Set<Map.Entry<String, String>> tags() {
    return tags.entrySet();
} | 3.26 |
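The MetricsCache accessors above are easiest to read together with update(): a record is merged into the cache under its name and tag set, and metrics and tags are then looked up from the merged Record. A hedged fragment; the no-arg constructor and the nested Record type name are assumptions, `record` stands for a MetricsRecord delivered to a sink (outside these rows), and the metric and tag names are illustrative.

// Illustrative lookup flow; `record` is a MetricsRecord obtained elsewhere (e.g. in a sink's putMetrics).
MetricsCache cache = new MetricsCache();                        // no-arg constructor is an assumption
MetricsCache.Record cached = cache.update(record);              // merge the update, get the merged view
Number queueLength = cached.getMetric("CallQueueLength");       // hypothetical metric name; null if absent
String port = cached.getTag("port");                            // hypothetical tag name
for (java.util.Map.Entry<String, AbstractMetric> e : cached.metricsEntrySet()) {
  System.out.println(e.getKey() + " = " + e.getValue().value());
}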
hadoop_AbstractJavaKeyStoreProvider_locateKeystore_rdh | /**
* Open up and initialize the keyStore.
*
* @throws IOException
* If there is a problem reading the password file
* or a problem reading the keystore.
*/
private void locateKeystore() throws IOException {
try {
      password = ProviderUtils.locatePassword(CREDENTIAL_PASSWORD_ENV_VAR, conf.get(CREDENTIAL_PASSWORD_FILE_KEY));
      if (password == null) {
password = CREDENTIAL_PASSWORD_DEFAULT.toCharArray();
}
      KeyStore ks;
      ks = KeyStore.getInstance(getKeyStoreType());
if (keystoreExists()) {
m0();
try (InputStream in = getInputStreamForFile()) {
          ks.load(in, password);
        }
} else {
createPermissions("600");// required to create an empty keystore. *sigh*
ks.load(null, password);
}
keyStore = ks;
} catch (KeyStoreException e) {
throw new IOException("Can't create keystore", e);
    } catch (GeneralSecurityException e) {
throw new IOException("Can't load keystore " + getPathAsString(), e);
}
} | 3.26 |
hadoop_GroupMappingServiceProvider_getGroupsSet_rdh | /**
* Get all various group memberships of a given user.
* Returns EMPTY set in case of non-existing user
*
* @param user
* User's name
* @return set of group memberships of user
* @throws IOException
* raised on errors performing I/O.
   */
  default Set<String> getGroupsSet(String user) throws IOException {
// Override to form the set directly to avoid another conversion
return new LinkedHashSet<>(getGroups(user));
} | 3.26 |
hadoop_NormalizedResourceEvent_getTaskType_rdh | /**
* the tasktype for the event.
*
* @return the tasktype for the event.
*/
public TaskType getTaskType() {
return this.taskType;
} | 3.26 |
hadoop_PeriodicService_getRunCount_rdh | /**
* Get how many times we run the periodic service.
*
* @return Times we run the periodic service.
*/
protected long getRunCount() {
return this.runCount;
} | 3.26 |
hadoop_PeriodicService_stopPeriodic_rdh | /**
* Stop the periodic task.
*/
protected synchronized void stopPeriodic() {
if (this.isRunning) {
LOG.info("{} is shutting down", this.serviceName);
this.isRunning = false;
      this.scheduler.shutdownNow();
    }
} | 3.26 |
hadoop_PeriodicService_startPeriodic_rdh | /**
* Start the periodic execution.
*/
protected synchronized void startPeriodic() {
stopPeriodic();
// Create the runnable service
Runnable updateRunnable = () -> {
LOG.debug("Running {} update task", serviceName);
try {
if (!isRunning) {
return;
}
periodicInvoke();
runCount++;
lastRun = Time.now();
} catch (Exception ex) {
errorCount++;
LOG.warn("{} service threw an exception", serviceName, ex);
}
};
// Start the execution of the periodic service
this.isRunning = true;
this.scheduler.scheduleWithFixedDelay(updateRunnable, 0, this.f0, TimeUnit.MILLISECONDS);
} | 3.26 |
hadoop_PeriodicService_getErrorCount_rdh | /**
* Get how many times we failed to run the periodic service.
*
* @return Times we failed to run the periodic service.
*/
protected long getErrorCount() {
    return this.errorCount;
} | 3.26 |
hadoop_PeriodicService_setIntervalMs_rdh | /**
* Set the interval for the periodic service.
*
* @param interval
* Interval in milliseconds.
*/
protected void setIntervalMs(long interval) {
if (getServiceState() == STATE.STARTED) {
throw new ServiceStateException("Periodic service already started");
} else {
this.f0 = interval;
}
} | 3.26 |
hadoop_PeriodicService_getLastUpdate_rdh | /**
* Get the last time the periodic service was executed.
*
* @return Last time the periodic service was executed.
*/
  protected long getLastUpdate() {
    return this.lastRun;
  } | 3.26 |
hadoop_PeriodicService_getIntervalMs_rdh | /**
* Get the interval for the periodic service.
*
* @return Interval in milliseconds.
*/
protected long getIntervalMs() {
return this.f0;
} | 3.26 |
hadoop_ConfigRedactor_redact_rdh | /**
* Given a key / value pair, decides whether or not to redact and returns
* either the original value or text indicating it has been redacted.
*
* @param key
* param key.
* @param value
* param value, will return if conditions permit.
* @return Original value, or text indicating it has been redacted
*/
public String redact(String key, String value) {
    if (configIsSensitive(key)) {
      return REDACTED_TEXT;
    }
    return value;
} | 3.26 |
hadoop_ConfigRedactor_redactXml_rdh | /**
* Given a key / value pair, decides whether or not to redact and returns
* either the original value or text indicating it has been redacted.
*
* @param key
* param key.
* @param value
* param value, will return if conditions permit.
* @return Original value, or text indicating it has been redacted
*/
public String redactXml(String key, String value) {
if (configIsSensitive(key)) {
return REDACTED_XML;
}
return value;
} | 3.26 |
hadoop_ConfigRedactor_configIsSensitive_rdh | /**
* Matches given config key against patterns and determines whether or not
* it should be considered sensitive enough to redact in logs and other
* plaintext displays.
*
* @param key
* @return True if parameter is considered sensitive
*/
private boolean configIsSensitive(String key) {
for (Pattern regex : compiledPatterns) {
if (regex.matcher(key).find()) {
return true;
}
}
return false;
} | 3.26 |
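A short usage sketch for the redactor rows above; the constructor taking a Configuration (from which compiledPatterns is presumably built) is an assumption, as is the exact set of keys that match a sensitive pattern.

// Hypothetical usage; ConfigRedactor(Configuration) is an assumed constructor.
Configuration conf = new Configuration();
ConfigRedactor redactor = new ConfigRedactor(conf);
String hidden = redactor.redact("fs.s3a.secret.key", "super-secret"); // if the key matches a sensitive pattern, REDACTED_TEXT is returned
String shown = redactor.redact("fs.defaultFS", "hdfs://nn:8020");     // non-sensitive key, value returned unchanged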
hadoop_StorageReceivedDeletedBlocks_getStorageID_rdh | /**
*
* @deprecated Use {@link #getStorage()} instead
*/
@Deprecated
public String getStorageID() {
return storage.getStorageID();
} | 3.26 |
hadoop_DelegatingSSLSocketFactory_bindToOpenSSLProvider_rdh | /**
* Bind to the OpenSSL provider via wildfly.
* This MUST be the only place where wildfly classes are referenced,
* so ensuring that any linkage problems only surface here where they may
* be caught by the initialization code.
*/
private void bindToOpenSSLProvider() throws NoSuchAlgorithmException, KeyManagementException {
if (!openSSLProviderRegistered) {
LOG.debug("Attempting to register OpenSSL provider");
openssl.OpenSSLProvider.register();
      openSSLProviderRegistered = true;
    }
// Strong reference needs to be kept to logger until initialization of
// SSLContext finished (see HADOOP-16174):
Logger logger = Logger.getLogger("org.wildfly.openssl.SSL");
Level originalLevel = logger.getLevel();
try {
logger.setLevel(Level.WARNING);
ctx = SSLContext.getInstance("openssl.TLS");
ctx.init(null, null, null);
} finally {
logger.setLevel(originalLevel);
}
} | 3.26 |
hadoop_DelegatingSSLSocketFactory_getChannelMode_rdh | /**
* Get the channel mode of this instance.
*
* @return a channel mode.
*/
  public SSLChannelMode getChannelMode() {
return channelMode;
} | 3.26 |
hadoop_DelegatingSSLSocketFactory_initializeDefaultFactory_rdh | /**
* Initialize a singleton SSL socket factory.
*
* @param preferredMode
* applicable only if the instance is not initialized.
* @throws IOException
* if an error occurs.
*/
public static synchronized void initializeDefaultFactory(SSLChannelMode preferredMode) throws IOException {
if (instance == null) {
instance = new DelegatingSSLSocketFactory(preferredMode);
}
} | 3.26 |
hadoop_DelegatingSSLSocketFactory_resetDefaultFactory_rdh | /**
* For testing only: reset the socket factory.
*/
@VisibleForTesting
public static synchronized void resetDefaultFactory() {
LOG.info("Resetting default SSL Socket Factory");
instance = null;
} | 3.26 |
hadoop_CustomTokenProviderAdapter_bind_rdh | /**
* Bind to the filesystem by passing the binding call on
* to any custom token provider adaptee which implements
* {@link BoundDTExtension}.
* No-op if they don't.
*
* @param fsURI
* URI of the filesystem.
* @param conf
* configuration of this extension.
* @throws IOException
* failure.
*/
@Override
  public void bind(final URI fsURI, final Configuration conf) throws IOException {
ExtensionHelper.bind(adaptee, fsURI, conf);
} | 3.26 |
hadoop_ServerWebApp_contextDestroyed_rdh | /**
* Destroys the <code>ServletContextListener</code> which destroys
* the Server.
*
* @param event
 * servlet context event.
*/
@Override
public void contextDestroyed(ServletContextEvent event) {
destroy();
} | 3.26 |
hadoop_ServerWebApp_getHomeDir_rdh | /**
* Returns the server home directory.
* <p>
* It is looked up in the Java System property
* <code>#SERVER_NAME#.home.dir</code>.
*
* @param name
* the server home directory.
* @return the server home directory.
*/
static String getHomeDir(String name) {
String homeDir = f0.get();
if (homeDir == null) {
String sysProp = name + HOME_DIR;
      homeDir = System.getProperty(sysProp);
if (homeDir == null) {
throw new IllegalArgumentException(MessageFormat.format("System property [{0}] not defined", sysProp));
}
}
    return homeDir;
} | 3.26 |
hadoop_ServerWebApp_setAuthority_rdh | /**
* Sets an alternate hostname:port InetSocketAddress to use.
* <p>
* For testing purposes.
*
* @param authority
 * alternate authority.
*/
@VisibleForTesting
public void setAuthority(InetSocketAddress authority) {
this.authority = authority;
} | 3.26 |
hadoop_ServerWebApp_isSslEnabled_rdh | /**
 * Returns whether SSL is enabled for this server.
 *
 * @return true if SSL is enabled.
 */
public boolean isSslEnabled() {
return Boolean.parseBoolean(System.getProperty(getName() + SSL_ENABLED, "false"));
} | 3.26 |
hadoop_ServerWebApp_m0_rdh | /**
* Returns the hostname:port InetSocketAddress the webserver is listening to.
*
* @return the hostname:port InetSocketAddress the webserver is listening to.
*/
public InetSocketAddress m0() throws ServerException {
    synchronized(this) {
      if (authority == null) {
        authority = resolveAuthority();
}
}
return authority;
} | 3.26 |
hadoop_ServerWebApp_setHomeDirForCurrentThread_rdh | /**
* Method for testing purposes.
*/
public static void setHomeDirForCurrentThread(String homeDir) {
f0.set(homeDir);
} | 3.26 |
hadoop_HdfsUtils_m0_rdh | /**
* Is the HDFS healthy?
* HDFS is considered as healthy if it is up and not in safemode.
*
* @param uri
* the HDFS URI. Note that the URI path is ignored.
* @return true if HDFS is healthy; false, otherwise.
*/
@SuppressWarnings("deprecation")
public static boolean m0(URI uri) {
// check scheme
final String scheme = uri.getScheme();
if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
throw new IllegalArgumentException((("The scheme is not " + HdfsConstants.HDFS_URI_SCHEME) + ", uri=") + uri);
}
final Configuration conf = new Configuration();
// disable FileSystem cache
    conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
// disable client retry for rpc connection and rpc calls
conf.setBoolean(Retry.POLICY_ENABLED_KEY, false);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
try (DistributedFileSystem fs = ((DistributedFileSystem) (FileSystem.get(uri, conf)))) {
final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
if (LOG.isDebugEnabled()) {
LOG.debug("Is namenode in safemode? {}; uri={}", safemode, uri);
}
return !safemode;
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Got an exception for uri={}", uri, e);
}
return false;
}
} | 3.26 |