Dataset columns:

| column | type |
|---|---|
| repo | string, 5–58 chars |
| path | string, 9–168 chars |
| func_name | string, 9–130 chars |
| original_string | string, 66–10.5k chars |
| language | 1 class: ruby |
| code | string, 66–10.5k chars |
| code_tokens | sequence |
| docstring | string, 8–16k chars |
| docstring_tokens | sequence |
| sha | string, 40 chars |
| url | string, 94–266 chars |
| partition | 1 class: train |
zendesk/ruby-kafka | lib/kafka/message_buffer.rb | Kafka.MessageBuffer.clear_messages | ruby

```ruby
def clear_messages(topic:, partition:)
  return unless @buffer.key?(topic) && @buffer[topic].key?(partition)

  @size -= @buffer[topic][partition].count
  @bytesize -= @buffer[topic][partition].map(&:bytesize).reduce(0, :+)

  @buffer[topic].delete(partition)
  @buffer.delete(topic) if @buffer[topic].empty?
end
```
"def",
"clear_messages",
"(",
"topic",
":",
",",
"partition",
":",
")",
"return",
"unless",
"@buffer",
".",
"key?",
"(",
"topic",
")",
"&&",
"@buffer",
"[",
"topic",
"]",
".",
"key?",
"(",
"partition",
")",
"@size",
"-=",
"@buffer",
"[",
"topic",
"]",
"[",
"partition",
"]",
".",
"count",
"@bytesize",
"-=",
"@buffer",
"[",
"topic",
"]",
"[",
"partition",
"]",
".",
"map",
"(",
":bytesize",
")",
".",
"reduce",
"(",
"0",
",",
":+",
")",
"@buffer",
"[",
"topic",
"]",
".",
"delete",
"(",
"partition",
")",
"@buffer",
".",
"delete",
"(",
"topic",
")",
"if",
"@buffer",
"[",
"topic",
"]",
".",
"empty?",
"end"

Clears buffered messages for the given topic and partition.

@param topic [String] the name of the topic.
@param partition [Integer] the partition id.
@return [nil]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/message_buffer.rb#L57-L65 | train
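
A hedged usage sketch. `MessageBuffer` is an internal class, and the `write` call below assumes the buffer's usual writer interface (visible in the `deliver_message` record further down) rather than anything this record documents:

```ruby
buffer = Kafka::MessageBuffer.new

# Assumed writer interface; optional arguments such as headers and
# create_time are omitted here and may vary between library versions.
buffer.write(value: "hello", key: nil, topic: "greetings", partition: 0)

# Drops everything buffered for partition 0 of "greetings" and shrinks the
# size/bytesize counters accordingly; a no-op for unknown topic/partitions.
buffer.clear_messages(topic: "greetings", partition: 0)
```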

zendesk/ruby-kafka | lib/kafka/client.rb | Kafka.Client.deliver_message | ruby

```ruby
def deliver_message(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, retries: 1)
  create_time = Time.now

  message = PendingMessage.new(
    value: value,
    key: key,
    headers: headers,
    topic: topic,
    partition: partition,
    partition_key: partition_key,
    create_time: create_time
  )

  if partition.nil?
    partition_count = @cluster.partitions_for(topic).count
    partition = Partitioner.partition_for_key(partition_count, message)
  end

  buffer = MessageBuffer.new

  buffer.write(
    value: message.value,
    key: message.key,
    headers: message.headers,
    topic: message.topic,
    partition: partition,
    create_time: message.create_time,
  )

  @cluster.add_target_topics([topic])

  compressor = Compressor.new(
    instrumenter: @instrumenter,
  )

  transaction_manager = TransactionManager.new(
    cluster: @cluster,
    logger: @logger,
    idempotent: false,
    transactional: false
  )

  operation = ProduceOperation.new(
    cluster: @cluster,
    transaction_manager: transaction_manager,
    buffer: buffer,
    required_acks: 1,
    ack_timeout: 10,
    compressor: compressor,
    logger: @logger,
    instrumenter: @instrumenter,
  )

  attempt = 1

  begin
    operation.execute

    unless buffer.empty?
      raise DeliveryFailed.new(nil, [message])
    end
  rescue Kafka::Error => e
    @cluster.mark_as_stale!

    if attempt >= (retries + 1)
      raise
    else
      attempt += 1
      @logger.warn "Error while delivering message, #{e.class}: #{e.message}; retrying after 1s..."
      sleep 1
      retry
    end
  end
end
```
"def",
"deliver_message",
"(",
"value",
",",
"key",
":",
"nil",
",",
"headers",
":",
"{",
"}",
",",
"topic",
":",
",",
"partition",
":",
"nil",
",",
"partition_key",
":",
"nil",
",",
"retries",
":",
"1",
")",
"create_time",
"=",
"Time",
".",
"now",
"message",
"=",
"PendingMessage",
".",
"new",
"(",
"value",
":",
"value",
",",
"key",
":",
"key",
",",
"headers",
":",
"headers",
",",
"topic",
":",
"topic",
",",
"partition",
":",
"partition",
",",
"partition_key",
":",
"partition_key",
",",
"create_time",
":",
"create_time",
")",
"if",
"partition",
".",
"nil?",
"partition_count",
"=",
"@cluster",
".",
"partitions_for",
"(",
"topic",
")",
".",
"count",
"partition",
"=",
"Partitioner",
".",
"partition_for_key",
"(",
"partition_count",
",",
"message",
")",
"end",
"buffer",
"=",
"MessageBuffer",
".",
"new",
"buffer",
".",
"write",
"(",
"value",
":",
"message",
".",
"value",
",",
"key",
":",
"message",
".",
"key",
",",
"headers",
":",
"message",
".",
"headers",
",",
"topic",
":",
"message",
".",
"topic",
",",
"partition",
":",
"partition",
",",
"create_time",
":",
"message",
".",
"create_time",
",",
")",
"@cluster",
".",
"add_target_topics",
"(",
"[",
"topic",
"]",
")",
"compressor",
"=",
"Compressor",
".",
"new",
"(",
"instrumenter",
":",
"@instrumenter",
",",
")",
"transaction_manager",
"=",
"TransactionManager",
".",
"new",
"(",
"cluster",
":",
"@cluster",
",",
"logger",
":",
"@logger",
",",
"idempotent",
":",
"false",
",",
"transactional",
":",
"false",
")",
"operation",
"=",
"ProduceOperation",
".",
"new",
"(",
"cluster",
":",
"@cluster",
",",
"transaction_manager",
":",
"transaction_manager",
",",
"buffer",
":",
"buffer",
",",
"required_acks",
":",
"1",
",",
"ack_timeout",
":",
"10",
",",
"compressor",
":",
"compressor",
",",
"logger",
":",
"@logger",
",",
"instrumenter",
":",
"@instrumenter",
",",
")",
"attempt",
"=",
"1",
"begin",
"operation",
".",
"execute",
"unless",
"buffer",
".",
"empty?",
"raise",
"DeliveryFailed",
".",
"new",
"(",
"nil",
",",
"[",
"message",
"]",
")",
"end",
"rescue",
"Kafka",
"::",
"Error",
"=>",
"e",
"@cluster",
".",
"mark_as_stale!",
"if",
"attempt",
">=",
"(",
"retries",
"+",
"1",
")",
"raise",
"else",
"attempt",
"+=",
"1",
"@logger",
".",
"warn",
"\"Error while delivering message, #{e.class}: #{e.message}; retrying after 1s...\"",
"sleep",
"1",
"retry",
"end",
"end",
"end"

Initializes a new Kafka client.

@param seed_brokers [Array<String>, String] the list of brokers used to initialize the client. Either an Array of connections, or a comma separated string of connections. A connection can either be a string of "host:port" or a full URI with a scheme. If there's a scheme it's ignored and only host/port are used.
@param client_id [String] the identifier for this application.
@param logger [Logger] the logger that should be used by the client.
@param connect_timeout [Integer, nil] the timeout setting for connecting to brokers. See {BrokerPool#initialize}.
@param socket_timeout [Integer, nil] the timeout setting for socket connections. See {BrokerPool#initialize}.
@param ssl_ca_cert [String, Array<String>, nil] a PEM encoded CA cert, or an Array of PEM encoded CA certs, to use with an SSL connection.
@param ssl_ca_cert_file_path [String, nil] a path on the filesystem to a PEM encoded CA cert to use with an SSL connection.
@param ssl_client_cert [String, nil] a PEM encoded client cert to use with an SSL connection. Must be used in combination with ssl_client_cert_key.
@param ssl_client_cert_key [String, nil] a PEM encoded client cert key to use with an SSL connection. Must be used in combination with ssl_client_cert.
@param ssl_client_cert_key_password [String, nil] the password required to read the ssl_client_cert_key. Must be used in combination with ssl_client_cert_key.
@param sasl_gssapi_principal [String, nil] a KRB5 principal.
@param sasl_gssapi_keytab [String, nil] a KRB5 keytab filepath.
@param sasl_scram_username [String, nil] SCRAM username.
@param sasl_scram_password [String, nil] SCRAM password.
@param sasl_scram_mechanism [String, nil] SCRAM mechanism, either "sha256" or "sha512".
@param sasl_over_ssl [Boolean] whether to enforce SSL with SASL.
@param sasl_oauth_token_provider [Object, nil] an OAuthBearer token provider instance that implements the method `token`. See {Sasl::OAuth#initialize}.
@return [Client]

Delivers a single message to the Kafka cluster.

**Note:** Only use this API for low-throughput scenarios. If you want to deliver many messages at a high rate, or if you want to configure the way messages are sent, use the {#producer} or {#async_producer} APIs instead.

@param value [String, nil] the message value.
@param key [String, nil] the message key.
@param headers [Hash<String, String>] the headers for the message.
@param topic [String] the topic that the message should be written to.
@param partition [Integer, nil] the partition that the message should be written to, or `nil` if either `partition_key` is passed or the partition should be chosen at random.
@param partition_key [String] a value used to deterministically choose a partition to write to.
@param retries [Integer] the number of times to retry the delivery before giving up.
@return [nil]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/client.rb#L137-L212 | train

zendesk/ruby-kafka | lib/kafka/client.rb | Kafka.Client.producer | ruby

```ruby
def producer(
  compression_codec: nil,
  compression_threshold: 1,
  ack_timeout: 5,
  required_acks: :all,
  max_retries: 2,
  retry_backoff: 1,
  max_buffer_size: 1000,
  max_buffer_bytesize: 10_000_000,
  idempotent: false,
  transactional: false,
  transactional_id: nil,
  transactional_timeout: 60
)
  cluster = initialize_cluster

  compressor = Compressor.new(
    codec_name: compression_codec,
    threshold: compression_threshold,
    instrumenter: @instrumenter,
  )

  transaction_manager = TransactionManager.new(
    cluster: cluster,
    logger: @logger,
    idempotent: idempotent,
    transactional: transactional,
    transactional_id: transactional_id,
    transactional_timeout: transactional_timeout,
  )

  Producer.new(
    cluster: cluster,
    transaction_manager: transaction_manager,
    logger: @logger,
    instrumenter: @instrumenter,
    compressor: compressor,
    ack_timeout: ack_timeout,
    required_acks: required_acks,
    max_retries: max_retries,
    retry_backoff: retry_backoff,
    max_buffer_size: max_buffer_size,
    max_buffer_bytesize: max_buffer_bytesize,
  )
end
```
"def",
"producer",
"(",
"compression_codec",
":",
"nil",
",",
"compression_threshold",
":",
"1",
",",
"ack_timeout",
":",
"5",
",",
"required_acks",
":",
":all",
",",
"max_retries",
":",
"2",
",",
"retry_backoff",
":",
"1",
",",
"max_buffer_size",
":",
"1000",
",",
"max_buffer_bytesize",
":",
"10_000_000",
",",
"idempotent",
":",
"false",
",",
"transactional",
":",
"false",
",",
"transactional_id",
":",
"nil",
",",
"transactional_timeout",
":",
"60",
")",
"cluster",
"=",
"initialize_cluster",
"compressor",
"=",
"Compressor",
".",
"new",
"(",
"codec_name",
":",
"compression_codec",
",",
"threshold",
":",
"compression_threshold",
",",
"instrumenter",
":",
"@instrumenter",
",",
")",
"transaction_manager",
"=",
"TransactionManager",
".",
"new",
"(",
"cluster",
":",
"cluster",
",",
"logger",
":",
"@logger",
",",
"idempotent",
":",
"idempotent",
",",
"transactional",
":",
"transactional",
",",
"transactional_id",
":",
"transactional_id",
",",
"transactional_timeout",
":",
"transactional_timeout",
",",
")",
"Producer",
".",
"new",
"(",
"cluster",
":",
"cluster",
",",
"transaction_manager",
":",
"transaction_manager",
",",
"logger",
":",
"@logger",
",",
"instrumenter",
":",
"@instrumenter",
",",
"compressor",
":",
"compressor",
",",
"ack_timeout",
":",
"ack_timeout",
",",
"required_acks",
":",
"required_acks",
",",
"max_retries",
":",
"max_retries",
",",
"retry_backoff",
":",
"retry_backoff",
",",
"max_buffer_size",
":",
"max_buffer_size",
",",
"max_buffer_bytesize",
":",
"max_buffer_bytesize",
",",
")",
"end"

Initializes a new Kafka producer.

@param ack_timeout [Integer] the number of seconds a broker can wait for replicas to acknowledge a write before responding with a timeout.
@param required_acks [Integer, Symbol] the number of replicas that must acknowledge a write, or `:all` if all in-sync replicas must acknowledge.
@param max_retries [Integer] the number of retries that should be attempted before giving up sending messages to the cluster. Does not include the original attempt.
@param retry_backoff [Integer] the number of seconds to wait between retries.
@param max_buffer_size [Integer] the number of messages allowed in the buffer before new writes will raise {BufferOverflow} exceptions.
@param max_buffer_bytesize [Integer] the maximum size of the buffer in bytes. Attempting to produce messages when the buffer reaches this size will result in {BufferOverflow} being raised.
@param compression_codec [Symbol, nil] the name of the compression codec to use, or nil if no compression should be performed. Valid codecs: `:snappy`, `:gzip`, `:lz4`, `:zstd`.
@param compression_threshold [Integer] the number of messages that need to be in a message set before it should be compressed. Note that message sets are per-partition rather than per-topic or per-producer.
@return [Kafka::Producer] the Kafka producer.

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/client.rb#L244-L287 | train
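
A usage sketch of the buffering workflow, reusing the `kafka` client from the earlier sketch; the topic names are placeholders:

```ruby
producer = kafka.producer(compression_codec: :gzip, max_retries: 3, retry_backoff: 5)

# Messages accumulate in the local buffer until explicitly delivered.
producer.produce("event-1", topic: "events")
producer.produce("event-2", topic: "events", partition_key: "user-1")
producer.deliver_messages

producer.shutdown  # flush remaining state and release broker connections
```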

zendesk/ruby-kafka | lib/kafka/client.rb | Kafka.Client.async_producer | ruby

```ruby
def async_producer(delivery_interval: 0, delivery_threshold: 0, max_queue_size: 1000, max_retries: -1, retry_backoff: 0, **options)
  sync_producer = producer(**options)

  AsyncProducer.new(
    sync_producer: sync_producer,
    delivery_interval: delivery_interval,
    delivery_threshold: delivery_threshold,
    max_queue_size: max_queue_size,
    max_retries: max_retries,
    retry_backoff: retry_backoff,
    instrumenter: @instrumenter,
    logger: @logger,
  )
end
```
"def",
"async_producer",
"(",
"delivery_interval",
":",
"0",
",",
"delivery_threshold",
":",
"0",
",",
"max_queue_size",
":",
"1000",
",",
"max_retries",
":",
"-",
"1",
",",
"retry_backoff",
":",
"0",
",",
"**",
"options",
")",
"sync_producer",
"=",
"producer",
"(",
"**",
"options",
")",
"AsyncProducer",
".",
"new",
"(",
"sync_producer",
":",
"sync_producer",
",",
"delivery_interval",
":",
"delivery_interval",
",",
"delivery_threshold",
":",
"delivery_threshold",
",",
"max_queue_size",
":",
"max_queue_size",
",",
"max_retries",
":",
"max_retries",
",",
"retry_backoff",
":",
"retry_backoff",
",",
"instrumenter",
":",
"@instrumenter",
",",
"logger",
":",
"@logger",
",",
")",
"end"

Creates a new AsyncProducer instance.

All parameters allowed by {#producer} can be passed. In addition to this, a few extra parameters can be passed when creating an async producer.

@param max_queue_size [Integer] the maximum number of messages allowed in the queue.
@param delivery_threshold [Integer] if greater than zero, the number of buffered messages that will automatically trigger a delivery.
@param delivery_interval [Integer] if greater than zero, the number of seconds between automatic message deliveries.
@see AsyncProducer
@return [AsyncProducer]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/client.rb#L303-L316 | train
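
A sketch of the asynchronous variant; the thresholds below are illustrative values, not defaults:

```ruby
async = kafka.async_producer(
  delivery_interval: 10,    # flush every 10 seconds...
  delivery_threshold: 100,  # ...or as soon as 100 messages are buffered
)

async.produce("clicked", topic: "events")  # returns immediately

# Delivers anything still buffered and stops the background threads.
async.shutdown
```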

zendesk/ruby-kafka | lib/kafka/client.rb | Kafka.Client.consumer | ruby

```ruby
def consumer(
  group_id:,
  session_timeout: 30,
  offset_commit_interval: 10,
  offset_commit_threshold: 0,
  heartbeat_interval: 10,
  offset_retention_time: nil,
  fetcher_max_queue_size: 100
)
  cluster = initialize_cluster

  instrumenter = DecoratingInstrumenter.new(@instrumenter, {
    group_id: group_id,
  })

  # The Kafka protocol expects the retention time to be in ms.
  retention_time = (offset_retention_time && offset_retention_time * 1_000) || -1

  group = ConsumerGroup.new(
    cluster: cluster,
    logger: @logger,
    group_id: group_id,
    session_timeout: session_timeout,
    retention_time: retention_time,
    instrumenter: instrumenter,
  )

  fetcher = Fetcher.new(
    cluster: initialize_cluster,
    group: group,
    logger: @logger,
    instrumenter: instrumenter,
    max_queue_size: fetcher_max_queue_size
  )

  offset_manager = OffsetManager.new(
    cluster: cluster,
    group: group,
    fetcher: fetcher,
    logger: @logger,
    commit_interval: offset_commit_interval,
    commit_threshold: offset_commit_threshold,
    offset_retention_time: offset_retention_time
  )

  heartbeat = Heartbeat.new(
    group: group,
    interval: heartbeat_interval,
    instrumenter: instrumenter
  )

  Consumer.new(
    cluster: cluster,
    logger: @logger,
    instrumenter: instrumenter,
    group: group,
    offset_manager: offset_manager,
    fetcher: fetcher,
    session_timeout: session_timeout,
    heartbeat: heartbeat,
  )
end
```
"def",
"consumer",
"(",
"group_id",
":",
",",
"session_timeout",
":",
"30",
",",
"offset_commit_interval",
":",
"10",
",",
"offset_commit_threshold",
":",
"0",
",",
"heartbeat_interval",
":",
"10",
",",
"offset_retention_time",
":",
"nil",
",",
"fetcher_max_queue_size",
":",
"100",
")",
"cluster",
"=",
"initialize_cluster",
"instrumenter",
"=",
"DecoratingInstrumenter",
".",
"new",
"(",
"@instrumenter",
",",
"{",
"group_id",
":",
"group_id",
",",
"}",
")",
"# The Kafka protocol expects the retention time to be in ms.",
"retention_time",
"=",
"(",
"offset_retention_time",
"&&",
"offset_retention_time",
"*",
"1_000",
")",
"||",
"-",
"1",
"group",
"=",
"ConsumerGroup",
".",
"new",
"(",
"cluster",
":",
"cluster",
",",
"logger",
":",
"@logger",
",",
"group_id",
":",
"group_id",
",",
"session_timeout",
":",
"session_timeout",
",",
"retention_time",
":",
"retention_time",
",",
"instrumenter",
":",
"instrumenter",
",",
")",
"fetcher",
"=",
"Fetcher",
".",
"new",
"(",
"cluster",
":",
"initialize_cluster",
",",
"group",
":",
"group",
",",
"logger",
":",
"@logger",
",",
"instrumenter",
":",
"instrumenter",
",",
"max_queue_size",
":",
"fetcher_max_queue_size",
")",
"offset_manager",
"=",
"OffsetManager",
".",
"new",
"(",
"cluster",
":",
"cluster",
",",
"group",
":",
"group",
",",
"fetcher",
":",
"fetcher",
",",
"logger",
":",
"@logger",
",",
"commit_interval",
":",
"offset_commit_interval",
",",
"commit_threshold",
":",
"offset_commit_threshold",
",",
"offset_retention_time",
":",
"offset_retention_time",
")",
"heartbeat",
"=",
"Heartbeat",
".",
"new",
"(",
"group",
":",
"group",
",",
"interval",
":",
"heartbeat_interval",
",",
"instrumenter",
":",
"instrumenter",
")",
"Consumer",
".",
"new",
"(",
"cluster",
":",
"cluster",
",",
"logger",
":",
"@logger",
",",
"instrumenter",
":",
"instrumenter",
",",
"group",
":",
"group",
",",
"offset_manager",
":",
"offset_manager",
",",
"fetcher",
":",
"fetcher",
",",
"session_timeout",
":",
"session_timeout",
",",
"heartbeat",
":",
"heartbeat",
",",
")",
"end"

Creates a new Kafka consumer.

@param group_id [String] the id of the group that the consumer should join.
@param session_timeout [Integer] the number of seconds after which, if a client hasn't contacted the Kafka cluster, it will be kicked out of the group.
@param offset_commit_interval [Integer] the interval between offset commits, in seconds.
@param offset_commit_threshold [Integer] the number of messages that can be processed before their offsets are committed. If zero, offset commits are not triggered by message processing.
@param heartbeat_interval [Integer] the interval between heartbeats; must be less than the session window.
@param offset_retention_time [Integer] the time period that committed offsets will be retained, in seconds. Defaults to the broker setting.
@param fetcher_max_queue_size [Integer] max number of items in the fetch queue that are stored for further processing. Note that each item in the queue represents a response from a single broker.
@return [Consumer]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/client.rb#L336-L397 | train
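
A usage sketch of the consumer-group workflow, combining this method with `subscribe` and `each_message` (both documented in later records):

```ruby
consumer = kafka.consumer(group_id: "my-group", offset_commit_interval: 5)
consumer.subscribe("greetings", start_from_beginning: false)

# Blocks, yielding messages as they arrive; offsets are committed on the
# configured interval as long as processing keeps succeeding.
consumer.each_message do |message|
  puts "#{message.topic}/#{message.partition}@#{message.offset}: #{message.value}"
end
```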

zendesk/ruby-kafka | lib/kafka/client.rb | Kafka.Client.fetch_messages | ruby

```ruby
def fetch_messages(topic:, partition:, offset: :latest, max_wait_time: 5, min_bytes: 1, max_bytes: 1048576, retries: 1)
  operation = FetchOperation.new(
    cluster: @cluster,
    logger: @logger,
    min_bytes: min_bytes,
    max_bytes: max_bytes,
    max_wait_time: max_wait_time,
  )

  operation.fetch_from_partition(topic, partition, offset: offset, max_bytes: max_bytes)

  attempt = 1

  begin
    operation.execute.flat_map {|batch| batch.messages }
  rescue Kafka::Error => e
    @cluster.mark_as_stale!

    if attempt >= (retries + 1)
      raise
    else
      attempt += 1
      @logger.warn "Error while fetching messages, #{e.class}: #{e.message}; retrying..."
      retry
    end
  end
end
```
"def",
"fetch_messages",
"(",
"topic",
":",
",",
"partition",
":",
",",
"offset",
":",
":latest",
",",
"max_wait_time",
":",
"5",
",",
"min_bytes",
":",
"1",
",",
"max_bytes",
":",
"1048576",
",",
"retries",
":",
"1",
")",
"operation",
"=",
"FetchOperation",
".",
"new",
"(",
"cluster",
":",
"@cluster",
",",
"logger",
":",
"@logger",
",",
"min_bytes",
":",
"min_bytes",
",",
"max_bytes",
":",
"max_bytes",
",",
"max_wait_time",
":",
"max_wait_time",
",",
")",
"operation",
".",
"fetch_from_partition",
"(",
"topic",
",",
"partition",
",",
"offset",
":",
"offset",
",",
"max_bytes",
":",
"max_bytes",
")",
"attempt",
"=",
"1",
"begin",
"operation",
".",
"execute",
".",
"flat_map",
"{",
"|",
"batch",
"|",
"batch",
".",
"messages",
"}",
"rescue",
"Kafka",
"::",
"Error",
"=>",
"e",
"@cluster",
".",
"mark_as_stale!",
"if",
"attempt",
">=",
"(",
"retries",
"+",
"1",
")",
"raise",
"else",
"attempt",
"+=",
"1",
"@logger",
".",
"warn",
"\"Error while fetching messages, #{e.class}: #{e.message}; retrying...\"",
"retry",
"end",
"end",
"end"

Fetches a batch of messages from a single partition. Note that it's possible to get back empty batches.

The starting point for the fetch can be configured with the `:offset` argument. If you pass a number, the fetch will start at that offset. However, there are two special Symbol values that can be passed instead:

* `:earliest` — the first offset in the partition.
* `:latest` — the next offset that will be written to, effectively making the call block until there is a new message in the partition.

The Kafka protocol specifies the numeric values of these two options: -2 and -1, respectively. You can also pass in these numbers directly.

## Example

When enumerating the messages in a partition, you typically fetch batches sequentially.

    offset = :earliest

    loop do
      messages = kafka.fetch_messages(
        topic: "my-topic",
        partition: 42,
        offset: offset,
      )

      messages.each do |message|
        puts message.offset, message.key, message.value

        # Set the next offset that should be read to be the subsequent offset.
        offset = message.offset + 1
      end
    end

See a working example in `examples/simple-consumer.rb`.

@param topic [String] the topic that messages should be fetched from.
@param partition [Integer] the partition that messages should be fetched from.
@param offset [Integer, Symbol] the offset to start reading from. Default is the latest offset.
@param max_wait_time [Integer] the maximum amount of time to wait before the server responds, in seconds.
@param min_bytes [Integer] the minimum number of bytes to wait for. If set to zero, the broker will respond immediately, but the response may be empty. The default is 1 byte, which means that the broker will respond as soon as a message is written to the partition.
@param max_bytes [Integer] the maximum number of bytes to include in the response message set. Default is 1 MB. You need to set this higher if you expect messages to be larger than this.
@return [Array<Kafka::FetchedMessage>] the messages returned from the broker.

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/client.rb#L458-L484 | train

zendesk/ruby-kafka | lib/kafka/client.rb | Kafka.Client.each_message | ruby

```ruby
def each_message(topic:, start_from_beginning: true, max_wait_time: 5, min_bytes: 1, max_bytes: 1048576, &block)
  default_offset ||= start_from_beginning ? :earliest : :latest
  offsets = Hash.new { default_offset }

  loop do
    operation = FetchOperation.new(
      cluster: @cluster,
      logger: @logger,
      min_bytes: min_bytes,
      max_wait_time: max_wait_time,
    )

    @cluster.partitions_for(topic).map(&:partition_id).each do |partition|
      partition_offset = offsets[partition]
      operation.fetch_from_partition(topic, partition, offset: partition_offset, max_bytes: max_bytes)
    end

    batches = operation.execute

    batches.each do |batch|
      batch.messages.each(&block)
      offsets[batch.partition] = batch.last_offset + 1 unless batch.unknown_last_offset?
    end
  end
end
```
"def",
"each_message",
"(",
"topic",
":",
",",
"start_from_beginning",
":",
"true",
",",
"max_wait_time",
":",
"5",
",",
"min_bytes",
":",
"1",
",",
"max_bytes",
":",
"1048576",
",",
"&",
"block",
")",
"default_offset",
"||=",
"start_from_beginning",
"?",
":earliest",
":",
":latest",
"offsets",
"=",
"Hash",
".",
"new",
"{",
"default_offset",
"}",
"loop",
"do",
"operation",
"=",
"FetchOperation",
".",
"new",
"(",
"cluster",
":",
"@cluster",
",",
"logger",
":",
"@logger",
",",
"min_bytes",
":",
"min_bytes",
",",
"max_wait_time",
":",
"max_wait_time",
",",
")",
"@cluster",
".",
"partitions_for",
"(",
"topic",
")",
".",
"map",
"(",
":partition_id",
")",
".",
"each",
"do",
"|",
"partition",
"|",
"partition_offset",
"=",
"offsets",
"[",
"partition",
"]",
"operation",
".",
"fetch_from_partition",
"(",
"topic",
",",
"partition",
",",
"offset",
":",
"partition_offset",
",",
"max_bytes",
":",
"max_bytes",
")",
"end",
"batches",
"=",
"operation",
".",
"execute",
"batches",
".",
"each",
"do",
"|",
"batch",
"|",
"batch",
".",
"messages",
".",
"each",
"(",
"block",
")",
"offsets",
"[",
"batch",
".",
"partition",
"]",
"=",
"batch",
".",
"last_offset",
"+",
"1",
"unless",
"batch",
".",
"unknown_last_offset?",
"end",
"end",
"end"

Enumerate all messages in a topic.

@param topic [String] the topic to consume messages from.
@param start_from_beginning [Boolean] whether to start from the beginning of the topic or just subscribe to new messages being produced.
@param max_wait_time [Integer] the maximum amount of time to wait before the server responds, in seconds.
@param min_bytes [Integer] the minimum number of bytes to wait for. If set to zero, the broker will respond immediately, but the response may be empty. The default is 1 byte, which means that the broker will respond as soon as a message is written to the partition.
@param max_bytes [Integer] the maximum number of bytes to include in the response message set. Default is 1 MB. You need to set this higher if you expect messages to be larger than this.
@return [nil]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/client.rb#L506-L530 | train
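
A sketch distinguishing this from the consumer-group API: progress lives only in the in-memory `offsets` hash above, so a restarted process begins again from the default offsets.

```ruby
# Tail a topic without group coordination or offset persistence.
kafka.each_message(topic: "greetings", start_from_beginning: false) do |message|
  puts message.value
end
```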

zendesk/ruby-kafka | lib/kafka/client.rb | Kafka.Client.create_topic | ruby

```ruby
def create_topic(name, num_partitions: 1, replication_factor: 1, timeout: 30, config: {})
  @cluster.create_topic(
    name,
    num_partitions: num_partitions,
    replication_factor: replication_factor,
    timeout: timeout,
    config: config,
  )
end
```
"def",
"create_topic",
"(",
"name",
",",
"num_partitions",
":",
"1",
",",
"replication_factor",
":",
"1",
",",
"timeout",
":",
"30",
",",
"config",
":",
"{",
"}",
")",
"@cluster",
".",
"create_topic",
"(",
"name",
",",
"num_partitions",
":",
"num_partitions",
",",
"replication_factor",
":",
"replication_factor",
",",
"timeout",
":",
"timeout",
",",
"config",
":",
"config",
",",
")",
"end"

Creates a topic in the cluster.

@example Creating a topic with log compaction
  # Enable log compaction:
  config = { "cleanup.policy" => "compact" }

  # Create the topic:
  kafka.create_topic("dns-mappings", config: config)

@param name [String] the name of the topic.
@param num_partitions [Integer] the number of partitions that should be created in the topic.
@param replication_factor [Integer] the replication factor of the topic.
@param timeout [Integer] a duration of time to wait for the topic to be completely created.
@param config [Hash] topic configuration entries. See [the Kafka documentation](https://kafka.apache.org/documentation/#topicconfigs) for more information.
@raise [Kafka::TopicAlreadyExists] if the topic already exists.
@return [nil]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/client.rb#L552-L560 | train

zendesk/ruby-kafka | lib/kafka/client.rb | Kafka.Client.create_partitions_for | ruby

```ruby
def create_partitions_for(name, num_partitions: 1, timeout: 30)
  @cluster.create_partitions_for(name, num_partitions: num_partitions, timeout: timeout)
end
```
"def",
"create_partitions_for",
"(",
"name",
",",
"num_partitions",
":",
"1",
",",
"timeout",
":",
"30",
")",
"@cluster",
".",
"create_partitions_for",
"(",
"name",
",",
"num_partitions",
":",
"num_partitions",
",",
"timeout",
":",
"timeout",
")",
"end"

Create partitions for a topic.

@param name [String] the name of the topic.
@param num_partitions [Integer] the number of desired partitions for the topic.
@param timeout [Integer] a duration of time to wait for the new partitions to be added.
@return [nil]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/client.rb#L625-L627 | train
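
A usage sketch; note that `num_partitions` is the desired total for the topic, not an increment:

```ruby
# Grow "greetings" to 10 partitions, waiting up to 30s for completion.
kafka.create_partitions_for("greetings", num_partitions: 10, timeout: 30)
```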

zendesk/ruby-kafka | lib/kafka/client.rb | Kafka.Client.last_offsets_for | ruby

```ruby
def last_offsets_for(*topics)
  @cluster.add_target_topics(topics)
  topics.map {|topic|
    partition_ids = @cluster.partitions_for(topic).collect(&:partition_id)
    partition_offsets = @cluster.resolve_offsets(topic, partition_ids, :latest)
    [topic, partition_offsets.collect { |k, v| [k, v - 1] }.to_h]
  }.to_h
end
```
"def",
"last_offsets_for",
"(",
"*",
"topics",
")",
"@cluster",
".",
"add_target_topics",
"(",
"topics",
")",
"topics",
".",
"map",
"{",
"|",
"topic",
"|",
"partition_ids",
"=",
"@cluster",
".",
"partitions_for",
"(",
"topic",
")",
".",
"collect",
"(",
":partition_id",
")",
"partition_offsets",
"=",
"@cluster",
".",
"resolve_offsets",
"(",
"topic",
",",
"partition_ids",
",",
":latest",
")",
"[",
"topic",
",",
"partition_offsets",
".",
"collect",
"{",
"|",
"k",
",",
"v",
"|",
"[",
"k",
",",
"v",
"-",
"1",
"]",
"}",
".",
"to_h",
"]",
"}",
".",
"to_h",
"end"

Retrieve the offset of the last message in each partition of the specified topics.

@param topics [Array<String>] topic names.
@return [Hash<String, Hash<Integer, Integer>>]
@example
  last_offsets_for('topic-1', 'topic-2') # =>
  # {
  #   'topic-1' => { 0 => 100, 1 => 100 },
  #   'topic-2' => { 0 => 100, 1 => 100 }
  # }

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/client.rb#L688-L695 | train

zendesk/ruby-kafka | lib/kafka/cluster.rb | Kafka.Cluster.add_target_topics | ruby

```ruby
def add_target_topics(topics)
  topics = Set.new(topics)
  unless topics.subset?(@target_topics)
    new_topics = topics - @target_topics

    unless new_topics.empty?
      @logger.info "New topics added to target list: #{new_topics.to_a.join(', ')}"

      @target_topics.merge(new_topics)

      refresh_metadata!
    end
  end
end
```
"def",
"add_target_topics",
"(",
"topics",
")",
"topics",
"=",
"Set",
".",
"new",
"(",
"topics",
")",
"unless",
"topics",
".",
"subset?",
"(",
"@target_topics",
")",
"new_topics",
"=",
"topics",
"-",
"@target_topics",
"unless",
"new_topics",
".",
"empty?",
"@logger",
".",
"info",
"\"New topics added to target list: #{new_topics.to_a.join(', ')}\"",
"@target_topics",
".",
"merge",
"(",
"new_topics",
")",
"refresh_metadata!",
"end",
"end",
"end"

Initializes a Cluster with a set of seed brokers.

The cluster will try to fetch cluster metadata from one of the brokers.

@param seed_brokers [Array<URI>]
@param broker_pool [Kafka::BrokerPool]
@param logger [Logger]

Adds a list of topics to the target list. Only the topics on this list will be queried for metadata.

@param topics [Array<String>]
@return [nil]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/cluster.rb#L42-L55 | train
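
A sketch of the internal call pattern, assuming a `Kafka::Cluster` instance named `cluster`; applications normally reach this only indirectly through the client:

```ruby
# Registering a genuinely new topic triggers a metadata refresh.
cluster.add_target_topics(["greetings"])

# A subset of the current target list is a no-op: no refresh happens.
cluster.add_target_topics(["greetings"])
```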

zendesk/ruby-kafka | lib/kafka/cluster.rb | Kafka.Cluster.get_transaction_coordinator | ruby

```ruby
def get_transaction_coordinator(transactional_id:)
  @logger.debug "Getting transaction coordinator for `#{transactional_id}`"

  refresh_metadata_if_necessary!

  if transactional_id.nil?
    # Get a random broker.
    @logger.debug "Transaction ID is not available. Choose a random broker."
    return random_broker
  else
    get_coordinator(Kafka::Protocol::COORDINATOR_TYPE_TRANSACTION, transactional_id)
  end
end
```
"def",
"get_transaction_coordinator",
"(",
"transactional_id",
":",
")",
"@logger",
".",
"debug",
"\"Getting transaction coordinator for `#{transactional_id}`\"",
"refresh_metadata_if_necessary!",
"if",
"transactional_id",
".",
"nil?",
"# Get a random_broker",
"@logger",
".",
"debug",
"\"Transaction ID is not available. Choose a random broker.\"",
"return",
"random_broker",
"else",
"get_coordinator",
"(",
"Kafka",
"::",
"Protocol",
"::",
"COORDINATOR_TYPE_TRANSACTION",
",",
"transactional_id",
")",
"end",
"end"

Finds the broker acting as the coordinator of the given transaction.

@param transactional_id [String]
@return [Broker] the broker that's currently coordinator.

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/cluster.rb#L128-L140 | train
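
A sketch of the two branches, again assuming an internal `cluster` instance:

```ruby
# With an id, the coordinator broker for that transaction is looked up.
broker = cluster.get_transaction_coordinator(transactional_id: "my-txn-id")

# Without one, any broker will do.
broker = cluster.get_transaction_coordinator(transactional_id: nil)
```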

zendesk/ruby-kafka | lib/kafka/cluster.rb | Kafka.Cluster.list_topics | ruby

```ruby
def list_topics
  response = random_broker.fetch_metadata(topics: nil)
  response.topics.select do |topic|
    topic.topic_error_code == 0
  end.map(&:topic_name)
end
```
"def",
"list_topics",
"response",
"=",
"random_broker",
".",
"fetch_metadata",
"(",
"topics",
":",
"nil",
")",
"response",
".",
"topics",
".",
"select",
"do",
"|",
"topic",
"|",
"topic",
".",
"topic_error_code",
"==",
"0",
"end",
".",
"map",
"(",
":topic_name",
")",
"end"

Lists all topics in the cluster.

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/cluster.rb#L329-L334 | train
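
A sketch; topics whose metadata carries a non-zero error code are filtered out of the result:

```ruby
cluster.list_topics  #=> ["greetings", "events"] (names only, no partition data)
```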

zendesk/ruby-kafka | lib/kafka/cluster.rb | Kafka.Cluster.fetch_cluster_info | ruby

```ruby
def fetch_cluster_info
  errors = []

  @seed_brokers.shuffle.each do |node|
    @logger.info "Fetching cluster metadata from #{node}"

    begin
      broker = @broker_pool.connect(node.hostname, node.port)
      cluster_info = broker.fetch_metadata(topics: @target_topics)

      if cluster_info.brokers.empty?
        @logger.error "No brokers in cluster"
      else
        @logger.info "Discovered cluster metadata; nodes: #{cluster_info.brokers.join(', ')}"
        @stale = false
        return cluster_info
      end
    rescue Error => e
      @logger.error "Failed to fetch metadata from #{node}: #{e}"
      errors << [node, e]
    ensure
      broker.disconnect unless broker.nil?
    end
  end

  error_description = errors.map {|node, exception| "- #{node}: #{exception}" }.join("\n")

  raise ConnectionError, "Could not connect to any of the seed brokers:\n#{error_description}"
end
```
"def",
"fetch_cluster_info",
"errors",
"=",
"[",
"]",
"@seed_brokers",
".",
"shuffle",
".",
"each",
"do",
"|",
"node",
"|",
"@logger",
".",
"info",
"\"Fetching cluster metadata from #{node}\"",
"begin",
"broker",
"=",
"@broker_pool",
".",
"connect",
"(",
"node",
".",
"hostname",
",",
"node",
".",
"port",
")",
"cluster_info",
"=",
"broker",
".",
"fetch_metadata",
"(",
"topics",
":",
"@target_topics",
")",
"if",
"cluster_info",
".",
"brokers",
".",
"empty?",
"@logger",
".",
"error",
"\"No brokers in cluster\"",
"else",
"@logger",
".",
"info",
"\"Discovered cluster metadata; nodes: #{cluster_info.brokers.join(', ')}\"",
"@stale",
"=",
"false",
"return",
"cluster_info",
"end",
"rescue",
"Error",
"=>",
"e",
"@logger",
".",
"error",
"\"Failed to fetch metadata from #{node}: #{e}\"",
"errors",
"<<",
"[",
"node",
",",
"e",
"]",
"ensure",
"broker",
".",
"disconnect",
"unless",
"broker",
".",
"nil?",
"end",
"end",
"error_description",
"=",
"errors",
".",
"map",
"{",
"|",
"node",
",",
"exception",
"|",
"\"- #{node}: #{exception}\"",
"}",
".",
"join",
"(",
"\"\\n\"",
")",
"raise",
"ConnectionError",
",",
"\"Could not connect to any of the seed brokers:\\n#{error_description}\"",
"end"

Fetches the cluster metadata.

This is used to update the partition leadership information, among other things.

The method will go through each node listed in `seed_brokers`, connecting to the first one that is available. This node will be queried for the cluster metadata.

@raise [ConnectionError] if none of the nodes in `seed_brokers` are available.
@return [Protocol::MetadataResponse] the cluster metadata.

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/cluster.rb#L367-L397 | train

zendesk/ruby-kafka | lib/kafka/async_producer.rb | Kafka.AsyncProducer.produce | ruby

```ruby
def produce(value, topic:, **options)
  ensure_threads_running!

  if @queue.size >= @max_queue_size
    buffer_overflow topic,
      "Cannot produce to #{topic}, max queue size (#{@max_queue_size} messages) reached"
  end

  args = [value, **options.merge(topic: topic)]
  @queue << [:produce, args]

  @instrumenter.instrument("enqueue_message.async_producer", {
    topic: topic,
    queue_size: @queue.size,
    max_queue_size: @max_queue_size,
  })

  nil
end
```
"def",
"produce",
"(",
"value",
",",
"topic",
":",
",",
"**",
"options",
")",
"ensure_threads_running!",
"if",
"@queue",
".",
"size",
">=",
"@max_queue_size",
"buffer_overflow",
"topic",
",",
"\"Cannot produce to #{topic}, max queue size (#{@max_queue_size} messages) reached\"",
"end",
"args",
"=",
"[",
"value",
",",
"**",
"options",
".",
"merge",
"(",
"topic",
":",
"topic",
")",
"]",
"@queue",
"<<",
"[",
":produce",
",",
"args",
"]",
"@instrumenter",
".",
"instrument",
"(",
"\"enqueue_message.async_producer\"",
",",
"{",
"topic",
":",
"topic",
",",
"queue_size",
":",
"@queue",
".",
"size",
",",
"max_queue_size",
":",
"@max_queue_size",
",",
"}",
")",
"nil",
"end"

Initializes a new AsyncProducer.

@param sync_producer [Kafka::Producer] the synchronous producer that should be used in the background.
@param max_queue_size [Integer] the maximum number of messages allowed in the queue.
@param delivery_threshold [Integer] if greater than zero, the number of buffered messages that will automatically trigger a delivery.
@param delivery_interval [Integer] if greater than zero, the number of seconds between automatic message deliveries.

Produces a message to the specified topic.

@see Kafka::Producer#produce
@param (see Kafka::Producer#produce)
@raise [BufferOverflow] if the message queue is full.
@return [nil]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/async_producer.rb#L105-L123 | train
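
A sketch of handling the `BufferOverflow` case named in the docstring; the backoff policy is illustrative:

```ruby
async = kafka.async_producer(max_queue_size: 1000)

begin
  async.produce("payload", topic: "events")
rescue Kafka::BufferOverflow
  # The background worker hasn't drained the queue yet; back off briefly
  # and retry, or drop the message if the application prefers.
  sleep 0.1
  retry
end
```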

zendesk/ruby-kafka | lib/kafka/ssl_socket_with_timeout.rb | Kafka.SSLSocketWithTimeout.write | ruby

```ruby
def write(bytes)
  loop do
    written = 0
    begin
      # Unlike plain TCP sockets, SSL sockets don't support IO.select
      # properly. Instead, timeouts happen on a per-write basis, and we
      # have to catch exceptions from write_nonblock and gradually work
      # through our write buffer.
      written += @ssl_socket.write_nonblock(bytes)
    rescue Errno::EFAULT => error
      raise error
    rescue OpenSSL::SSL::SSLError, Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitWritable => error
      if error.is_a?(OpenSSL::SSL::SSLError) && error.message == 'write would block'
        if select_with_timeout(@ssl_socket, :write)
          retry
        else
          raise Errno::ETIMEDOUT
        end
      else
        raise error
      end
    end

    # Fast, common case.
    break if written == bytes.size

    # This takes advantage of the fact that most Ruby implementations
    # have copy-on-write strings: when requesting a subrange of the data,
    # we don't actually copy it, because the new string simply references
    # a subrange of the original.
    bytes = bytes[written, bytes.size]
  end
end
```
"def",
"write",
"(",
"bytes",
")",
"loop",
"do",
"written",
"=",
"0",
"begin",
"# unlike plain tcp sockets, ssl sockets don't support IO.select",
"# properly.",
"# Instead, timeouts happen on a per write basis, and we have to",
"# catch exceptions from write_nonblock, and gradually build up",
"# our write buffer.",
"written",
"+=",
"@ssl_socket",
".",
"write_nonblock",
"(",
"bytes",
")",
"rescue",
"Errno",
"::",
"EFAULT",
"=>",
"error",
"raise",
"error",
"rescue",
"OpenSSL",
"::",
"SSL",
"::",
"SSLError",
",",
"Errno",
"::",
"EAGAIN",
",",
"Errno",
"::",
"EWOULDBLOCK",
",",
"IO",
"::",
"WaitWritable",
"=>",
"error",
"if",
"error",
".",
"is_a?",
"(",
"OpenSSL",
"::",
"SSL",
"::",
"SSLError",
")",
"&&",
"error",
".",
"message",
"==",
"'write would block'",
"if",
"select_with_timeout",
"(",
"@ssl_socket",
",",
":write",
")",
"retry",
"else",
"raise",
"Errno",
"::",
"ETIMEDOUT",
"end",
"else",
"raise",
"error",
"end",
"end",
"# Fast, common case.",
"break",
"if",
"written",
"==",
"bytes",
".",
"size",
"# This takes advantage of the fact that most ruby implementations",
"# have Copy-On-Write strings. Thusly why requesting a subrange",
"# of data, we actually don't copy data because the new string",
"# simply references a subrange of the original.",
"bytes",
"=",
"bytes",
"[",
"written",
",",
"bytes",
".",
"size",
"]",
"end",
"end"

Writes bytes to the socket, possibly with a timeout.

@param bytes [String] the data that should be written to the socket.
@raise [Errno::ETIMEDOUT] if the timeout is exceeded.
@return [Integer] the number of bytes written.

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/ssl_socket_with_timeout.rb#L126-L159 | train
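
A hedged usage sketch. The constructor shape below is an assumption (host, port, timeouts, and an `OpenSSL::SSL::SSLContext`); check the class's `#initialize` before relying on it:

```ruby
require "openssl"

ssl_context = OpenSSL::SSL::SSLContext.new

# Assumed constructor signature; verify against the actual initializer.
socket = Kafka::SSLSocketWithTimeout.new(
  "kafka1.example.com", 9093,
  connect_timeout: 10, timeout: 30, ssl_context: ssl_context
)

socket.write("request bytes")  # raises Errno::ETIMEDOUT if the peer stalls
```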

zendesk/ruby-kafka | lib/kafka/consumer.rb | Kafka.Consumer.subscribe | ruby

```ruby
def subscribe(topic_or_regex, default_offset: nil, start_from_beginning: true, max_bytes_per_partition: 1048576)
  default_offset ||= start_from_beginning ? :earliest : :latest

  if topic_or_regex.is_a?(Regexp)
    cluster_topics.select { |topic| topic =~ topic_or_regex }.each do |topic|
      subscribe_to_topic(topic, default_offset, start_from_beginning, max_bytes_per_partition)
    end
  else
    subscribe_to_topic(topic_or_regex, default_offset, start_from_beginning, max_bytes_per_partition)
  end

  nil
end
```
"def",
"subscribe",
"(",
"topic_or_regex",
",",
"default_offset",
":",
"nil",
",",
"start_from_beginning",
":",
"true",
",",
"max_bytes_per_partition",
":",
"1048576",
")",
"default_offset",
"||=",
"start_from_beginning",
"?",
":earliest",
":",
":latest",
"if",
"topic_or_regex",
".",
"is_a?",
"(",
"Regexp",
")",
"cluster_topics",
".",
"select",
"{",
"|",
"topic",
"|",
"topic",
"=~",
"topic_or_regex",
"}",
".",
"each",
"do",
"|",
"topic",
"|",
"subscribe_to_topic",
"(",
"topic",
",",
"default_offset",
",",
"start_from_beginning",
",",
"max_bytes_per_partition",
")",
"end",
"else",
"subscribe_to_topic",
"(",
"topic_or_regex",
",",
"default_offset",
",",
"start_from_beginning",
",",
"max_bytes_per_partition",
")",
"end",
"nil",
"end"

Subscribes the consumer to a topic.

Typically you either want to start reading messages from the very beginning of the topic's partitions or you simply want to wait for new messages to be written. In the former case, set `start_from_beginning` to true (the default); in the latter, set it to false.

@param topic_or_regex [String, Regexp] subscribe to a single topic with a string, or to multiple topics matching a regex.
@param default_offset [Symbol] whether to start from the beginning or the end of the topic's partitions. Deprecated.
@param start_from_beginning [Boolean] whether to start from the beginning of the topic or just subscribe to new messages being produced. This only applies when first consuming a topic partition – once the consumer has checkpointed its progress, it will always resume from the last checkpoint.
@param max_bytes_per_partition [Integer] the maximum amount of data fetched from a single partition at a time.
@return [nil]

2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/consumer.rb#L97-L109 | train
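
A usage sketch of both forms; note that the code above matches a regex against the topics that exist at call time, so topics created later are not picked up:

```ruby
consumer = kafka.consumer(group_id: "my-group")

consumer.subscribe("greetings")    # a single named topic
consumer.subscribe(/events\..*/)   # every currently existing match
```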

zendesk/ruby-kafka | lib/kafka/consumer.rb | Kafka.Consumer.pause | ruby

```ruby
def pause(topic, partition, timeout: nil, max_timeout: nil, exponential_backoff: false)
  if max_timeout && !exponential_backoff
    raise ArgumentError, "`max_timeout` only makes sense when `exponential_backoff` is enabled"
  end

  pause_for(topic, partition).pause!(
    timeout: timeout,
    max_timeout: max_timeout,
    exponential_backoff: exponential_backoff,
  )
end
```
"def",
"pause",
"(",
"topic",
",",
"partition",
",",
"timeout",
":",
"nil",
",",
"max_timeout",
":",
"nil",
",",
"exponential_backoff",
":",
"false",
")",
"if",
"max_timeout",
"&&",
"!",
"exponential_backoff",
"raise",
"ArgumentError",
",",
"\"`max_timeout` only makes sense when `exponential_backoff` is enabled\"",
"end",
"pause_for",
"(",
"topic",
",",
"partition",
")",
".",
"pause!",
"(",
"timeout",
":",
"timeout",
",",
"max_timeout",
":",
"max_timeout",
",",
"exponential_backoff",
":",
"exponential_backoff",
",",
")",
"end"
] | Pause processing of a specific topic partition.
When a specific message causes the processor code to fail, it can be a good
idea to simply pause the partition until the error can be resolved, allowing
the rest of the partitions to continue being processed.
If the `timeout` argument is passed, the partition will automatically be
resumed when the timeout expires. If `exponential_backoff` is enabled, each
subsequent pause will cause the timeout to double until a message from the
partition has been successfully processed.
@param topic [String]
@param partition [Integer]
@param timeout [nil, Integer] the number of seconds to pause the partition for,
or `nil` if the partition should not be automatically resumed.
@param max_timeout [nil, Integer] the maximum number of seconds to pause for,
or `nil` if no maximum should be enforced.
@param exponential_backoff [Boolean] whether to enable exponential backoff.
@return [nil] | [
"Pause",
"processing",
"of",
"a",
"specific",
"topic",
"partition",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/consumer.rb#L141-L151 | train |
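A hedged sketch of the pause-on-failure pattern described above; `consumer` and the `handle` step are assumptions:
consumer.each_message do |message|
  begin
    handle(message) # hypothetical processing step
  rescue => e
    # Pause for 30s, doubling on each subsequent pause, capped at 10 minutes.
    consumer.pause(message.topic, message.partition,
                   timeout: 30, max_timeout: 600, exponential_backoff: true)
  end
end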
zendesk/ruby-kafka | lib/kafka/consumer.rb | Kafka.Consumer.resume | def resume(topic, partition)
pause_for(topic, partition).resume!
# During re-balancing we might have lost the paused partition. Check if partition is still in group before seek.
seek_to_next(topic, partition) if @group.assigned_to?(topic, partition)
end | ruby | def resume(topic, partition)
pause_for(topic, partition).resume!
# During re-balancing we might have lost the paused partition. Check if partition is still in group before seek.
seek_to_next(topic, partition) if @group.assigned_to?(topic, partition)
end | [
"def",
"resume",
"(",
"topic",
",",
"partition",
")",
"pause_for",
"(",
"topic",
",",
"partition",
")",
".",
"resume!",
"# During re-balancing we might have lost the paused partition. Check if partition is still in group before seek.",
"seek_to_next",
"(",
"topic",
",",
"partition",
")",
"if",
"@group",
".",
"assigned_to?",
"(",
"topic",
",",
"partition",
")",
"end"
] | Resume processing of a topic partition.
@see #pause
@param topic [String]
@param partition [Integer]
@return [nil] | [
"Resume",
"processing",
"of",
"a",
"topic",
"partition",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/consumer.rb#L159-L164 | train |
zendesk/ruby-kafka | lib/kafka/consumer.rb | Kafka.Consumer.paused? | def paused?(topic, partition)
pause = pause_for(topic, partition)
pause.paused? && !pause.expired?
end | ruby | def paused?(topic, partition)
pause = pause_for(topic, partition)
pause.paused? && !pause.expired?
end | [
"def",
"paused?",
"(",
"topic",
",",
"partition",
")",
"pause",
"=",
"pause_for",
"(",
"topic",
",",
"partition",
")",
"pause",
".",
"paused?",
"&&",
"!",
"pause",
".",
"expired?",
"end"
] | Whether the topic partition is currently paused.
@see #pause
@param topic [String]
@param partition [Integer]
@return [Boolean] true if the partition is paused, false otherwise. | [
"Whether",
"the",
"topic",
"partition",
"is",
"currently",
"paused",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/consumer.rb#L172-L175 | train |
zendesk/ruby-kafka | lib/kafka/offset_manager.rb | Kafka.OffsetManager.seek_to_default | def seek_to_default(topic, partition)
# Remove any cached offset, in case things have changed broker-side.
clear_resolved_offset(topic)
offset = resolve_offset(topic, partition)
seek_to(topic, partition, offset)
end | ruby | def seek_to_default(topic, partition)
# Remove any cached offset, in case things have changed broker-side.
clear_resolved_offset(topic)
offset = resolve_offset(topic, partition)
seek_to(topic, partition, offset)
end | [
"def",
"seek_to_default",
"(",
"topic",
",",
"partition",
")",
"# Remove any cached offset, in case things have changed broker-side.",
"clear_resolved_offset",
"(",
"topic",
")",
"offset",
"=",
"resolve_offset",
"(",
"topic",
",",
"partition",
")",
"seek_to",
"(",
"topic",
",",
"partition",
",",
"offset",
")",
"end"
] | Move the consumer's position in the partition back to the configured default
offset, either the first or latest in the partition.
@param topic [String] the name of the topic.
@param partition [Integer] the partition number.
@return [nil] | [
"Move",
"the",
"consumer",
"s",
"position",
"in",
"the",
"partition",
"back",
"to",
"the",
"configured",
"default",
"offset",
"either",
"the",
"first",
"or",
"latest",
"in",
"the",
"partition",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/offset_manager.rb#L68-L75 | train |
zendesk/ruby-kafka | lib/kafka/offset_manager.rb | Kafka.OffsetManager.seek_to | def seek_to(topic, partition, offset)
@processed_offsets[topic] ||= {}
@processed_offsets[topic][partition] = offset
@fetcher.seek(topic, partition, offset)
end | ruby | def seek_to(topic, partition, offset)
@processed_offsets[topic] ||= {}
@processed_offsets[topic][partition] = offset
@fetcher.seek(topic, partition, offset)
end | [
"def",
"seek_to",
"(",
"topic",
",",
"partition",
",",
"offset",
")",
"@processed_offsets",
"[",
"topic",
"]",
"||=",
"{",
"}",
"@processed_offsets",
"[",
"topic",
"]",
"[",
"partition",
"]",
"=",
"offset",
"@fetcher",
".",
"seek",
"(",
"topic",
",",
"partition",
",",
"offset",
")",
"end"
] | Move the consumer's position in the partition to the specified offset.
@param topic [String] the name of the topic.
@param partition [Integer] the partition number.
@param offset [Integer] the offset that the consumer position should be moved to.
@return [nil] | [
"Move",
"the",
"consumer",
"s",
"position",
"in",
"the",
"partition",
"to",
"the",
"specified",
"offset",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/offset_manager.rb#L83-L88 | train |
zendesk/ruby-kafka | lib/kafka/offset_manager.rb | Kafka.OffsetManager.next_offset_for | def next_offset_for(topic, partition)
offset = @processed_offsets.fetch(topic, {}).fetch(partition) {
committed_offset_for(topic, partition)
}
# A negative offset means that no offset has been committed, so we need to
# resolve the default offset for the topic.
if offset < 0
resolve_offset(topic, partition)
else
# The next offset is the last offset.
offset
end
end | ruby | def next_offset_for(topic, partition)
offset = @processed_offsets.fetch(topic, {}).fetch(partition) {
committed_offset_for(topic, partition)
}
# A negative offset means that no offset has been committed, so we need to
# resolve the default offset for the topic.
if offset < 0
resolve_offset(topic, partition)
else
# The next offset is the last offset.
offset
end
end | [
"def",
"next_offset_for",
"(",
"topic",
",",
"partition",
")",
"offset",
"=",
"@processed_offsets",
".",
"fetch",
"(",
"topic",
",",
"{",
"}",
")",
".",
"fetch",
"(",
"partition",
")",
"{",
"committed_offset_for",
"(",
"topic",
",",
"partition",
")",
"}",
"# A negative offset means that no offset has been committed, so we need to",
"# resolve the default offset for the topic.",
"if",
"offset",
"<",
"0",
"resolve_offset",
"(",
"topic",
",",
"partition",
")",
"else",
"# The next offset is the last offset.",
"offset",
"end",
"end"
] | Return the next offset that should be fetched for the specified partition.
@param topic [String] the name of the topic.
@param partition [Integer] the partition number.
@return [Integer] the next offset that should be fetched. | [
"Return",
"the",
"next",
"offset",
"that",
"should",
"be",
"fetched",
"for",
"the",
"specified",
"partition",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/offset_manager.rb#L95-L108 | train |
zendesk/ruby-kafka | lib/kafka/offset_manager.rb | Kafka.OffsetManager.commit_offsets | def commit_offsets(recommit = false)
offsets = offsets_to_commit(recommit)
unless offsets.empty?
@logger.debug "Committing offsets#{recommit ? ' with recommit' : ''}: #{prettify_offsets(offsets)}"
@group.commit_offsets(offsets)
@last_commit = Time.now
@last_recommit = Time.now if recommit
@uncommitted_offsets = 0
@committed_offsets = nil
end
end | ruby | def commit_offsets(recommit = false)
offsets = offsets_to_commit(recommit)
unless offsets.empty?
@logger.debug "Committing offsets#{recommit ? ' with recommit' : ''}: #{prettify_offsets(offsets)}"
@group.commit_offsets(offsets)
@last_commit = Time.now
@last_recommit = Time.now if recommit
@uncommitted_offsets = 0
@committed_offsets = nil
end
end | [
"def",
"commit_offsets",
"(",
"recommit",
"=",
"false",
")",
"offsets",
"=",
"offsets_to_commit",
"(",
"recommit",
")",
"unless",
"offsets",
".",
"empty?",
"@logger",
".",
"debug",
"\"Committing offsets#{recommit ? ' with recommit' : ''}: #{prettify_offsets(offsets)}\"",
"@group",
".",
"commit_offsets",
"(",
"offsets",
")",
"@last_commit",
"=",
"Time",
".",
"now",
"@last_recommit",
"=",
"Time",
".",
"now",
"if",
"recommit",
"@uncommitted_offsets",
"=",
"0",
"@committed_offsets",
"=",
"nil",
"end",
"end"
] | Commit offsets of messages that have been marked as processed.
If `recommit` is set to true, we will also commit the existing positions
even if no messages have been processed on a partition. This is done
in order to avoid the offset information expiring in cases where messages
are very rare -- it's essentially a keep-alive.
@param recommit [Boolean] whether to recommit offsets that have already been
committed.
@return [nil] | [
"Commit",
"offsets",
"of",
"messages",
"that",
"have",
"been",
"marked",
"as",
"processed",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/offset_manager.rb#L120-L133 | train |
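A sketch of the keep-alive behaviour described above; `offset_manager` is assumed to be an OffsetManager wired into a running consumer:
# Commit only the offsets of messages marked as processed:
offset_manager.commit_offsets
# Periodic keep-alive: also re-commit existing positions so they don't
# expire broker-side when partitions see very little traffic:
offset_manager.commit_offsets(true)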
zendesk/ruby-kafka | lib/kafka/offset_manager.rb | Kafka.OffsetManager.clear_offsets_excluding | def clear_offsets_excluding(excluded)
# Clear all offsets that aren't in `excluded`.
@processed_offsets.each do |topic, partitions|
partitions.keep_if do |partition, _|
excluded.fetch(topic, []).include?(partition)
end
end
# Clear the cached commits from the brokers.
@committed_offsets = nil
@resolved_offsets.clear
end | ruby | def clear_offsets_excluding(excluded)
# Clear all offsets that aren't in `excluded`.
@processed_offsets.each do |topic, partitions|
partitions.keep_if do |partition, _|
excluded.fetch(topic, []).include?(partition)
end
end
# Clear the cached commits from the brokers.
@committed_offsets = nil
@resolved_offsets.clear
end | [
"def",
"clear_offsets_excluding",
"(",
"excluded",
")",
"# Clear all offsets that aren't in `excluded`.",
"@processed_offsets",
".",
"each",
"do",
"|",
"topic",
",",
"partitions",
"|",
"partitions",
".",
"keep_if",
"do",
"|",
"partition",
",",
"_",
"|",
"excluded",
".",
"fetch",
"(",
"topic",
",",
"[",
"]",
")",
".",
"include?",
"(",
"partition",
")",
"end",
"end",
"# Clear the cached commits from the brokers.",
"@committed_offsets",
"=",
"nil",
"@resolved_offsets",
".",
"clear",
"end"
] | Clear stored offset information for all partitions except those specified
in `excluded`.
offset_manager.clear_offsets_excluding("my-topic" => [1, 2, 3])
@return [nil] | [
"Clear",
"stored",
"offset",
"information",
"for",
"all",
"partitions",
"except",
"those",
"specified",
"in",
"excluded",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/offset_manager.rb#L163-L174 | train |
zendesk/ruby-kafka | lib/kafka/connection.rb | Kafka.Connection.send_request | def send_request(request)
api_name = Protocol.api_name(request.api_key)
# Default notification payload.
notification = {
broker_host: @host,
api: api_name,
request_size: 0,
response_size: 0,
}
raise IdleConnection if idle?
@logger.push_tags(api_name)
@instrumenter.instrument("request.connection", notification) do
open unless open?
@correlation_id += 1
@logger.debug "Sending #{api_name} API request #{@correlation_id} to #{to_s}"
write_request(request, notification)
response_class = request.response_class
response = wait_for_response(response_class, notification) unless response_class.nil?
@last_request = Time.now
response
end
rescue SystemCallError, EOFError, IOError => e
close
raise ConnectionError, "Connection error #{e.class}: #{e}"
ensure
@logger.pop_tags
end | ruby | def send_request(request)
api_name = Protocol.api_name(request.api_key)
# Default notification payload.
notification = {
broker_host: @host,
api: api_name,
request_size: 0,
response_size: 0,
}
raise IdleConnection if idle?
@logger.push_tags(api_name)
@instrumenter.instrument("request.connection", notification) do
open unless open?
@correlation_id += 1
@logger.debug "Sending #{api_name} API request #{@correlation_id} to #{to_s}"
write_request(request, notification)
response_class = request.response_class
response = wait_for_response(response_class, notification) unless response_class.nil?
@last_request = Time.now
response
end
rescue SystemCallError, EOFError, IOError => e
close
raise ConnectionError, "Connection error #{e.class}: #{e}"
ensure
@logger.pop_tags
end | [
"def",
"send_request",
"(",
"request",
")",
"api_name",
"=",
"Protocol",
".",
"api_name",
"(",
"request",
".",
"api_key",
")",
"# Default notification payload.",
"notification",
"=",
"{",
"broker_host",
":",
"@host",
",",
"api",
":",
"api_name",
",",
"request_size",
":",
"0",
",",
"response_size",
":",
"0",
",",
"}",
"raise",
"IdleConnection",
"if",
"idle?",
"@logger",
".",
"push_tags",
"(",
"api_name",
")",
"@instrumenter",
".",
"instrument",
"(",
"\"request.connection\"",
",",
"notification",
")",
"do",
"open",
"unless",
"open?",
"@correlation_id",
"+=",
"1",
"@logger",
".",
"debug",
"\"Sending #{api_name} API request #{@correlation_id} to #{to_s}\"",
"write_request",
"(",
"request",
",",
"notification",
")",
"response_class",
"=",
"request",
".",
"response_class",
"response",
"=",
"wait_for_response",
"(",
"response_class",
",",
"notification",
")",
"unless",
"response_class",
".",
"nil?",
"@last_request",
"=",
"Time",
".",
"now",
"response",
"end",
"rescue",
"SystemCallError",
",",
"EOFError",
",",
"IOError",
"=>",
"e",
"close",
"raise",
"ConnectionError",
",",
"\"Connection error #{e.class}: #{e}\"",
"ensure",
"@logger",
".",
"pop_tags",
"end"
] | Sends a request over the connection.
@param request [#encode, #response_class] the request that should be
encoded and written.
@return [Object] the response. | [
"Sends",
"a",
"request",
"over",
"the",
"connection",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/connection.rb#L83-L119 | train |
zendesk/ruby-kafka | lib/kafka/connection.rb | Kafka.Connection.write_request | def write_request(request, notification)
message = Kafka::Protocol::RequestMessage.new(
api_key: request.api_key,
api_version: request.respond_to?(:api_version) ? request.api_version : 0,
correlation_id: @correlation_id,
client_id: @client_id,
request: request,
)
data = Kafka::Protocol::Encoder.encode_with(message)
notification[:request_size] = data.bytesize
@encoder.write_bytes(data)
nil
rescue Errno::ETIMEDOUT
@logger.error "Timed out while writing request #{@correlation_id}"
raise
end | ruby | def write_request(request, notification)
message = Kafka::Protocol::RequestMessage.new(
api_key: request.api_key,
api_version: request.respond_to?(:api_version) ? request.api_version : 0,
correlation_id: @correlation_id,
client_id: @client_id,
request: request,
)
data = Kafka::Protocol::Encoder.encode_with(message)
notification[:request_size] = data.bytesize
@encoder.write_bytes(data)
nil
rescue Errno::ETIMEDOUT
@logger.error "Timed out while writing request #{@correlation_id}"
raise
end | [
"def",
"write_request",
"(",
"request",
",",
"notification",
")",
"message",
"=",
"Kafka",
"::",
"Protocol",
"::",
"RequestMessage",
".",
"new",
"(",
"api_key",
":",
"request",
".",
"api_key",
",",
"api_version",
":",
"request",
".",
"respond_to?",
"(",
":api_version",
")",
"?",
"request",
".",
"api_version",
":",
"0",
",",
"correlation_id",
":",
"@correlation_id",
",",
"client_id",
":",
"@client_id",
",",
"request",
":",
"request",
",",
")",
"data",
"=",
"Kafka",
"::",
"Protocol",
"::",
"Encoder",
".",
"encode_with",
"(",
"message",
")",
"notification",
"[",
":request_size",
"]",
"=",
"data",
".",
"bytesize",
"@encoder",
".",
"write_bytes",
"(",
"data",
")",
"nil",
"rescue",
"Errno",
"::",
"ETIMEDOUT",
"@logger",
".",
"error",
"\"Timed out while writing request #{@correlation_id}\"",
"raise",
"end"
] | Writes a request over the connection.
@param request [#encode] the request that should be encoded and written.
@return [nil] | [
"Writes",
"a",
"request",
"over",
"the",
"connection",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/connection.rb#L156-L174 | train |
zendesk/ruby-kafka | lib/kafka/connection.rb | Kafka.Connection.read_response | def read_response(response_class, notification)
@logger.debug "Waiting for response #{@correlation_id} from #{to_s}"
data = @decoder.bytes
notification[:response_size] = data.bytesize
buffer = StringIO.new(data)
response_decoder = Kafka::Protocol::Decoder.new(buffer)
correlation_id = response_decoder.int32
response = response_class.decode(response_decoder)
@logger.debug "Received response #{correlation_id} from #{to_s}"
return correlation_id, response
rescue Errno::ETIMEDOUT
@logger.error "Timed out while waiting for response #{@correlation_id}"
raise
end | ruby | def read_response(response_class, notification)
@logger.debug "Waiting for response #{@correlation_id} from #{to_s}"
data = @decoder.bytes
notification[:response_size] = data.bytesize
buffer = StringIO.new(data)
response_decoder = Kafka::Protocol::Decoder.new(buffer)
correlation_id = response_decoder.int32
response = response_class.decode(response_decoder)
@logger.debug "Received response #{correlation_id} from #{to_s}"
return correlation_id, response
rescue Errno::ETIMEDOUT
@logger.error "Timed out while waiting for response #{@correlation_id}"
raise
end | [
"def",
"read_response",
"(",
"response_class",
",",
"notification",
")",
"@logger",
".",
"debug",
"\"Waiting for response #{@correlation_id} from #{to_s}\"",
"data",
"=",
"@decoder",
".",
"bytes",
"notification",
"[",
":response_size",
"]",
"=",
"data",
".",
"bytesize",
"buffer",
"=",
"StringIO",
".",
"new",
"(",
"data",
")",
"response_decoder",
"=",
"Kafka",
"::",
"Protocol",
"::",
"Decoder",
".",
"new",
"(",
"buffer",
")",
"correlation_id",
"=",
"response_decoder",
".",
"int32",
"response",
"=",
"response_class",
".",
"decode",
"(",
"response_decoder",
")",
"@logger",
".",
"debug",
"\"Received response #{correlation_id} from #{to_s}\"",
"return",
"correlation_id",
",",
"response",
"rescue",
"Errno",
"::",
"ETIMEDOUT",
"@logger",
".",
"error",
"\"Timed out while waiting for response #{@correlation_id}\"",
"raise",
"end"
] | Reads a response from the connection.
@param response_class [#decode] an object that can decode the response from
a given Decoder.
@return [nil] | [
"Reads",
"a",
"response",
"from",
"the",
"connection",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/connection.rb#L182-L200 | train |
zendesk/ruby-kafka | lib/kafka/producer.rb | Kafka.Producer.deliver_messages | def deliver_messages
# There's no need to do anything if the buffer is empty.
return if buffer_size == 0
@instrumenter.instrument("deliver_messages.producer") do |notification|
message_count = buffer_size
notification[:message_count] = message_count
notification[:attempts] = 0
begin
deliver_messages_with_retries(notification)
ensure
notification[:delivered_message_count] = message_count - buffer_size
end
end
end | ruby | def deliver_messages
# There's no need to do anything if the buffer is empty.
return if buffer_size == 0
@instrumenter.instrument("deliver_messages.producer") do |notification|
message_count = buffer_size
notification[:message_count] = message_count
notification[:attempts] = 0
begin
deliver_messages_with_retries(notification)
ensure
notification[:delivered_message_count] = message_count - buffer_size
end
end
end | [
"def",
"deliver_messages",
"# There's no need to do anything if the buffer is empty.",
"return",
"if",
"buffer_size",
"==",
"0",
"@instrumenter",
".",
"instrument",
"(",
"\"deliver_messages.producer\"",
")",
"do",
"|",
"notification",
"|",
"message_count",
"=",
"buffer_size",
"notification",
"[",
":message_count",
"]",
"=",
"message_count",
"notification",
"[",
":attempts",
"]",
"=",
"0",
"begin",
"deliver_messages_with_retries",
"(",
"notification",
")",
"ensure",
"notification",
"[",
":delivered_message_count",
"]",
"=",
"message_count",
"-",
"buffer_size",
"end",
"end",
"end"
] | Sends all buffered messages to the Kafka brokers.
Depending on the value of `required_acks` used when initializing the producer,
this call may block until the specified number of replicas have acknowledged
the writes. The `ack_timeout` setting places an upper bound on the amount of
time the call will block before failing.
@raise [DeliveryFailed] if not all messages could be successfully sent.
@return [nil] | [
"Sends",
"all",
"buffered",
"messages",
"to",
"the",
"Kafka",
"brokers",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/producer.rb#L242-L258 | train |
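A minimal delivery sketch, assuming a producer built with ruby-kafka's `Kafka#producer`:
producer = kafka.producer(required_acks: :all, ack_timeout: 10)
producer.produce("hello", topic: "greetings")
producer.produce("world", topic: "greetings")
# Blocks until the required replicas acknowledge, or raises
# Kafka::DeliveryFailed if not all buffered messages could be sent.
producer.deliver_messages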
zendesk/ruby-kafka | lib/kafka/producer.rb | Kafka.Producer.send_offsets_to_transaction | def send_offsets_to_transaction(batch:, group_id:)
@transaction_manager.send_offsets_to_txn(offsets: { batch.topic => { batch.partition => { offset: batch.last_offset + 1, leader_epoch: batch.leader_epoch } } }, group_id: group_id)
end | ruby | def send_offsets_to_transaction(batch:, group_id:)
@transaction_manager.send_offsets_to_txn(offsets: { batch.topic => { batch.partition => { offset: batch.last_offset + 1, leader_epoch: batch.leader_epoch } } }, group_id: group_id)
end | [
"def",
"send_offsets_to_transaction",
"(",
"batch",
":",
",",
"group_id",
":",
")",
"@transaction_manager",
".",
"send_offsets_to_txn",
"(",
"offsets",
":",
"{",
"batch",
".",
"topic",
"=>",
"{",
"batch",
".",
"partition",
"=>",
"{",
"offset",
":",
"batch",
".",
"last_offset",
"+",
"1",
",",
"leader_epoch",
":",
"batch",
".",
"leader_epoch",
"}",
"}",
"}",
",",
"group_id",
":",
"group_id",
")",
"end"
] | Sends the batch's last offset to the consumer group coordinator and marks
this offset as part of the current transaction. This offset will be considered
committed only if the transaction is committed successfully.
This method should be used when you need to batch consumed and produced messages
together, typically in a consume-transform-produce pattern. Thus, the specified
group_id should be the same as the group_id config parameter of the consumer
being used.
@return [nil] | [
"Sends",
"batch",
"last",
"offset",
"to",
"the",
"consumer",
"group",
"coordinator",
"and",
"also",
"marks",
"this",
"offset",
"as",
"part",
"of",
"the",
"current",
"transaction",
".",
"This",
"offset",
"will",
"be",
"considered",
"committed",
"only",
"if",
"the",
"transaction",
"is",
"committed",
"successfully",
"."
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/producer.rb#L343-L345 | train |
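A hedged consume-transform-produce sketch around the method above; `transform`, the topic, and the group name are assumptions:
consumer.each_batch do |batch|
  producer.transaction do
    batch.messages.each do |message|
      producer.produce(transform(message.value), topic: "output-topic")
    end
    # Commit the consumed offsets as part of the same transaction;
    # group_id must match the consumer's own group id.
    producer.send_offsets_to_transaction(batch: batch, group_id: "my-group")
  end
end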
zendesk/ruby-kafka | lib/kafka/producer.rb | Kafka.Producer.transaction | def transaction
raise 'This method requires a block' unless block_given?
begin_transaction
yield
commit_transaction
rescue Kafka::Producer::AbortTransaction
abort_transaction
rescue
abort_transaction
raise
end | ruby | def transaction
raise 'This method requires a block' unless block_given?
begin_transaction
yield
commit_transaction
rescue Kafka::Producer::AbortTransaction
abort_transaction
rescue
abort_transaction
raise
end | [
"def",
"transaction",
"raise",
"'This method requires a block'",
"unless",
"block_given?",
"begin_transaction",
"yield",
"commit_transaction",
"rescue",
"Kafka",
"::",
"Producer",
"::",
"AbortTransaction",
"abort_transaction",
"rescue",
"abort_transaction",
"raise",
"end"
] | Syntactic sugar for easier transaction usage. Performs the following steps:
- Start the transaction (with Producer#begin_transaction)
- Yield the given block
- Commit the transaction (with Producer#commit_transaction)
If the block raises an exception, the transaction is automatically aborted
*before* the exception is re-raised.
If the block raises the Kafka::Producer::AbortTransaction indicator exception,
the transaction is aborted silently, without re-raising that exception.
@return [nil] | [
"Syntactic",
"sugar",
"to",
"enable",
"easier",
"transaction",
"usage",
".",
"Do",
"the",
"following",
"steps"
] | 2a73471b6a607a52dc85c79301ba522acb4566b5 | https://github.com/zendesk/ruby-kafka/blob/2a73471b6a607a52dc85c79301ba522acb4566b5/lib/kafka/producer.rb#L360-L370 | train |
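A small sketch of the block form, including the silent-abort indicator; `suspicious?` is a hypothetical predicate:
producer.transaction do
  producer.produce("charge recorded", topic: "billing")
  # Swallowed by #transaction: the transaction is aborted without re-raising.
  raise Kafka::Producer::AbortTransaction if suspicious?
end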
teamcapybara/capybara | lib/capybara/session.rb | Capybara.Session.open_new_window | def open_new_window(kind = :tab)
window_opened_by do
if driver.method(:open_new_window).arity.zero?
driver.open_new_window
else
driver.open_new_window(kind)
end
end
end | ruby | def open_new_window(kind = :tab)
window_opened_by do
if driver.method(:open_new_window).arity.zero?
driver.open_new_window
else
driver.open_new_window(kind)
end
end
end | [
"def",
"open_new_window",
"(",
"kind",
"=",
":tab",
")",
"window_opened_by",
"do",
"if",
"driver",
".",
"method",
"(",
":open_new_window",
")",
".",
"arity",
".",
"zero?",
"driver",
".",
"open_new_window",
"else",
"driver",
".",
"open_new_window",
"(",
"kind",
")",
"end",
"end",
"end"
] | Opens a new window.
The current window doesn't change as a result of this call;
it must be switched to explicitly.
@return [Capybara::Window] window that has been opened | [
"Open",
"new",
"window",
".",
"Current",
"window",
"doesn",
"t",
"change",
"as",
"the",
"result",
"of",
"this",
"call",
".",
"It",
"should",
"be",
"switched",
"to",
"explicitly",
"."
] | 3819078c820c5cd3be6f0bc9e8b1b0cc1190bc41 | https://github.com/teamcapybara/capybara/blob/3819078c820c5cd3be6f0bc9e8b1b0cc1190bc41/lib/capybara/session.rb#L461-L469 | train |
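A usage sketch, assuming a Capybara session in `session`; `within_window` scopes a block to the opened window:
window = session.open_new_window(:window) # :tab is the default
session.within_window(window) do
  session.visit("/help")
end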
lostisland/faraday | lib/faraday/autoload.rb | Faraday.AutoloadHelper.autoload_all | def autoload_all(prefix, options)
if prefix =~ %r{^faraday(/|$)}i
prefix = File.join(Faraday.root_path, prefix)
end
options.each do |const_name, path|
autoload const_name, File.join(prefix, path)
end
end | ruby | def autoload_all(prefix, options)
if prefix =~ %r{^faraday(/|$)}i
prefix = File.join(Faraday.root_path, prefix)
end
options.each do |const_name, path|
autoload const_name, File.join(prefix, path)
end
end | [
"def",
"autoload_all",
"(",
"prefix",
",",
"options",
")",
"if",
"prefix",
"=~",
"%r{",
"}i",
"prefix",
"=",
"File",
".",
"join",
"(",
"Faraday",
".",
"root_path",
",",
"prefix",
")",
"end",
"options",
".",
"each",
"do",
"|",
"const_name",
",",
"path",
"|",
"autoload",
"const_name",
",",
"File",
".",
"join",
"(",
"prefix",
",",
"path",
")",
"end",
"end"
] | Registers the constants to be auto loaded.
@param prefix [String] The require prefix. If the path is inside Faraday,
then it will be prefixed with the root path of this loaded
Faraday version.
@param options [{ Symbol => String }] library names.
@example
Faraday.autoload_all 'faraday/foo',
Bar: 'bar'
# requires faraday/foo/bar to load Faraday::Bar.
Faraday::Bar
@return [void] | [
"Registers",
"the",
"constants",
"to",
"be",
"auto",
"loaded",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/autoload.rb#L25-L33 | train |
lostisland/faraday | lib/faraday/autoload.rb | Faraday.AutoloadHelper.all_loaded_constants | def all_loaded_constants
constants
.map { |c| const_get(c) }
.select { |a| a.respond_to?(:loaded?) && a.loaded? }
end | ruby | def all_loaded_constants
constants
.map { |c| const_get(c) }
.select { |a| a.respond_to?(:loaded?) && a.loaded? }
end | [
"def",
"all_loaded_constants",
"constants",
".",
"map",
"{",
"|",
"c",
"|",
"const_get",
"(",
"c",
")",
"}",
".",
"select",
"{",
"|",
"a",
"|",
"a",
".",
"respond_to?",
"(",
":loaded?",
")",
"&&",
"a",
".",
"loaded?",
"}",
"end"
] | Filters the module's constants down to those that have already
been autoloaded.
@return [Array<Class, Module>] | [
"Filters",
"the",
"module",
"s",
"contents",
"with",
"those",
"that",
"have",
"been",
"already",
"autoloaded",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/autoload.rb#L49-L53 | train |
lostisland/faraday | spec/support/helper_methods.rb | Faraday.HelperMethods.parse_multipart | def parse_multipart(boundary, body)
reader = MultipartParser::Reader.new(boundary)
result = { errors: [], parts: [] }
def result.part(name)
hash = self[:parts].detect { |h| h[:part].name == name }
[hash[:part], hash[:body].join]
end
reader.on_part do |part|
result[:parts] << thispart = {
part: part,
body: []
}
part.on_data do |chunk|
thispart[:body] << chunk
end
end
reader.on_error do |msg|
result[:errors] << msg
end
reader.write(body)
result
end | ruby | def parse_multipart(boundary, body)
reader = MultipartParser::Reader.new(boundary)
result = { errors: [], parts: [] }
def result.part(name)
hash = self[:parts].detect { |h| h[:part].name == name }
[hash[:part], hash[:body].join]
end
reader.on_part do |part|
result[:parts] << thispart = {
part: part,
body: []
}
part.on_data do |chunk|
thispart[:body] << chunk
end
end
reader.on_error do |msg|
result[:errors] << msg
end
reader.write(body)
result
end | [
"def",
"parse_multipart",
"(",
"boundary",
",",
"body",
")",
"reader",
"=",
"MultipartParser",
"::",
"Reader",
".",
"new",
"(",
"boundary",
")",
"result",
"=",
"{",
"errors",
":",
"[",
"]",
",",
"parts",
":",
"[",
"]",
"}",
"def",
"result",
".",
"part",
"(",
"name",
")",
"hash",
"=",
"self",
"[",
":parts",
"]",
".",
"detect",
"{",
"|",
"h",
"|",
"h",
"[",
":part",
"]",
".",
"name",
"==",
"name",
"}",
"[",
"hash",
"[",
":part",
"]",
",",
"hash",
"[",
":body",
"]",
".",
"join",
"]",
"end",
"reader",
".",
"on_part",
"do",
"|",
"part",
"|",
"result",
"[",
":parts",
"]",
"<<",
"thispart",
"=",
"{",
"part",
":",
"part",
",",
"body",
":",
"[",
"]",
"}",
"part",
".",
"on_data",
"do",
"|",
"chunk",
"|",
"thispart",
"[",
":body",
"]",
"<<",
"chunk",
"end",
"end",
"reader",
".",
"on_error",
"do",
"|",
"msg",
"|",
"result",
"[",
":errors",
"]",
"<<",
"msg",
"end",
"reader",
".",
"write",
"(",
"body",
")",
"result",
"end"
] | Parse a multipart MIME message, returning a hash of the parsed parts and any multipart errors | [
"parse",
"a",
"multipart",
"MIME",
"message",
"returning",
"a",
"hash",
"of",
"any",
"multipart",
"errors"
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/spec/support/helper_methods.rb#L100-L122 | train |
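A sketch of consuming the helper's result; `boundary` and `request_body` would come from the captured test request, and "file" is an assumed part name:
result = parse_multipart(boundary, request_body)
raise "multipart errors: #{result[:errors].inspect}" unless result[:errors].empty?
# The singleton #part method defined on the result hash returns [part, body]:
part, body = result.part("file")
puts part.name # => "file"
puts body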
lostisland/faraday | lib/faraday/dependency_loader.rb | Faraday.DependencyLoader.dependency | def dependency(lib = nil)
lib ? require(lib) : yield
rescue LoadError, NameError => e
self.load_error = e
end | ruby | def dependency(lib = nil)
lib ? require(lib) : yield
rescue LoadError, NameError => e
self.load_error = e
end | [
"def",
"dependency",
"(",
"lib",
"=",
"nil",
")",
"lib",
"?",
"require",
"(",
"lib",
")",
":",
"yield",
"rescue",
"LoadError",
",",
"NameError",
"=>",
"e",
"self",
".",
"load_error",
"=",
"e",
"end"
] | Executes a block which should try to require and reference dependent
libraries | [
"Executes",
"a",
"block",
"which",
"should",
"try",
"to",
"require",
"and",
"reference",
"dependent",
"libraries"
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/dependency_loader.rb#L10-L14 | train |
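A hedged sketch of both call forms inside a class that extends DependencyLoader, as Faraday's adapters do; `typhoeus` is just an illustrative library:
class MyAdapter < Faraday::Adapter
  # Require form: a LoadError is captured in load_error instead of raising.
  dependency 'typhoeus'

  # Block form: a NameError from a missing constant is captured as well.
  dependency do
    require 'typhoeus'
    Typhoeus::Request
  end
end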
lostisland/faraday | lib/faraday/connection.rb | Faraday.Connection.default_parallel_manager | def default_parallel_manager
@default_parallel_manager ||= begin
adapter = @builder.adapter.klass if @builder.adapter
if support_parallel?(adapter)
adapter.setup_parallel_manager
elsif block_given?
yield
end
end
end | ruby | def default_parallel_manager
@default_parallel_manager ||= begin
adapter = @builder.adapter.klass if @builder.adapter
if support_parallel?(adapter)
adapter.setup_parallel_manager
elsif block_given?
yield
end
end
end | [
"def",
"default_parallel_manager",
"@default_parallel_manager",
"||=",
"begin",
"adapter",
"=",
"@builder",
".",
"adapter",
".",
"klass",
"if",
"@builder",
".",
"adapter",
"if",
"support_parallel?",
"(",
"adapter",
")",
"adapter",
".",
"setup_parallel_manager",
"elsif",
"block_given?",
"yield",
"end",
"end",
"end"
] | Check if the adapter is parallel-capable.
@yield if the adapter isn't parallel-capable, or if no adapter is set yet.
@return [Object, nil] a parallel manager or nil if yielded
@api private | [
"Check",
"if",
"the",
"adapter",
"is",
"parallel",
"-",
"capable",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/connection.rb#L355-L365 | train |
lostisland/faraday | lib/faraday/connection.rb | Faraday.Connection.url_prefix= | def url_prefix=(url, encoder = nil)
uri = @url_prefix = Utils.URI(url)
self.path_prefix = uri.path
params.merge_query(uri.query, encoder)
uri.query = nil
with_uri_credentials(uri) do |user, password|
basic_auth user, password
uri.user = uri.password = nil
end
end | ruby | def url_prefix=(url, encoder = nil)
uri = @url_prefix = Utils.URI(url)
self.path_prefix = uri.path
params.merge_query(uri.query, encoder)
uri.query = nil
with_uri_credentials(uri) do |user, password|
basic_auth user, password
uri.user = uri.password = nil
end
end | [
"def",
"url_prefix",
"=",
"(",
"url",
",",
"encoder",
"=",
"nil",
")",
"uri",
"=",
"@url_prefix",
"=",
"Utils",
".",
"URI",
"(",
"url",
")",
"self",
".",
"path_prefix",
"=",
"uri",
".",
"path",
"params",
".",
"merge_query",
"(",
"uri",
".",
"query",
",",
"encoder",
")",
"uri",
".",
"query",
"=",
"nil",
"with_uri_credentials",
"(",
"uri",
")",
"do",
"|",
"user",
",",
"password",
"|",
"basic_auth",
"user",
",",
"password",
"uri",
".",
"user",
"=",
"uri",
".",
"password",
"=",
"nil",
"end",
"end"
] | Parses the given URL with URI and stores the individual
components in this connection. These components serve as defaults for
requests made by this connection.
@param url [String, URI]
@param encoder [Object]
@example
conn = Faraday::Connection.new { ... }
conn.url_prefix = "https://sushi.com/api"
conn.scheme # => https
conn.path_prefix # => "/api"
conn.get("nigiri?page=2") # accesses https://sushi.com/api/nigiri | [
"Parses",
"the",
"given",
"URL",
"with",
"URI",
"and",
"stores",
"the",
"individual",
"components",
"in",
"this",
"connection",
".",
"These",
"components",
"serve",
"as",
"defaults",
"for",
"requests",
"made",
"by",
"this",
"connection",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/connection.rb#L420-L431 | train |
lostisland/faraday | lib/faraday/connection.rb | Faraday.Connection.build_url | def build_url(url = nil, extra_params = nil)
uri = build_exclusive_url(url)
query_values = params.dup.merge_query(uri.query, options.params_encoder)
query_values.update(extra_params) if extra_params
uri.query =
if query_values.empty?
nil
else
query_values.to_query(options.params_encoder)
end
uri
end | ruby | def build_url(url = nil, extra_params = nil)
uri = build_exclusive_url(url)
query_values = params.dup.merge_query(uri.query, options.params_encoder)
query_values.update(extra_params) if extra_params
uri.query =
if query_values.empty?
nil
else
query_values.to_query(options.params_encoder)
end
uri
end | [
"def",
"build_url",
"(",
"url",
"=",
"nil",
",",
"extra_params",
"=",
"nil",
")",
"uri",
"=",
"build_exclusive_url",
"(",
"url",
")",
"query_values",
"=",
"params",
".",
"dup",
".",
"merge_query",
"(",
"uri",
".",
"query",
",",
"options",
".",
"params_encoder",
")",
"query_values",
".",
"update",
"(",
"extra_params",
")",
"if",
"extra_params",
"uri",
".",
"query",
"=",
"if",
"query_values",
".",
"empty?",
"nil",
"else",
"query_values",
".",
"to_query",
"(",
"options",
".",
"params_encoder",
")",
"end",
"uri",
"end"
] | Takes a relative url for a request and combines it with the defaults
set on the connection instance.
@param url [String]
@param extra_params [Hash]
@example
conn = Faraday::Connection.new { ... }
conn.url_prefix = "https://sushi.com/api?token=abc"
conn.scheme # => https
conn.path_prefix # => "/api"
conn.build_url("nigiri?page=2")
# => https://sushi.com/api/nigiri?token=abc&page=2
conn.build_url("nigiri", page: 2)
# => https://sushi.com/api/nigiri?token=abc&page=2 | [
"Takes",
"a",
"relative",
"url",
"for",
"a",
"request",
"and",
"combines",
"it",
"with",
"the",
"defaults",
"set",
"on",
"the",
"connection",
"instance",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/connection.rb#L464-L477 | train |
lostisland/faraday | lib/faraday/connection.rb | Faraday.Connection.build_request | def build_request(method)
Request.create(method) do |req|
req.params = params.dup
req.headers = headers.dup
req.options = options
yield(req) if block_given?
end
end | ruby | def build_request(method)
Request.create(method) do |req|
req.params = params.dup
req.headers = headers.dup
req.options = options
yield(req) if block_given?
end
end | [
"def",
"build_request",
"(",
"method",
")",
"Request",
".",
"create",
"(",
"method",
")",
"do",
"|",
"req",
"|",
"req",
".",
"params",
"=",
"params",
".",
"dup",
"req",
".",
"headers",
"=",
"headers",
".",
"dup",
"req",
".",
"options",
"=",
"options",
"yield",
"(",
"req",
")",
"if",
"block_given?",
"end",
"end"
] | Creates and configures the request object.
@param method [Symbol]
@yield [Faraday::Request] if block given
@return [Faraday::Request] | [
"Creates",
"and",
"configures",
"the",
"request",
"object",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/connection.rb#L513-L520 | train |
lostisland/faraday | lib/faraday/connection.rb | Faraday.Connection.build_exclusive_url | def build_exclusive_url(url = nil, params = nil, params_encoder = nil)
url = nil if url.respond_to?(:empty?) && url.empty?
base = url_prefix
if url && base.path && base.path !~ %r{/$}
base = base.dup
base.path = base.path + '/' # ensure trailing slash
end
uri = url ? base + url : base
if params
uri.query = params.to_query(params_encoder || options.params_encoder)
end
# rubocop:disable Style/SafeNavigation
uri.query = nil if uri.query && uri.query.empty?
# rubocop:enable Style/SafeNavigation
uri
end | ruby | def build_exclusive_url(url = nil, params = nil, params_encoder = nil)
url = nil if url.respond_to?(:empty?) && url.empty?
base = url_prefix
if url && base.path && base.path !~ %r{/$}
base = base.dup
base.path = base.path + '/' # ensure trailing slash
end
uri = url ? base + url : base
if params
uri.query = params.to_query(params_encoder || options.params_encoder)
end
# rubocop:disable Style/SafeNavigation
uri.query = nil if uri.query && uri.query.empty?
# rubocop:enable Style/SafeNavigation
uri
end | [
"def",
"build_exclusive_url",
"(",
"url",
"=",
"nil",
",",
"params",
"=",
"nil",
",",
"params_encoder",
"=",
"nil",
")",
"url",
"=",
"nil",
"if",
"url",
".",
"respond_to?",
"(",
":empty?",
")",
"&&",
"url",
".",
"empty?",
"base",
"=",
"url_prefix",
"if",
"url",
"&&",
"base",
".",
"path",
"&&",
"base",
".",
"path",
"!~",
"%r{",
"}",
"base",
"=",
"base",
".",
"dup",
"base",
".",
"path",
"=",
"base",
".",
"path",
"+",
"'/'",
"# ensure trailing slash",
"end",
"uri",
"=",
"url",
"?",
"base",
"+",
"url",
":",
"base",
"if",
"params",
"uri",
".",
"query",
"=",
"params",
".",
"to_query",
"(",
"params_encoder",
"||",
"options",
".",
"params_encoder",
")",
"end",
"# rubocop:disable Style/SafeNavigation",
"uri",
".",
"query",
"=",
"nil",
"if",
"uri",
".",
"query",
"&&",
"uri",
".",
"query",
".",
"empty?",
"# rubocop:enable Style/SafeNavigation",
"uri",
"end"
] | Build an absolute URL based on url_prefix.
@param url [String, URI]
@param params [Faraday::Utils::ParamsHash] A Faraday::Utils::ParamsHash to
replace the query values
of the resulting url (default: nil).
@return [URI] | [
"Build",
"an",
"absolute",
"URL",
"based",
"on",
"url_prefix",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/connection.rb#L530-L545 | train |
lostisland/faraday | lib/faraday/utils.rb | Faraday.Utils.normalize_path | def normalize_path(url)
url = URI(url)
(url.path.start_with?('/') ? url.path : '/' + url.path) +
(url.query ? "?#{sort_query_params(url.query)}" : '')
end | ruby | def normalize_path(url)
url = URI(url)
(url.path.start_with?('/') ? url.path : '/' + url.path) +
(url.query ? "?#{sort_query_params(url.query)}" : '')
end | [
"def",
"normalize_path",
"(",
"url",
")",
"url",
"=",
"URI",
"(",
"url",
")",
"(",
"url",
".",
"path",
".",
"start_with?",
"(",
"'/'",
")",
"?",
"url",
".",
"path",
":",
"'/'",
"+",
"url",
".",
"path",
")",
"+",
"(",
"url",
".",
"query",
"?",
"\"?#{sort_query_params(url.query)}\"",
":",
"''",
")",
"end"
] | Receives a String or URI and returns just
the path with the query string sorted. | [
"Receives",
"a",
"String",
"or",
"URI",
"and",
"returns",
"just",
"the",
"path",
"with",
"the",
"query",
"string",
"sorted",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/utils.rb#L82-L86 | train |
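A behaviour sketch, assuming normalize_path is exposed as a module function like the other Utils helpers and that sort_query_params orders the key=value pairs lexicographically:
Faraday::Utils.normalize_path("https://sushi.com/api/nigiri?b=2&a=1")
# => "/api/nigiri?a=1&b=2"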
lostisland/faraday | lib/faraday/utils.rb | Faraday.Utils.deep_merge! | def deep_merge!(target, hash)
hash.each do |key, value|
target[key] = if value.is_a?(Hash) && target[key].is_a?(Hash)
deep_merge(target[key], value)
else
value
end
end
target
end | ruby | def deep_merge!(target, hash)
hash.each do |key, value|
target[key] = if value.is_a?(Hash) && target[key].is_a?(Hash)
deep_merge(target[key], value)
else
value
end
end
target
end | [
"def",
"deep_merge!",
"(",
"target",
",",
"hash",
")",
"hash",
".",
"each",
"do",
"|",
"key",
",",
"value",
"|",
"target",
"[",
"key",
"]",
"=",
"if",
"value",
".",
"is_a?",
"(",
"Hash",
")",
"&&",
"target",
"[",
"key",
"]",
".",
"is_a?",
"(",
"Hash",
")",
"deep_merge",
"(",
"target",
"[",
"key",
"]",
",",
"value",
")",
"else",
"value",
"end",
"end",
"target",
"end"
] | Recursive hash update | [
"Recursive",
"hash",
"update"
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/utils.rb#L89-L98 | train |
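A sketch of the recursive update: nested hashes are merged while scalar values are overwritten; assumes the method is callable as a module function:
target = { headers: { 'User-Agent' => 'faraday' }, timeout: 5 }
Faraday::Utils.deep_merge!(target, headers: { 'Accept' => 'application/json' }, timeout: 10)
# target => { headers: { 'User-Agent' => 'faraday', 'Accept' => 'application/json' }, timeout: 10 }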
lostisland/faraday | lib/faraday/request.rb | Faraday.Request.url | def url(path, params = nil)
if path.respond_to? :query
if (query = path.query)
path = path.dup
path.query = nil
end
else
anchor_index = path.index('#')
path = path.slice(0, anchor_index) unless anchor_index.nil?
path, query = path.split('?', 2)
end
self.path = path
self.params.merge_query query, options.params_encoder
self.params.update(params) if params
end | ruby | def url(path, params = nil)
if path.respond_to? :query
if (query = path.query)
path = path.dup
path.query = nil
end
else
anchor_index = path.index('#')
path = path.slice(0, anchor_index) unless anchor_index.nil?
path, query = path.split('?', 2)
end
self.path = path
self.params.merge_query query, options.params_encoder
self.params.update(params) if params
end | [
"def",
"url",
"(",
"path",
",",
"params",
"=",
"nil",
")",
"if",
"path",
".",
"respond_to?",
":query",
"if",
"(",
"query",
"=",
"path",
".",
"query",
")",
"path",
"=",
"path",
".",
"dup",
"path",
".",
"query",
"=",
"nil",
"end",
"else",
"anchor_index",
"=",
"path",
".",
"index",
"(",
"'#'",
")",
"path",
"=",
"path",
".",
"slice",
"(",
"0",
",",
"anchor_index",
")",
"unless",
"anchor_index",
".",
"nil?",
"path",
",",
"query",
"=",
"path",
".",
"split",
"(",
"'?'",
",",
"2",
")",
"end",
"self",
".",
"path",
"=",
"path",
"self",
".",
"params",
".",
"merge_query",
"query",
",",
"options",
".",
"params_encoder",
"self",
".",
"params",
".",
"update",
"(",
"params",
")",
"if",
"params",
"end"
] | Update path and params.
@param path [URI, String]
@param params [Hash, nil]
@return [void] | [
"Update",
"path",
"and",
"params",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/request.rb#L86-L100 | train |
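A merge-behaviour sketch, assuming `req` is the Faraday::Request yielded inside a `conn.get do |req| ... end` block:
req.url("nigiri?page=2#anchor", per_page: 100)
# req.path   => "nigiri"    (anchor and query stripped from the path)
# req.params => contains both "page" (merged from the query string) and per_page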
lostisland/faraday | lib/faraday/request.rb | Faraday.Request.marshal_dump | def marshal_dump
{
method: method,
body: body,
headers: headers,
path: path,
params: params,
options: options
}
end | ruby | def marshal_dump
{
method: method,
body: body,
headers: headers,
path: path,
params: params,
options: options
}
end | [
"def",
"marshal_dump",
"{",
"method",
":",
"method",
",",
"body",
":",
"body",
",",
"headers",
":",
"headers",
",",
"path",
":",
"path",
",",
"params",
":",
"params",
",",
"options",
":",
"options",
"}",
"end"
] | Marshal serialization support.
@return [Hash] the hash ready to be serialized in Marshal. | [
"Marshal",
"serialization",
"support",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/request.rb#L117-L126 | train |
lostisland/faraday | lib/faraday/request.rb | Faraday.Request.marshal_load | def marshal_load(serialised)
self.method = serialised[:method]
self.body = serialised[:body]
self.headers = serialised[:headers]
self.path = serialised[:path]
self.params = serialised[:params]
self.options = serialised[:options]
end | ruby | def marshal_load(serialised)
self.method = serialised[:method]
self.body = serialised[:body]
self.headers = serialised[:headers]
self.path = serialised[:path]
self.params = serialised[:params]
self.options = serialised[:options]
end | [
"def",
"marshal_load",
"(",
"serialised",
")",
"self",
".",
"method",
"=",
"serialised",
"[",
":method",
"]",
"self",
".",
"body",
"=",
"serialised",
"[",
":body",
"]",
"self",
".",
"headers",
"=",
"serialised",
"[",
":headers",
"]",
"self",
".",
"path",
"=",
"serialised",
"[",
":path",
"]",
"self",
".",
"params",
"=",
"serialised",
"[",
":params",
"]",
"self",
".",
"options",
"=",
"serialised",
"[",
":options",
"]",
"end"
] | Marshal serialization support.
Restores the instance variables according to the +serialised+.
@param serialised [Hash] the serialised object. | [
"Marshal",
"serialization",
"support",
".",
"Restores",
"the",
"instance",
"variables",
"according",
"to",
"the",
"+",
"serialised",
"+",
"."
] | 3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70 | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/request.rb#L131-L138 | train |
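Together with marshal_dump above, this enables a plain Marshal round-trip; a minimal sketch where `request` is an assumed Faraday::Request:
copy = Marshal.load(Marshal.dump(request)) # invokes marshal_dump, then marshal_load
copy.path == request.path # => true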
aws/aws-sdk-ruby | gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb | Aws::S3.Client.get_bucket_policy | def get_bucket_policy(params = {}, options = {}, &block)
req = build_request(:get_bucket_policy, params)
req.send_request(options, &block)
end | ruby | def get_bucket_policy(params = {}, options = {}, &block)
req = build_request(:get_bucket_policy, params)
req.send_request(options, &block)
end | [
"def",
"get_bucket_policy",
"(",
"params",
"=",
"{",
"}",
",",
"options",
"=",
"{",
"}",
",",
"&",
"block",
")",
"req",
"=",
"build_request",
"(",
":get_bucket_policy",
",",
"params",
")",
"req",
".",
"send_request",
"(",
"options",
",",
"block",
")",
"end"
] | Returns the policy of a specified bucket.
@option params [required, String] :bucket
@return [Types::GetBucketPolicyOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
* {Types::GetBucketPolicyOutput#policy #policy} => IO
@example Example: To get bucket policy
# The following example returns bucket policy associated with a bucket.
resp = client.get_bucket_policy({
bucket: "examplebucket",
})
resp.to_h outputs the following:
{
policy: "{\"Version\":\"2008-10-17\",\"Id\":\"LogPolicy\",\"Statement\":[{\"Sid\":\"Enables the log delivery group to publish logs to your bucket \",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"111122223333\"},\"Action\":[\"s3:GetBucketAcl\",\"s3:GetObjectAcl\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::policytest1/*\",\"arn:aws:s3:::policytest1\"]}]}",
}
@example Request syntax with placeholder values
resp = client.get_bucket_policy({
bucket: "BucketName", # required
})
@example Response structure
resp.policy #=> String
@see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy AWS API Documentation
@overload get_bucket_policy(params = {})
@param [Hash] params ({}) | [
"Returns",
"the",
"policy",
"of",
"a",
"specified",
"bucket",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb#L2337-L2340 | train |
aws/aws-sdk-ruby | gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb | Aws::S3.Client.get_object | def get_object(params = {}, options = {}, &block)
req = build_request(:get_object, params)
req.send_request(options, &block)
end | ruby | def get_object(params = {}, options = {}, &block)
req = build_request(:get_object, params)
req.send_request(options, &block)
end | [
"def",
"get_object",
"(",
"params",
"=",
"{",
"}",
",",
"options",
"=",
"{",
"}",
",",
"&",
"block",
")",
"req",
"=",
"build_request",
"(",
":get_object",
",",
"params",
")",
"req",
".",
"send_request",
"(",
"options",
",",
"block",
")",
"end"
] | Retrieves objects from Amazon S3.
@option params [String, IO] :response_target
Where to write response data, file path, or IO object.
@option params [required, String] :bucket
@option params [String] :if_match
Return the object only if its entity tag (ETag) is the same as the one
specified, otherwise return a 412 (precondition failed).
@option params [Time,DateTime,Date,Integer,String] :if_modified_since
Return the object only if it has been modified since the specified
time, otherwise return a 304 (not modified).
@option params [String] :if_none_match
Return the object only if its entity tag (ETag) is different from the
one specified, otherwise return a 304 (not modified).
@option params [Time,DateTime,Date,Integer,String] :if_unmodified_since
Return the object only if it has not been modified since the specified
time, otherwise return a 412 (precondition failed).
@option params [required, String] :key
@option params [String] :range
Downloads the specified range bytes of an object. For more information
about the HTTP Range header, go to
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
@option params [String] :response_cache_control
Sets the Cache-Control header of the response.
@option params [String] :response_content_disposition
Sets the Content-Disposition header of the response
@option params [String] :response_content_encoding
Sets the Content-Encoding header of the response.
@option params [String] :response_content_language
Sets the Content-Language header of the response.
@option params [String] :response_content_type
Sets the Content-Type header of the response.
@option params [Time,DateTime,Date,Integer,String] :response_expires
Sets the Expires header of the response.
@option params [String] :version_id
VersionId used to reference a specific version of the object.
@option params [String] :sse_customer_algorithm
Specifies the algorithm to use to when encrypting the object (e.g.,
AES256).
@option params [String] :sse_customer_key
Specifies the customer-provided encryption key for Amazon S3 to use in
encrypting data. This value is used to store the object and then it is
discarded; Amazon does not store the encryption key. The key must be
appropriate for use with the algorithm specified in the
x-amz-server-side-encryption-customer-algorithm header.
@option params [String] :sse_customer_key_md5
Specifies the 128-bit MD5 digest of the encryption key according to
RFC 1321. Amazon S3 uses this header for a message integrity check to
ensure the encryption key was transmitted without error.
@option params [String] :request_payer
Confirms that the requester knows that she or he will be charged for
the request. Bucket owners need not specify this parameter in their
requests. Documentation on downloading objects from requester pays
buckets can be found at
http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
@option params [Integer] :part_number
Part number of the object being read. This is a positive integer
between 1 and 10,000. Effectively performs a 'ranged' GET request
for the part specified. Useful for downloading just a part of an
object.
@return [Types::GetObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
* {Types::GetObjectOutput#body #body} => IO
* {Types::GetObjectOutput#delete_marker #delete_marker} => Boolean
* {Types::GetObjectOutput#accept_ranges #accept_ranges} => String
* {Types::GetObjectOutput#expiration #expiration} => String
* {Types::GetObjectOutput#restore #restore} => String
* {Types::GetObjectOutput#last_modified #last_modified} => Time
* {Types::GetObjectOutput#content_length #content_length} => Integer
* {Types::GetObjectOutput#etag #etag} => String
* {Types::GetObjectOutput#missing_meta #missing_meta} => Integer
* {Types::GetObjectOutput#version_id #version_id} => String
* {Types::GetObjectOutput#cache_control #cache_control} => String
* {Types::GetObjectOutput#content_disposition #content_disposition} => String
* {Types::GetObjectOutput#content_encoding #content_encoding} => String
* {Types::GetObjectOutput#content_language #content_language} => String
* {Types::GetObjectOutput#content_range #content_range} => String
* {Types::GetObjectOutput#content_type #content_type} => String
* {Types::GetObjectOutput#expires #expires} => Time
* {Types::GetObjectOutput#expires_string #expires_string} => String
* {Types::GetObjectOutput#website_redirect_location #website_redirect_location} => String
* {Types::GetObjectOutput#server_side_encryption #server_side_encryption} => String
* {Types::GetObjectOutput#metadata #metadata} => Hash<String,String>
* {Types::GetObjectOutput#sse_customer_algorithm #sse_customer_algorithm} => String
* {Types::GetObjectOutput#sse_customer_key_md5 #sse_customer_key_md5} => String
* {Types::GetObjectOutput#ssekms_key_id #ssekms_key_id} => String
* {Types::GetObjectOutput#storage_class #storage_class} => String
* {Types::GetObjectOutput#request_charged #request_charged} => String
* {Types::GetObjectOutput#replication_status #replication_status} => String
* {Types::GetObjectOutput#parts_count #parts_count} => Integer
* {Types::GetObjectOutput#tag_count #tag_count} => Integer
* {Types::GetObjectOutput#object_lock_mode #object_lock_mode} => String
* {Types::GetObjectOutput#object_lock_retain_until_date #object_lock_retain_until_date} => Time
* {Types::GetObjectOutput#object_lock_legal_hold_status #object_lock_legal_hold_status} => String
@example Example: To retrieve an object
# The following example retrieves an object for an S3 bucket.
resp = client.get_object({
bucket: "examplebucket",
key: "HappyFace.jpg",
})
resp.to_h outputs the following:
{
accept_ranges: "bytes",
content_length: 3191,
content_type: "image/jpeg",
etag: "\"6805f2cfc46c0f04559748bb039d69ae\"",
last_modified: Time.parse("Thu, 15 Dec 2016 01:19:41 GMT"),
metadata: {
},
tag_count: 2,
version_id: "null",
}
@example Example: To retrieve a byte range of an object
# The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a
# specific byte range.
resp = client.get_object({
bucket: "examplebucket",
key: "SampleFile.txt",
range: "bytes=0-9",
})
resp.to_h outputs the following:
{
accept_ranges: "bytes",
content_length: 10,
content_range: "bytes 0-9/43",
content_type: "text/plain",
etag: "\"0d94420ffd0bc68cd3d152506b97a9cc\"",
last_modified: Time.parse("Thu, 09 Oct 2014 22:57:28 GMT"),
metadata: {
},
version_id: "null",
}
@example Download an object to disk
# stream object directly to disk
resp = s3.get_object(
response_target: '/path/to/file',
bucket: 'bucket-name',
key: 'object-key')
# you can still access other response data
resp.metadata #=> { ... }
resp.etag #=> "..."
@example Download object into memory
# omit :response_target to download to a StringIO in memory
resp = s3.get_object(bucket: 'bucket-name', key: 'object-key')
# call #read or #string on the response body
resp.body.read
#=> '...'
@example Streaming data to a block
# WARNING: yielding data to a block disables retries of networking errors
File.open('/path/to/file', 'wb') do |file|
s3.get_object(bucket: 'bucket-name', key: 'object-key') do |chunk|
file.write(chunk)
end
end
@example Request syntax with placeholder values
resp = client.get_object({
bucket: "BucketName", # required
if_match: "IfMatch",
if_modified_since: Time.now,
if_none_match: "IfNoneMatch",
if_unmodified_since: Time.now,
key: "ObjectKey", # required
range: "Range",
response_cache_control: "ResponseCacheControl",
response_content_disposition: "ResponseContentDisposition",
response_content_encoding: "ResponseContentEncoding",
response_content_language: "ResponseContentLanguage",
response_content_type: "ResponseContentType",
response_expires: Time.now,
version_id: "ObjectVersionId",
sse_customer_algorithm: "SSECustomerAlgorithm",
sse_customer_key: "SSECustomerKey",
sse_customer_key_md5: "SSECustomerKeyMD5",
request_payer: "requester", # accepts requester
part_number: 1,
})
@example Response structure
resp.body #=> IO
resp.delete_marker #=> Boolean
resp.accept_ranges #=> String
resp.expiration #=> String
resp.restore #=> String
resp.last_modified #=> Time
resp.content_length #=> Integer
resp.etag #=> String
resp.missing_meta #=> Integer
resp.version_id #=> String
resp.cache_control #=> String
resp.content_disposition #=> String
resp.content_encoding #=> String
resp.content_language #=> String
resp.content_range #=> String
resp.content_type #=> String
resp.expires #=> Time
resp.expires_string #=> String
resp.website_redirect_location #=> String
resp.server_side_encryption #=> String, one of "AES256", "aws:kms"
resp.metadata #=> Hash
resp.metadata["MetadataKey"] #=> String
resp.sse_customer_algorithm #=> String
resp.sse_customer_key_md5 #=> String
resp.ssekms_key_id #=> String
resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE"
resp.request_charged #=> String, one of "requester"
resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA"
resp.parts_count #=> Integer
resp.tag_count #=> Integer
resp.object_lock_mode #=> String, one of "GOVERNANCE", "COMPLIANCE"
resp.object_lock_retain_until_date #=> Time
resp.object_lock_legal_hold_status #=> String, one of "ON", "OFF"
@see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject AWS API Documentation
@overload get_object(params = {})
@param [Hash] params ({}) | [
"Retrieves",
"objects",
"from",
"Amazon",
"S3",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb#L2900-L2903 | train |
aws/aws-sdk-ruby | gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb | Aws::S3.Client.get_object_torrent | def get_object_torrent(params = {}, options = {}, &block)
req = build_request(:get_object_torrent, params)
req.send_request(options, &block)
end | ruby | def get_object_torrent(params = {}, options = {}, &block)
req = build_request(:get_object_torrent, params)
req.send_request(options, &block)
end | [
"def",
"get_object_torrent",
"(",
"params",
"=",
"{",
"}",
",",
"options",
"=",
"{",
"}",
",",
"&",
"block",
")",
"req",
"=",
"build_request",
"(",
":get_object_torrent",
",",
"params",
")",
"req",
".",
"send_request",
"(",
"options",
",",
"block",
")",
"end"
] | Return torrent files from a bucket.
@option params [String, IO] :response_target
Where to write response data, file path, or IO object.
@option params [required, String] :bucket
@option params [required, String] :key
@option params [String] :request_payer
Confirms that the requester knows that she or he will be charged for
the request. Bucket owners need not specify this parameter in their
requests. Documentation on downloading objects from requester pays
buckets can be found at
http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
@return [Types::GetObjectTorrentOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
* {Types::GetObjectTorrentOutput#body #body} => IO
* {Types::GetObjectTorrentOutput#request_charged #request_charged} => String
@example Example: To retrieve torrent files for an object
# The following example retrieves torrent files of an object.
resp = client.get_object_torrent({
bucket: "examplebucket",
key: "HappyFace.jpg",
})
resp.to_h outputs the following:
{
}
@example Request syntax with placeholder values
resp = client.get_object_torrent({
bucket: "BucketName", # required
key: "ObjectKey", # required
request_payer: "requester", # accepts requester
})
@example Response structure
resp.body #=> IO
resp.request_charged #=> String, one of "requester"
@see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent AWS API Documentation
@overload get_object_torrent(params = {})
@param [Hash] params ({}) | [
"Return",
"torrent",
"files",
"from",
"a",
"bucket",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb#L3273-L3276 | train |
aws/aws-sdk-ruby | gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb | Aws::S3.Client.wait_until | def wait_until(waiter_name, params = {}, options = {})
w = waiter(waiter_name, options)
yield(w.waiter) if block_given? # deprecated
w.wait(params)
end | ruby | def wait_until(waiter_name, params = {}, options = {})
w = waiter(waiter_name, options)
yield(w.waiter) if block_given? # deprecated
w.wait(params)
end | [
"def",
"wait_until",
"(",
"waiter_name",
",",
"params",
"=",
"{",
"}",
",",
"options",
"=",
"{",
"}",
")",
"w",
"=",
"waiter",
"(",
"waiter_name",
",",
"options",
")",
"yield",
"(",
"w",
".",
"waiter",
")",
"if",
"block_given?",
"# deprecated",
"w",
".",
"wait",
"(",
"params",
")",
"end"
] | Polls an API operation until a resource enters a desired state.
## Basic Usage
A waiter will call an API operation until:
* It is successful
* It enters a terminal state
* It makes the maximum number of attempts
In between attempts, the waiter will sleep.
# polls in a loop, sleeping between attempts
client.wait_until(waiter_name, params)
## Configuration
You can configure the maximum number of polling attempts, and the
delay (in seconds) between each polling attempt. You can pass
configuration as the final arguments hash.
# poll for ~25 seconds
client.wait_until(waiter_name, params, {
max_attempts: 5,
delay: 5,
})
## Callbacks
You can be notified before each polling attempt and before each
delay. If you throw `:success` or `:failure` from these callbacks,
it will terminate the waiter.
started_at = Time.now
client.wait_until(waiter_name, params, {
# disable max attempts
max_attempts: nil,
# poll for 1 hour, instead of a number of attempts
before_wait: -> (attempts, response) do
throw :failure if Time.now - started_at > 3600
end
})
## Handling Errors
When a waiter is unsuccessful, it will raise an error.
All of the failure errors extend from
{Aws::Waiters::Errors::WaiterFailed}.
begin
client.wait_until(...)
rescue Aws::Waiters::Errors::WaiterFailed
# resource did not enter the desired state in time
end
## Valid Waiters
The following table lists the valid waiter names, the operations they call,
and the default `:delay` and `:max_attempts` values.
| waiter_name | params | :delay | :max_attempts |
| ----------------- | -------------- | -------- | ------------- |
| bucket_exists | {#head_bucket} | 5 | 20 |
| bucket_not_exists | {#head_bucket} | 5 | 20 |
| object_exists | {#head_object} | 5 | 20 |
| object_not_exists | {#head_object} | 5 | 20 |
@raise [Errors::FailureStateError] Raised when the waiter terminates
because the waiter has entered a state that it will not transition
out of, preventing success.
@raise [Errors::TooManyAttemptsError] Raised when the configured
maximum number of attempts have been made, and the waiter is not
yet successful.
@raise [Errors::UnexpectedError] Raised when an error is encountered
while polling for a resource that is not expected.
@raise [Errors::NoSuchWaiterError] Raised when you request to wait
for an unknown state.
@return [Boolean] Returns `true` if the waiter was successful.
@param [Symbol] waiter_name
@param [Hash] params ({})
@param [Hash] options ({})
@option options [Integer] :max_attempts
@option options [Integer] :delay
@option options [Proc] :before_attempt
@option options [Proc] :before_wait | [
"Polls",
"an",
"API",
"operation",
"until",
"a",
"resource",
"enters",
"a",
"desired",
"state",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb#L7139-L7143 | train |
aws/aws-sdk-ruby | gems/aws-sdk-core/lib/aws-sdk-core/shared_config.rb | Aws.SharedConfig.fresh | def fresh(options = {})
@profile_name = nil
@credentials_path = nil
@config_path = nil
@parsed_credentials = {}
@parsed_config = nil
@config_enabled = options[:config_enabled] ? true : false
@profile_name = determine_profile(options)
@credentials_path = options[:credentials_path] ||
determine_credentials_path
load_credentials_file if loadable?(@credentials_path)
if @config_enabled
@config_path = options[:config_path] || determine_config_path
load_config_file if loadable?(@config_path)
end
end | ruby | def fresh(options = {})
@profile_name = nil
@credentials_path = nil
@config_path = nil
@parsed_credentials = {}
@parsed_config = nil
@config_enabled = options[:config_enabled] ? true : false
@profile_name = determine_profile(options)
@credentials_path = options[:credentials_path] ||
determine_credentials_path
load_credentials_file if loadable?(@credentials_path)
if @config_enabled
@config_path = options[:config_path] || determine_config_path
load_config_file if loadable?(@config_path)
end
end | [
"def",
"fresh",
"(",
"options",
"=",
"{",
"}",
")",
"@profile_name",
"=",
"nil",
"@credentials_path",
"=",
"nil",
"@config_path",
"=",
"nil",
"@parsed_credentials",
"=",
"{",
"}",
"@parsed_config",
"=",
"nil",
"@config_enabled",
"=",
"options",
"[",
":config_enabled",
"]",
"?",
"true",
":",
"false",
"@profile_name",
"=",
"determine_profile",
"(",
"options",
")",
"@credentials_path",
"=",
"options",
"[",
":credentials_path",
"]",
"||",
"determine_credentials_path",
"load_credentials_file",
"if",
"loadable?",
"(",
"@credentials_path",
")",
"if",
"@config_enabled",
"@config_path",
"=",
"options",
"[",
":config_path",
"]",
"||",
"determine_config_path",
"load_config_file",
"if",
"loadable?",
"(",
"@config_path",
")",
"end",
"end"
] | Constructs a new SharedConfig provider object. This will load the shared
credentials file, and optionally the shared configuration file, as ini
files which support profiles.
By default, the shared credential file (the default path for which is
`~/.aws/credentials`) and the shared config file (the default path for
which is `~/.aws/config`) are loaded. However, if you set the
`ENV['AWS_SDK_CONFIG_OPT_OUT']` environment variable, only the shared
credential file will be loaded. You can specify the shared credential
file path with the `ENV['AWS_SHARED_CREDENTIALS_FILE']` environment
variable or with the `:credentials_path` option. Similarly, you can
specify the shared config file path with the `ENV['AWS_CONFIG_FILE']`
environment variable or with the `:config_path` option.
The default profile name is 'default'. You can specify the profile name
with the `ENV['AWS_PROFILE']` environment variable or with the
`:profile_name` option.
@param [Hash] options
@option options [String] :credentials_path Path to the shared credentials
file. If not specified, will check `ENV['AWS_SHARED_CREDENTIALS_FILE']`
before using the default value of "#{Dir.home}/.aws/credentials".
@option options [String] :config_path Path to the shared config file.
If not specified, will check `ENV['AWS_CONFIG_FILE']` before using the
default value of "#{Dir.home}/.aws/config".
@option options [String] :profile_name The credential/config profile name
to use. If not specified, will check `ENV['AWS_PROFILE']` before using
the fixed default value of 'default'.
@option options [Boolean] :config_enabled If true, loads the shared config
file and enables new config values outside of the old shared credential
spec.
@api private | [
"Constructs",
"a",
"new",
"SharedConfig",
"provider",
"object",
".",
"This",
"will",
"load",
"the",
"shared",
"credentials",
"file",
"and",
"optionally",
"the",
"shared",
"configuration",
"file",
"as",
"ini",
"files",
"which",
"support",
"profiles",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-core/lib/aws-sdk-core/shared_config.rb#L61-L76 | train |
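A minimal usage sketch for the constructor options documented above; the file paths and profile names are placeholders, and note the method is marked `@api private` in the source, so this is illustrative only.
require "aws-sdk-core"

# Load both shared files with a non-default profile.
shared = Aws::SharedConfig.new(
  credentials_path: "/home/deploy/.aws/credentials", # placeholder path
  config_path: "/home/deploy/.aws/config",           # placeholder path
  profile_name: "staging",
  config_enabled: true
)

# #fresh re-reads the files with new options, e.g. after switching profiles.
shared.fresh(profile_name: "production", config_enabled: true)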
aws/aws-sdk-ruby | gems/aws-sdk-core/lib/aws-sdk-core/shared_config.rb | Aws.SharedConfig.assume_role_credentials_from_config | def assume_role_credentials_from_config(opts = {})
p = opts.delete(:profile) || @profile_name
chain_config = opts.delete(:chain_config)
credentials = assume_role_from_profile(@parsed_credentials, p, opts, chain_config)
if @parsed_config
credentials ||= assume_role_from_profile(@parsed_config, p, opts, chain_config)
end
credentials
end | ruby | def assume_role_credentials_from_config(opts = {})
p = opts.delete(:profile) || @profile_name
chain_config = opts.delete(:chain_config)
credentials = assume_role_from_profile(@parsed_credentials, p, opts, chain_config)
if @parsed_config
credentials ||= assume_role_from_profile(@parsed_config, p, opts, chain_config)
end
credentials
end | [
"def",
"assume_role_credentials_from_config",
"(",
"opts",
"=",
"{",
"}",
")",
"p",
"=",
"opts",
".",
"delete",
"(",
":profile",
")",
"||",
"@profile_name",
"chain_config",
"=",
"opts",
".",
"delete",
"(",
":chain_config",
")",
"credentials",
"=",
"assume_role_from_profile",
"(",
"@parsed_credentials",
",",
"p",
",",
"opts",
",",
"chain_config",
")",
"if",
"@parsed_config",
"credentials",
"||=",
"assume_role_from_profile",
"(",
"@parsed_config",
",",
"p",
",",
"opts",
",",
"chain_config",
")",
"end",
"credentials",
"end"
] | Attempts to assume a role from shared config or shared credentials file.
Will always attempt first to assume a role from the shared credentials
file, if present. | [
"Attempts",
"to",
"assume",
"a",
"role",
"from",
"shared",
"config",
"or",
"shared",
"credentials",
"file",
".",
"Will",
"always",
"attempt",
"first",
"to",
"assume",
"a",
"role",
"from",
"the",
"shared",
"credentials",
"file",
"if",
"present",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-core/lib/aws-sdk-core/shared_config.rb#L114-L122 | train |
aws/aws-sdk-ruby | gems/aws-sdk-core/lib/aws-sdk-core/structure.rb | Aws.Structure.to_h | def to_h(obj = self)
case obj
when Struct
obj.members.each.with_object({}) do |member, hash|
value = obj[member]
hash[member] = to_hash(value) unless value.nil?
end
when Hash
obj.each.with_object({}) do |(key, value), hash|
hash[key] = to_hash(value)
end
when Array
obj.collect { |value| to_hash(value) }
else
obj
end
end | ruby | def to_h(obj = self)
case obj
when Struct
obj.members.each.with_object({}) do |member, hash|
value = obj[member]
hash[member] = to_hash(value) unless value.nil?
end
when Hash
obj.each.with_object({}) do |(key, value), hash|
hash[key] = to_hash(value)
end
when Array
obj.collect { |value| to_hash(value) }
else
obj
end
end | [
"def",
"to_h",
"(",
"obj",
"=",
"self",
")",
"case",
"obj",
"when",
"Struct",
"obj",
".",
"members",
".",
"each",
".",
"with_object",
"(",
"{",
"}",
")",
"do",
"|",
"member",
",",
"hash",
"|",
"value",
"=",
"obj",
"[",
"member",
"]",
"hash",
"[",
"member",
"]",
"=",
"to_hash",
"(",
"value",
")",
"unless",
"value",
".",
"nil?",
"end",
"when",
"Hash",
"obj",
".",
"each",
".",
"with_object",
"(",
"{",
"}",
")",
"do",
"|",
"(",
"key",
",",
"value",
")",
",",
"hash",
"|",
"hash",
"[",
"key",
"]",
"=",
"to_hash",
"(",
"value",
")",
"end",
"when",
"Array",
"obj",
".",
"collect",
"{",
"|",
"value",
"|",
"to_hash",
"(",
"value",
")",
"}",
"else",
"obj",
"end",
"end"
] | Deeply converts the Structure into a hash. Structure members that
are `nil` are omitted from the resultant hash.
You can call #orig_to_h to get vanilla #to_h behavior as defined
in stdlib Struct.
@return [Hash] | [
"Deeply",
"converts",
"the",
"Structure",
"into",
"a",
"hash",
".",
"Structure",
"members",
"that",
"are",
"nil",
"are",
"omitted",
"from",
"the",
"resultant",
"hash",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-core/lib/aws-sdk-core/structure.rb#L29-L45 | train |
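A small illustration of the deep conversion described above; `Person` is an invented Struct that mixes in `Aws::Structure` the way the SDK's generated types do.
require "aws-sdk-core"

Person = Struct.new(:name, :tags, :address) do
  include Aws::Structure
end

person = Person.new("Alice", ["admin", "ops"], nil)

# nil members (:address here) are omitted; nested values are walked.
person.to_h #=> {:name=>"Alice", :tags=>["admin", "ops"]}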
aws/aws-sdk-ruby | gems/aws-sdk-core/lib/aws-sdk-core/client_stubs.rb | Aws.ClientStubs.api_requests | def api_requests(options = {})
if config.stub_responses
if options[:exclude_presign]
@api_requests.reject {|req| req[:context][:presigned_url] }
else
@api_requests
end
else
msg = 'This method is only implemented for stubbed clients, and is '
msg << 'available when you enable stubbing in the constructor with `stub_responses: true`'
raise NotImplementedError.new(msg)
end
end | ruby | def api_requests(options = {})
if config.stub_responses
if options[:exclude_presign]
@api_requests.reject {|req| req[:context][:presigned_url] }
else
@api_requests
end
else
msg = 'This method is only implemented for stubbed clients, and is '
msg << 'available when you enable stubbing in the constructor with `stub_responses: true`'
raise NotImplementedError.new(msg)
end
end | [
"def",
"api_requests",
"(",
"options",
"=",
"{",
"}",
")",
"if",
"config",
".",
"stub_responses",
"if",
"options",
"[",
":exclude_presign",
"]",
"@api_requests",
".",
"reject",
"{",
"|",
"req",
"|",
"req",
"[",
":context",
"]",
"[",
":presigned_url",
"]",
"}",
"else",
"@api_requests",
"end",
"else",
"msg",
"=",
"'This method is only implemented for stubbed clients, and is '",
"msg",
"<<",
"'available when you enable stubbing in the constructor with `stub_responses: true`'",
"raise",
"NotImplementedError",
".",
"new",
"(",
"msg",
")",
"end",
"end"
] | Allows you to access all of the requests that the stubbed client has made
@param [Boolean] exclude_presign Set to true to filter out requests that were never
sent, such as those used only to generate presigned URLs. Defaults to false.
@return [Array] Returns an array of the api requests made, each request object contains the
:operation_name, :params, and :context of the request.
@raise [NotImplementedError] Raises `NotImplementedError` when the client is not stubbed | [
"Allows",
"you",
"to",
"access",
"all",
"of",
"the",
"requests",
"that",
"the",
"stubbed",
"client",
"has",
"made"
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-core/lib/aws-sdk-core/client_stubs.rb#L192-L204 | train |
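A sketch of inspecting the request log on a stubbed client; the bucket name is a placeholder.
require "aws-sdk-s3"

s3 = Aws::S3::Client.new(stub_responses: true, region: "us-east-1")
s3.list_objects_v2(bucket: "example-bucket")

# Each recorded request exposes :operation_name, :params, and :context.
s3.api_requests.first[:operation_name] #=> :list_objects_v2

# Requests built only to presign URLs can be filtered out.
s3.api_requests(exclude_presign: true).size #=> 1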
aws/aws-sdk-ruby | gems/aws-sdk-core/lib/aws-sdk-core/client_stubs.rb | Aws.ClientStubs.stub_data | def stub_data(operation_name, data = {})
Stubbing::StubData.new(config.api.operation(operation_name)).stub(data)
end | ruby | def stub_data(operation_name, data = {})
Stubbing::StubData.new(config.api.operation(operation_name)).stub(data)
end | [
"def",
"stub_data",
"(",
"operation_name",
",",
"data",
"=",
"{",
"}",
")",
"Stubbing",
"::",
"StubData",
".",
"new",
"(",
"config",
".",
"api",
".",
"operation",
"(",
"operation_name",
")",
")",
".",
"stub",
"(",
"data",
")",
"end"
] | Generates and returns stubbed response data from the named operation.
s3 = Aws::S3::Client.new
s3.stub_data(:list_buckets)
#=> #<struct Aws::S3::Types::ListBucketsOutput buckets=[], owner=#<struct Aws::S3::Types::Owner display_name="DisplayName", id="ID">>
In addition to generating default stubs, you can provide data to
apply to the response stub.
s3.stub_data(:list_buckets, buckets:[{name:'aws-sdk'}])
#=> #<struct Aws::S3::Types::ListBucketsOutput
buckets=[#<struct Aws::S3::Types::Bucket name="aws-sdk", creation_date=nil>],
owner=#<struct Aws::S3::Types::Owner display_name="DisplayName", id="ID">>
@param [Symbol] operation_name
@param [Hash] data
@return [Structure] Returns a stubbed response data structure. The
actual class returned will depend on the given `operation_name`. | [
"Generates",
"and",
"returns",
"stubbed",
"response",
"data",
"from",
"the",
"named",
"operation",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-core/lib/aws-sdk-core/client_stubs.rb#L224-L226 | train |
aws/aws-sdk-ruby | gems/aws-sdk-core/lib/aws-sdk-core/pageable_response.rb | Aws.PageableResponse.each | def each(&block)
return enum_for(:each_page) unless block_given?
response = self
yield(response)
until response.last_page?
response = response.next_page
yield(response)
end
end | ruby | def each(&block)
return enum_for(:each_page) unless block_given?
response = self
yield(response)
until response.last_page?
response = response.next_page
yield(response)
end
end | [
"def",
"each",
"(",
"&",
"block",
")",
"return",
"enum_for",
"(",
":each_page",
")",
"unless",
"block_given?",
"response",
"=",
"self",
"yield",
"(",
"response",
")",
"until",
"response",
".",
"last_page?",
"response",
"=",
"response",
".",
"next_page",
"yield",
"(",
"response",
")",
"end",
"end"
] | Yields the current and each following response to the given block.
@yieldparam [Response] response
@return [Enumerable,nil] Returns a new Enumerable if no block is given. | [
"Yields",
"the",
"current",
"and",
"each",
"following",
"response",
"to",
"the",
"given",
"block",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-core/lib/aws-sdk-core/pageable_response.rb#L72-L80 | train |
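A usage sketch of the paging enumerator described above; with a stubbed client there is a single empty page, but the shape of the loop is the same against a live endpoint.
require "aws-sdk-s3"

s3 = Aws::S3::Client.new(stub_responses: true, region: "us-east-1")
resp = s3.list_objects_v2(bucket: "example-bucket")

# Yields the first response, then follows pagination tokens
# until #last_page? is true.
resp.each_page do |page|
  page.contents.each { |object| puts object.key }
end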
aws/aws-sdk-ruby | gems/aws-sdk-glacier/lib/aws-sdk-glacier/client.rb | Aws::Glacier.Client.get_job_output | def get_job_output(params = {}, options = {}, &block)
req = build_request(:get_job_output, params)
req.send_request(options, &block)
end | ruby | def get_job_output(params = {}, options = {}, &block)
req = build_request(:get_job_output, params)
req.send_request(options, &block)
end | [
"def",
"get_job_output",
"(",
"params",
"=",
"{",
"}",
",",
"options",
"=",
"{",
"}",
",",
"&",
"block",
")",
"req",
"=",
"build_request",
"(",
":get_job_output",
",",
"params",
")",
"req",
".",
"send_request",
"(",
"options",
",",
"block",
")",
"end"
] | This operation downloads the output of the job you initiated using
InitiateJob. Depending on the job type you specified when you
initiated the job, the output will be either the content of an archive
or a vault inventory.
You can download all the job output or download a portion of the
output by specifying a byte range. In the case of an archive retrieval
job, depending on the byte range you specify, Amazon Glacier returns
the checksum for the portion of the data. You can compute the checksum
on the client and verify that the values match to ensure the portion
you downloaded is the correct data.
A job ID will not expire for at least 24 hours after Amazon Glacier
completes the job. For both archive and inventory retrieval jobs, you
should verify the downloaded size against the size returned in the
headers from the **Get Job Output** response.
For archive retrieval jobs, you should also verify that the size is
what you expected. If you download a portion of the output, the
expected size is based on the range of bytes you specified. For
example, if you specify a range of `bytes=0-1048575`, you should
verify your download size is 1,048,576 bytes. If you download an
entire archive, the expected size is the size of the archive when you
uploaded it to Amazon Glacier. The expected size is also returned in
the headers from the **Get Job Output** response.
In the case of an archive retrieval job, depending on the byte range
you specify, Amazon Glacier returns the checksum for the portion of
the data. To ensure the portion you downloaded is the correct data,
compute the checksum on the client, verify that the values match, and
verify that the size is what you expected.
A job ID does not expire for at least 24 hours after Amazon Glacier
completes the job. That is, you can download the job output within the
24-hour period after Amazon Glacier completes the job.
An AWS account has full permission to perform all operations
(actions). However, AWS Identity and Access Management (IAM) users
don't have any permissions by default. You must grant them explicit
permission to perform specific actions. For more information, see
[Access Control Using AWS Identity and Access Management (IAM)][1].
For conceptual information and the underlying REST API, see
[Downloading a Vault Inventory][2], [Downloading an Archive][3], and
[Get Job Output ][4]
[1]: http://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
[2]: http://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html
[3]: http://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html
[4]: http://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html
@option params [required, String] :account_id
The `AccountId` value is the AWS account ID of the account that owns
the vault. You can either specify an AWS account ID or optionally a
single '`-`' (hyphen), in which case Amazon Glacier uses the AWS
account ID associated with the credentials used to sign the request.
If you use an account ID, do not include any hyphens ('-') in the
ID.
@option params [required, String] :vault_name
The name of the vault.
@option params [required, String] :job_id
The job ID whose data is downloaded.
@option params [String] :range
The range of bytes to retrieve from the output. For example, if you
want to download the first 1,048,576 bytes, specify the range as
`bytes=0-1048575`. By default, this operation downloads the entire
output.
If the job output is large, then you can use a range to retrieve a
portion of the output. This allows you to download the entire output
in smaller chunks of bytes. For example, suppose you have 1 GB of job
output you want to download and you decide to download 128 MB chunks
of data at a time, which is a total of eight Get Job Output requests.
You use the following process to download the job output:
1. Download a 128 MB chunk of output by specifying the appropriate
byte range. Verify that all 128 MB of data was received.
2. Along with the data, the response includes a SHA256 tree hash of
the payload. You compute the checksum of the payload on the client
and compare it with the checksum you received in the response to
ensure you received all the expected data.
3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output
data, each time specifying the appropriate byte range.
4. After downloading all the parts of the job output, you have a list
of eight checksum values. Compute the tree hash of these values to
find the checksum of the entire output. Using the DescribeJob API,
obtain job information of the job that provided you the output.
The response includes the checksum of the entire archive stored in
Amazon Glacier. You compare this value with the checksum you
computed to ensure you have downloaded the entire archive content
with no errors.
@return [Types::GetJobOutputOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
* {Types::GetJobOutputOutput#body #body} => IO
* {Types::GetJobOutputOutput#checksum #checksum} => String
* {Types::GetJobOutputOutput#status #status} => Integer
* {Types::GetJobOutputOutput#content_range #content_range} => String
* {Types::GetJobOutputOutput#accept_ranges #accept_ranges} => String
* {Types::GetJobOutputOutput#content_type #content_type} => String
* {Types::GetJobOutputOutput#archive_description #archive_description} => String
@example Example: To get the output of a previously initiated job
# The example downloads the output of a previously initiated inventory retrieval job that is identified by the job ID.
resp = client.get_job_output({
account_id: "-",
job_id: "zbxcm3Z_3z5UkoroF7SuZKrxgGoDc3RloGduS7Eg-RO47Yc6FxsdGBgf_Q2DK5Ejh18CnTS5XW4_XqlNHS61dsO4CnMW",
range: "",
vault_name: "my-vaul",
})
resp.to_h outputs the following:
{
accept_ranges: "bytes",
body: "inventory-data",
content_type: "application/json",
status: 200,
}
@example Request syntax with placeholder values
resp = client.get_job_output({
account_id: "string", # required
vault_name: "string", # required
job_id: "string", # required
range: "string",
})
@example Response structure
resp.body #=> IO
resp.checksum #=> String
resp.status #=> Integer
resp.content_range #=> String
resp.accept_ranges #=> String
resp.content_type #=> String
resp.archive_description #=> String
@overload get_job_output(params = {})
@param [Hash] params ({}) | [
"This",
"operation",
"downloads",
"the",
"output",
"of",
"the",
"job",
"you",
"initiated",
"using",
"InitiateJob",
".",
"Depending",
"on",
"the",
"job",
"type",
"you",
"specified",
"when",
"you",
"initiated",
"the",
"job",
"the",
"output",
"will",
"be",
"either",
"the",
"content",
"of",
"an",
"archive",
"or",
"a",
"vault",
"inventory",
"."
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-glacier/lib/aws-sdk-glacier/client.rb#L1448-L1451 | train |
aws/aws-sdk-ruby | gems/aws-sdk-core/lib/aws-sdk-core/endpoint_cache.rb | Aws.EndpointCache.key? | def key?(key)
if @entries.key?(key) && (@entries[key].nil? || @entries[key].expired?)
self.delete(key)
end
@entries.key?(key)
end | ruby | def key?(key)
if @entries.key?(key) && (@entries[key].nil? || @entries[key].expired?)
self.delete(key)
end
@entries.key?(key)
end | [
"def",
"key?",
"(",
"key",
")",
"if",
"@entries",
".",
"key?",
"(",
"key",
")",
"&&",
"(",
"@entries",
"[",
"key",
"]",
".",
"nil?",
"||",
"@entries",
"[",
"key",
"]",
".",
"expired?",
")",
"self",
".",
"delete",
"(",
"key",
")",
"end",
"@entries",
".",
"key?",
"(",
"key",
")",
"end"
] | checking whether an unexpired endpoint key exists in cache
@param [String] key
@return [Boolean] | [
"checking",
"whether",
"an",
"unexpired",
"endpoint",
"key",
"exists",
"in",
"cache"
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-core/lib/aws-sdk-core/endpoint_cache.rb#L62-L67 | train |
aws/aws-sdk-ruby | gems/aws-sdk-core/lib/aws-sdk-core/endpoint_cache.rb | Aws.EndpointCache.extract_key | def extract_key(ctx)
parts = []
# fetching from cred provider directly gives warnings
parts << ctx.config.credentials.credentials.access_key_id
if _endpoint_operation_identifier(ctx)
parts << ctx.operation_name
ctx.operation.input.shape.members.inject(parts) do |p, (name, ref)|
p << ctx.params[name] if ref["endpointdiscoveryid"]
p
end
end
parts.join('_')
end | ruby | def extract_key(ctx)
parts = []
# fetching from cred provider directly gives warnings
parts << ctx.config.credentials.credentials.access_key_id
if _endpoint_operation_identifier(ctx)
parts << ctx.operation_name
ctx.operation.input.shape.members.inject(parts) do |p, (name, ref)|
p << ctx.params[name] if ref["endpointdiscoveryid"]
p
end
end
parts.join('_')
end | [
"def",
"extract_key",
"(",
"ctx",
")",
"parts",
"=",
"[",
"]",
"# fetching from cred provider directly gives warnings",
"parts",
"<<",
"ctx",
".",
"config",
".",
"credentials",
".",
"credentials",
".",
"access_key_id",
"if",
"_endpoint_operation_identifier",
"(",
"ctx",
")",
"parts",
"<<",
"ctx",
".",
"operation_name",
"ctx",
".",
"operation",
".",
"input",
".",
"shape",
".",
"members",
".",
"inject",
"(",
"parts",
")",
"do",
"|",
"p",
",",
"(",
"name",
",",
"ref",
")",
"|",
"p",
"<<",
"ctx",
".",
"params",
"[",
"name",
"]",
"if",
"ref",
"[",
"\"endpointdiscoveryid\"",
"]",
"p",
"end",
"end",
"parts",
".",
"join",
"(",
"'_'",
")",
"end"
] | extract the key to be used in the cache from request context
@param [RequestContext] ctx
@return [String] | [
"extract",
"the",
"key",
"to",
"be",
"used",
"in",
"the",
"cache",
"from",
"request",
"context"
] | e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d | https://github.com/aws/aws-sdk-ruby/blob/e28b8d320ddf7b6ee0161bdd9d00fb786d99b63d/gems/aws-sdk-core/lib/aws-sdk-core/endpoint_cache.rb#L105-L117 | train |
primer/octicons | lib/octicons_jekyll/lib/jekyll-octicons.rb | Jekyll.Octicons.string_to_hash | def string_to_hash(markup)
options = {}
if match = markup.match(Syntax)
markup.scan(TagAttributes) do |key, value|
options[key.to_sym] = value.gsub(/\A"|"\z/, "")
end
end
options
end | ruby | def string_to_hash(markup)
options = {}
if match = markup.match(Syntax)
markup.scan(TagAttributes) do |key, value|
options[key.to_sym] = value.gsub(/\A"|"\z/, "")
end
end
options
end | [
"def",
"string_to_hash",
"(",
"markup",
")",
"options",
"=",
"{",
"}",
"if",
"match",
"=",
"markup",
".",
"match",
"(",
"Syntax",
")",
"markup",
".",
"scan",
"(",
"TagAttributes",
")",
"do",
"|",
"key",
",",
"value",
"|",
"options",
"[",
"key",
".",
"to_sym",
"]",
"=",
"value",
".",
"gsub",
"(",
"/",
"\\A",
"\\z",
"/",
",",
"\"\"",
")",
"end",
"end",
"options",
"end"
] | Create a ruby hash from a string passed by the jekyll tag | [
"Create",
"a",
"ruby",
"hash",
"from",
"a",
"string",
"passed",
"by",
"the",
"jekyll",
"tag"
] | 6fe5475945d5633818b49ce55619ec039789b1c8 | https://github.com/primer/octicons/blob/6fe5475945d5633818b49ce55619ec039789b1c8/lib/octicons_jekyll/lib/jekyll-octicons.rb#L58-L68 | train |
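A standalone sketch of the parsing behavior; the regex below approximates Jekyll/Liquid's `Syntax`/`TagAttributes` patterns, which are not shown in this entry.
# Approximation of Liquid's TagAttributes pattern, for illustration only.
TAG_ATTRIBUTES = /([\w-]+)\s*:\s*("[^"]+"|[\w\.-]+)/

def string_to_hash(markup)
  options = {}
  markup.scan(TAG_ATTRIBUTES) do |key, value|
    options[key.to_sym] = value.gsub(/\A"|"\z/, "")
  end
  options
end

string_to_hash(%(alert class: "close" aria-label: "Close"))
#=> {:class=>"close", :"aria-label"=>"Close"}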
resque/resque | lib/resque/plugin.rb | Resque.Plugin.lint | def lint(plugin)
hooks = before_hooks(plugin) + around_hooks(plugin) + after_hooks(plugin)
hooks.each do |hook|
if hook.to_s.end_with?("perform")
raise LintError, "#{plugin}.#{hook} is not namespaced"
end
end
failure_hooks(plugin).each do |hook|
if hook.to_s.end_with?("failure")
raise LintError, "#{plugin}.#{hook} is not namespaced"
end
end
end | ruby | def lint(plugin)
hooks = before_hooks(plugin) + around_hooks(plugin) + after_hooks(plugin)
hooks.each do |hook|
if hook.to_s.end_with?("perform")
raise LintError, "#{plugin}.#{hook} is not namespaced"
end
end
failure_hooks(plugin).each do |hook|
if hook.to_s.end_with?("failure")
raise LintError, "#{plugin}.#{hook} is not namespaced"
end
end
end | [
"def",
"lint",
"(",
"plugin",
")",
"hooks",
"=",
"before_hooks",
"(",
"plugin",
")",
"+",
"around_hooks",
"(",
"plugin",
")",
"+",
"after_hooks",
"(",
"plugin",
")",
"hooks",
".",
"each",
"do",
"|",
"hook",
"|",
"if",
"hook",
".",
"to_s",
".",
"end_with?",
"(",
"\"perform\"",
")",
"raise",
"LintError",
",",
"\"#{plugin}.#{hook} is not namespaced\"",
"end",
"end",
"failure_hooks",
"(",
"plugin",
")",
".",
"each",
"do",
"|",
"hook",
"|",
"if",
"hook",
".",
"to_s",
".",
"end_with?",
"(",
"\"failure\"",
")",
"raise",
"LintError",
",",
"\"#{plugin}.#{hook} is not namespaced\"",
"end",
"end",
"end"
] | Ensure that your plugin conforms to good hook naming conventions.
Resque::Plugin.lint(MyResquePlugin) | [
"Ensure",
"that",
"your",
"plugin",
"conforms",
"to",
"good",
"hook",
"naming",
"conventions",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/plugin.rb#L10-L24 | train |
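A quick sketch of a plugin that satisfies the naming rules enforced above; hook names must carry a suffix after `perform`/`failure`, so bare names raise.
require "resque"

module LoggedJob
  def before_perform_log_start(*args)
    puts "starting with #{args.inspect}"
  end

  def on_failure_notify(exception, *args)
    puts "failed: #{exception.message}"
  end
end

Resque::Plugin.lint(LoggedJob) # passes; a bare `before_perform` or
                               # `on_failure` hook would raise a LintError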
resque/resque | lib/resque/worker.rb | Resque.Worker.process | def process(job = nil, &block)
return unless job ||= reserve
job.worker = self
working_on job
perform(job, &block)
ensure
done_working
end | ruby | def process(job = nil, &block)
return unless job ||= reserve
job.worker = self
working_on job
perform(job, &block)
ensure
done_working
end | [
"def",
"process",
"(",
"job",
"=",
"nil",
",",
"&",
"block",
")",
"return",
"unless",
"job",
"||=",
"reserve",
"job",
".",
"worker",
"=",
"self",
"working_on",
"job",
"perform",
"(",
"job",
",",
"block",
")",
"ensure",
"done_working",
"end"
] | DEPRECATED. Processes a single job. If none is given, it will
try to produce one. Usually run in the child. | [
"DEPRECATED",
".",
"Processes",
"a",
"single",
"job",
".",
"If",
"none",
"is",
"given",
"it",
"will",
"try",
"to",
"produce",
"one",
".",
"Usually",
"run",
"in",
"the",
"child",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L275-L283 | train |
resque/resque | lib/resque/worker.rb | Resque.Worker.report_failed_job | def report_failed_job(job,exception)
log_with_severity :error, "#{job.inspect} failed: #{exception.inspect}"
begin
job.fail(exception)
rescue Object => exception
log_with_severity :error, "Received exception when reporting failure: #{exception.inspect}"
end
begin
failed!
rescue Object => exception
log_with_severity :error, "Received exception when increasing failed jobs counter (redis issue) : #{exception.inspect}"
end
end | ruby | def report_failed_job(job,exception)
log_with_severity :error, "#{job.inspect} failed: #{exception.inspect}"
begin
job.fail(exception)
rescue Object => exception
log_with_severity :error, "Received exception when reporting failure: #{exception.inspect}"
end
begin
failed!
rescue Object => exception
log_with_severity :error, "Received exception when increasing failed jobs counter (redis issue) : #{exception.inspect}"
end
end | [
"def",
"report_failed_job",
"(",
"job",
",",
"exception",
")",
"log_with_severity",
":error",
",",
"\"#{job.inspect} failed: #{exception.inspect}\"",
"begin",
"job",
".",
"fail",
"(",
"exception",
")",
"rescue",
"Object",
"=>",
"exception",
"log_with_severity",
":error",
",",
"\"Received exception when reporting failure: #{exception.inspect}\"",
"end",
"begin",
"failed!",
"rescue",
"Object",
"=>",
"exception",
"log_with_severity",
":error",
",",
"\"Received exception when increasing failed jobs counter (redis issue) : #{exception.inspect}\"",
"end",
"end"
] | Reports the exception and marks the job as failed | [
"Reports",
"the",
"exception",
"and",
"marks",
"the",
"job",
"as",
"failed"
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L286-L298 | train |
resque/resque | lib/resque/worker.rb | Resque.Worker.perform | def perform(job)
begin
if fork_per_job?
reconnect
run_hook :after_fork, job
end
job.perform
rescue Object => e
report_failed_job(job,e)
else
log_with_severity :info, "done: #{job.inspect}"
ensure
yield job if block_given?
end
end | ruby | def perform(job)
begin
if fork_per_job?
reconnect
run_hook :after_fork, job
end
job.perform
rescue Object => e
report_failed_job(job,e)
else
log_with_severity :info, "done: #{job.inspect}"
ensure
yield job if block_given?
end
end | [
"def",
"perform",
"(",
"job",
")",
"begin",
"if",
"fork_per_job?",
"reconnect",
"run_hook",
":after_fork",
",",
"job",
"end",
"job",
".",
"perform",
"rescue",
"Object",
"=>",
"e",
"report_failed_job",
"(",
"job",
",",
"e",
")",
"else",
"log_with_severity",
":info",
",",
"\"done: #{job.inspect}\"",
"ensure",
"yield",
"job",
"if",
"block_given?",
"end",
"end"
] | Processes a given job in the child. | [
"Processes",
"a",
"given",
"job",
"in",
"the",
"child",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L302-L316 | train |
resque/resque | lib/resque/worker.rb | Resque.Worker.reserve | def reserve
queues.each do |queue|
log_with_severity :debug, "Checking #{queue}"
if job = Resque.reserve(queue)
log_with_severity :debug, "Found job on #{queue}"
return job
end
end
nil
rescue Exception => e
log_with_severity :error, "Error reserving job: #{e.inspect}"
log_with_severity :error, e.backtrace.join("\n")
raise e
end | ruby | def reserve
queues.each do |queue|
log_with_severity :debug, "Checking #{queue}"
if job = Resque.reserve(queue)
log_with_severity :debug, "Found job on #{queue}"
return job
end
end
nil
rescue Exception => e
log_with_severity :error, "Error reserving job: #{e.inspect}"
log_with_severity :error, e.backtrace.join("\n")
raise e
end | [
"def",
"reserve",
"queues",
".",
"each",
"do",
"|",
"queue",
"|",
"log_with_severity",
":debug",
",",
"\"Checking #{queue}\"",
"if",
"job",
"=",
"Resque",
".",
"reserve",
"(",
"queue",
")",
"log_with_severity",
":debug",
",",
"\"Found job on #{queue}\"",
"return",
"job",
"end",
"end",
"nil",
"rescue",
"Exception",
"=>",
"e",
"log_with_severity",
":error",
",",
"\"Error reserving job: #{e.inspect}\"",
"log_with_severity",
":error",
",",
"e",
".",
"backtrace",
".",
"join",
"(",
"\"\\n\"",
")",
"raise",
"e",
"end"
] | Attempts to grab a job off one of the provided queues. Returns
nil if no job can be found. | [
"Attempts",
"to",
"grab",
"a",
"job",
"off",
"one",
"of",
"the",
"provided",
"queues",
".",
"Returns",
"nil",
"if",
"no",
"job",
"can",
"be",
"found",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L320-L334 | train |
resque/resque | lib/resque/worker.rb | Resque.Worker.reconnect | def reconnect
tries = 0
begin
data_store.reconnect
rescue Redis::BaseConnectionError
if (tries += 1) <= 3
log_with_severity :error, "Error reconnecting to Redis; retrying"
sleep(tries)
retry
else
log_with_severity :error, "Error reconnecting to Redis; quitting"
raise
end
end
end | ruby | def reconnect
tries = 0
begin
data_store.reconnect
rescue Redis::BaseConnectionError
if (tries += 1) <= 3
log_with_severity :error, "Error reconnecting to Redis; retrying"
sleep(tries)
retry
else
log_with_severity :error, "Error reconnecting to Redis; quitting"
raise
end
end
end | [
"def",
"reconnect",
"tries",
"=",
"0",
"begin",
"data_store",
".",
"reconnect",
"rescue",
"Redis",
"::",
"BaseConnectionError",
"if",
"(",
"tries",
"+=",
"1",
")",
"<=",
"3",
"log_with_severity",
":error",
",",
"\"Error reconnecting to Redis; retrying\"",
"sleep",
"(",
"tries",
")",
"retry",
"else",
"log_with_severity",
":error",
",",
"\"Error reconnecting to Redis; quitting\"",
"raise",
"end",
"end",
"end"
] | Reconnect to Redis to avoid sharing a connection with the parent;
retry up to 3 times with increasing delay before giving up. | [
"Reconnect",
"to",
"Redis",
"to",
"avoid",
"sharing",
"a",
"connection",
"with",
"the",
"parent",
"retry",
"up",
"to",
"3",
"times",
"with",
"increasing",
"delay",
"before",
"giving",
"up",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L338-L352 | train |
resque/resque | lib/resque/worker.rb | Resque.Worker.shutdown! | def shutdown!
shutdown
if term_child
if fork_per_job?
new_kill_child
else
# Raise TermException in the same process
trap('TERM') do
# ignore subsequent terms
end
raise TermException.new("SIGTERM")
end
else
kill_child
end
end | ruby | def shutdown!
shutdown
if term_child
if fork_per_job?
new_kill_child
else
# Raise TermException in the same process
trap('TERM') do
# ignore subsequent terms
end
raise TermException.new("SIGTERM")
end
else
kill_child
end
end | [
"def",
"shutdown!",
"shutdown",
"if",
"term_child",
"if",
"fork_per_job?",
"new_kill_child",
"else",
"# Raise TermException in the same process",
"trap",
"(",
"'TERM'",
")",
"do",
"# ignore subsequent terms",
"end",
"raise",
"TermException",
".",
"new",
"(",
"\"SIGTERM\"",
")",
"end",
"else",
"kill_child",
"end",
"end"
] | Kill the child and shutdown immediately.
If not forking, abort this process. | [
"Kill",
"the",
"child",
"and",
"shutdown",
"immediately",
".",
"If",
"not",
"forking",
"abort",
"this",
"process",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L434-L449 | train |
resque/resque | lib/resque/worker.rb | Resque.Worker.run_hook | def run_hook(name, *args)
hooks = Resque.send(name)
return if hooks.empty?
return if name == :before_first_fork && @before_first_fork_hook_ran
msg = "Running #{name} hooks"
msg << " with #{args.inspect}" if args.any?
log_with_severity :info, msg
hooks.each do |hook|
args.any? ? hook.call(*args) : hook.call
@before_first_fork_hook_ran = true if name == :before_first_fork
end
end | ruby | def run_hook(name, *args)
hooks = Resque.send(name)
return if hooks.empty?
return if name == :before_first_fork && @before_first_fork_hook_ran
msg = "Running #{name} hooks"
msg << " with #{args.inspect}" if args.any?
log_with_severity :info, msg
hooks.each do |hook|
args.any? ? hook.call(*args) : hook.call
@before_first_fork_hook_ran = true if name == :before_first_fork
end
end | [
"def",
"run_hook",
"(",
"name",
",",
"*",
"args",
")",
"hooks",
"=",
"Resque",
".",
"send",
"(",
"name",
")",
"return",
"if",
"hooks",
".",
"empty?",
"return",
"if",
"name",
"==",
":before_first_fork",
"&&",
"@before_first_fork_hook_ran",
"msg",
"=",
"\"Running #{name} hooks\"",
"msg",
"<<",
"\" with #{args.inspect}\"",
"if",
"args",
".",
"any?",
"log_with_severity",
":info",
",",
"msg",
"hooks",
".",
"each",
"do",
"|",
"hook",
"|",
"args",
".",
"any?",
"?",
"hook",
".",
"call",
"(",
"args",
")",
":",
"hook",
".",
"call",
"@before_first_fork_hook_ran",
"=",
"true",
"if",
"name",
"==",
":before_first_fork",
"end",
"end"
] | Runs a named hook, passing along any arguments. | [
"Runs",
"a",
"named",
"hook",
"passing",
"along",
"any",
"arguments",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L645-L657 | train |
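The hooks dispatched above are registered through Resque's configuration API; a minimal sketch, assuming the block-registration form available in recent Resque versions.
require "resque"

Resque.before_first_fork do
  puts "worker booted"
end

Resque.after_fork do |job|
  # re-establish per-process resources here, e.g. database connections
end

# Inside the worker, run_hook(:after_fork, job) then invokes each
# registered block with the job as its argument.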
resque/resque | lib/resque/worker.rb | Resque.Worker.unregister_worker | def unregister_worker(exception = nil)
# If we're still processing a job, make sure it gets logged as a
# failure.
if (hash = processing) && !hash.empty?
job = Job.new(hash['queue'], hash['payload'])
# Ensure the proper worker is attached to this job, even if
# it's not the precise instance that died.
job.worker = self
begin
job.fail(exception || DirtyExit.new("Job still being processed"))
rescue RuntimeError => e
log_with_severity :error, e.message
end
end
kill_background_threads
data_store.unregister_worker(self) do
Stat.clear("processed:#{self}")
Stat.clear("failed:#{self}")
end
rescue Exception => exception_while_unregistering
message = exception_while_unregistering.message
if exception
message += "\nOriginal Exception (#{exception.class}): #{exception.message}"
message += "\n #{exception.backtrace.join(" \n")}" if exception.backtrace
end
fail(exception_while_unregistering.class,
message,
exception_while_unregistering.backtrace)
end | ruby | def unregister_worker(exception = nil)
# If we're still processing a job, make sure it gets logged as a
# failure.
if (hash = processing) && !hash.empty?
job = Job.new(hash['queue'], hash['payload'])
# Ensure the proper worker is attached to this job, even if
# it's not the precise instance that died.
job.worker = self
begin
job.fail(exception || DirtyExit.new("Job still being processed"))
rescue RuntimeError => e
log_with_severity :error, e.message
end
end
kill_background_threads
data_store.unregister_worker(self) do
Stat.clear("processed:#{self}")
Stat.clear("failed:#{self}")
end
rescue Exception => exception_while_unregistering
message = exception_while_unregistering.message
if exception
message += "\nOriginal Exception (#{exception.class}): #{exception.message}"
message += "\n #{exception.backtrace.join(" \n")}" if exception.backtrace
end
fail(exception_while_unregistering.class,
message,
exception_while_unregistering.backtrace)
end | [
"def",
"unregister_worker",
"(",
"exception",
"=",
"nil",
")",
"# If we're still processing a job, make sure it gets logged as a",
"# failure.",
"if",
"(",
"hash",
"=",
"processing",
")",
"&&",
"!",
"hash",
".",
"empty?",
"job",
"=",
"Job",
".",
"new",
"(",
"hash",
"[",
"'queue'",
"]",
",",
"hash",
"[",
"'payload'",
"]",
")",
"# Ensure the proper worker is attached to this job, even if",
"# it's not the precise instance that died.",
"job",
".",
"worker",
"=",
"self",
"begin",
"job",
".",
"fail",
"(",
"exception",
"||",
"DirtyExit",
".",
"new",
"(",
"\"Job still being processed\"",
")",
")",
"rescue",
"RuntimeError",
"=>",
"e",
"log_with_severity",
":error",
",",
"e",
".",
"message",
"end",
"end",
"kill_background_threads",
"data_store",
".",
"unregister_worker",
"(",
"self",
")",
"do",
"Stat",
".",
"clear",
"(",
"\"processed:#{self}\"",
")",
"Stat",
".",
"clear",
"(",
"\"failed:#{self}\"",
")",
"end",
"rescue",
"Exception",
"=>",
"exception_while_unregistering",
"message",
"=",
"exception_while_unregistering",
".",
"message",
"if",
"exception",
"message",
"+=",
"\"\\nOriginal Exception (#{exception.class}): #{exception.message}\"",
"message",
"+=",
"\"\\n #{exception.backtrace.join(\" \\n\")}\"",
"if",
"exception",
".",
"backtrace",
"end",
"fail",
"(",
"exception_while_unregistering",
".",
"class",
",",
"message",
",",
"exception_while_unregistering",
".",
"backtrace",
")",
"end"
] | Unregisters ourself as a worker. Useful when shutting down. | [
"Unregisters",
"ourself",
"as",
"a",
"worker",
".",
"Useful",
"when",
"shutting",
"down",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L667-L697 | train |
resque/resque | lib/resque/worker.rb | Resque.Worker.working_on | def working_on(job)
data = encode \
:queue => job.queue,
:run_at => Time.now.utc.iso8601,
:payload => job.payload
data_store.set_worker_payload(self,data)
end | ruby | def working_on(job)
data = encode \
:queue => job.queue,
:run_at => Time.now.utc.iso8601,
:payload => job.payload
data_store.set_worker_payload(self,data)
end | [
"def",
"working_on",
"(",
"job",
")",
"data",
"=",
"encode",
":queue",
"=>",
"job",
".",
"queue",
",",
":run_at",
"=>",
"Time",
".",
"now",
".",
"utc",
".",
"iso8601",
",",
":payload",
"=>",
"job",
".",
"payload",
"data_store",
".",
"set_worker_payload",
"(",
"self",
",",
"data",
")",
"end"
] | Given a job, tells Redis we're working on it. Useful for seeing
what workers are doing and when. | [
"Given",
"a",
"job",
"tells",
"Redis",
"we",
"re",
"working",
"on",
"it",
".",
"Useful",
"for",
"seeing",
"what",
"workers",
"are",
"doing",
"and",
"when",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L701-L707 | train |
resque/resque | lib/resque/worker.rb | Resque.Worker.windows_worker_pids | def windows_worker_pids
tasklist_output = `tasklist /FI "IMAGENAME eq ruby.exe" /FO list`.encode("UTF-8", Encoding.locale_charmap)
tasklist_output.split($/).select { |line| line =~ /^PID:/ }.collect { |line| line.gsub(/PID:\s+/, '') }
end | ruby | def windows_worker_pids
tasklist_output = `tasklist /FI "IMAGENAME eq ruby.exe" /FO list`.encode("UTF-8", Encoding.locale_charmap)
tasklist_output.split($/).select { |line| line =~ /^PID:/ }.collect { |line| line.gsub(/PID:\s+/, '') }
end | [
"def",
"windows_worker_pids",
"tasklist_output",
"=",
"`",
"`",
".",
"encode",
"(",
"\"UTF-8\"",
",",
"Encoding",
".",
"locale_charmap",
")",
"tasklist_output",
".",
"split",
"(",
"$/",
")",
".",
"select",
"{",
"|",
"line",
"|",
"line",
"=~",
"/",
"/",
"}",
".",
"collect",
"{",
"|",
"line",
"|",
"line",
".",
"gsub",
"(",
"/",
"\\s",
"/",
",",
"''",
")",
"}",
"end"
] | Returns an Array of string pids of all the other workers on this
machine. Useful when pruning dead workers on startup. | [
"Returns",
"an",
"Array",
"of",
"string",
"pids",
"of",
"all",
"the",
"other",
"workers",
"on",
"this",
"machine",
".",
"Useful",
"when",
"pruning",
"dead",
"workers",
"on",
"startup",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/worker.rb#L818-L821 | train |
resque/resque | lib/resque/job.rb | Resque.Job.fail | def fail(exception)
begin
run_failure_hooks(exception)
rescue Exception => e
raise e
ensure
Failure.create \
:payload => payload,
:exception => exception,
:worker => worker,
:queue => queue
end
end | ruby | def fail(exception)
begin
run_failure_hooks(exception)
rescue Exception => e
raise e
ensure
Failure.create \
:payload => payload,
:exception => exception,
:worker => worker,
:queue => queue
end
end | [
"def",
"fail",
"(",
"exception",
")",
"begin",
"run_failure_hooks",
"(",
"exception",
")",
"rescue",
"Exception",
"=>",
"e",
"raise",
"e",
"ensure",
"Failure",
".",
"create",
":payload",
"=>",
"payload",
",",
":exception",
"=>",
"exception",
",",
":worker",
"=>",
"worker",
",",
":queue",
"=>",
"queue",
"end",
"end"
] | Given an exception object, hands off the needed parameters to
the Failure module. | [
"Given",
"an",
"exception",
"object",
"hands",
"off",
"the",
"needed",
"parameters",
"to",
"the",
"Failure",
"module",
"."
] | adb633a0f6b98b1eb5a5a85bb36ebac9309978fd | https://github.com/resque/resque/blob/adb633a0f6b98b1eb5a5a85bb36ebac9309978fd/lib/resque/job.rb#L232-L244 | train |
ruby-grape/grape | lib/grape/endpoint.rb | Grape.Endpoint.inherit_settings | def inherit_settings(namespace_stackable)
inheritable_setting.route[:saved_validations] += namespace_stackable[:validations]
parent_declared_params = namespace_stackable[:declared_params]
if parent_declared_params
inheritable_setting.route[:declared_params] ||= []
inheritable_setting.route[:declared_params].concat(parent_declared_params.flatten)
end
endpoints && endpoints.each { |e| e.inherit_settings(namespace_stackable) }
end | ruby | def inherit_settings(namespace_stackable)
inheritable_setting.route[:saved_validations] += namespace_stackable[:validations]
parent_declared_params = namespace_stackable[:declared_params]
if parent_declared_params
inheritable_setting.route[:declared_params] ||= []
inheritable_setting.route[:declared_params].concat(parent_declared_params.flatten)
end
endpoints && endpoints.each { |e| e.inherit_settings(namespace_stackable) }
end | [
"def",
"inherit_settings",
"(",
"namespace_stackable",
")",
"inheritable_setting",
".",
"route",
"[",
":saved_validations",
"]",
"+=",
"namespace_stackable",
"[",
":validations",
"]",
"parent_declared_params",
"=",
"namespace_stackable",
"[",
":declared_params",
"]",
"if",
"parent_declared_params",
"inheritable_setting",
".",
"route",
"[",
":declared_params",
"]",
"||=",
"[",
"]",
"inheritable_setting",
".",
"route",
"[",
":declared_params",
"]",
".",
"concat",
"(",
"parent_declared_params",
".",
"flatten",
")",
"end",
"endpoints",
"&&",
"endpoints",
".",
"each",
"{",
"|",
"e",
"|",
"e",
".",
"inherit_settings",
"(",
"namespace_stackable",
")",
"}",
"end"
] | Create a new endpoint.
@param new_settings [InheritableSetting] settings to determine the params,
validations, and other properties from.
@param options [Hash] attributes of this endpoint
@option options path [String or Array] the path to this endpoint, within
the current scope.
@option options method [String or Array] which HTTP method(s) can be used
to reach this endpoint.
@option options route_options [Hash]
@note This happens at the time of API definition, so in this context the
endpoint does not know if it will be mounted under a different endpoint.
@yield a block defining what your API should do when this endpoint is hit
Update our settings from a given set of stackable parameters. Used when
the endpoint's API is mounted under another one. | [
"Create",
"a",
"new",
"endpoint",
"."
] | e26ae618b86920b19b1a98945ba7d6e953a9b989 | https://github.com/ruby-grape/grape/blob/e26ae618b86920b19b1a98945ba7d6e953a9b989/lib/grape/endpoint.rb#L112-L122 | train |
ankane/blazer | app/helpers/blazer/base_helper.rb | Blazer.BaseHelper.blazer_json_escape | def blazer_json_escape(s)
if Rails::VERSION::STRING < "4.1"
result = s.to_s.gsub(JSON_ESCAPE_REGEXP, JSON_ESCAPE)
s.html_safe? ? result.html_safe : result
else
json_escape(s)
end
end | ruby | def blazer_json_escape(s)
if Rails::VERSION::STRING < "4.1"
result = s.to_s.gsub(JSON_ESCAPE_REGEXP, JSON_ESCAPE)
s.html_safe? ? result.html_safe : result
else
json_escape(s)
end
end | [
"def",
"blazer_json_escape",
"(",
"s",
")",
"if",
"Rails",
"::",
"VERSION",
"::",
"STRING",
"<",
"\"4.1\"",
"result",
"=",
"s",
".",
"to_s",
".",
"gsub",
"(",
"JSON_ESCAPE_REGEXP",
",",
"JSON_ESCAPE",
")",
"s",
".",
"html_safe?",
"?",
"result",
".",
"html_safe",
":",
"result",
"else",
"json_escape",
"(",
"s",
")",
"end",
"end"
] | Prior to version 4.1 of rails double quotes were inadvertently removed in json_escape.
This adds the correct json_escape functionality to rails versions < 4.1 | [
"Prior",
"to",
"version",
"4",
".",
"1",
"of",
"rails",
"double",
"quotes",
"were",
"inadventently",
"removed",
"in",
"json_escape",
".",
"This",
"adds",
"the",
"correct",
"json_escape",
"functionality",
"to",
"rails",
"versions",
"<",
"4",
".",
"1"
] | c6c56314d47194b4b24aded4246835d036705bb3 | https://github.com/ankane/blazer/blob/c6c56314d47194b4b24aded4246835d036705bb3/app/helpers/blazer/base_helper.rb#L44-L51 | train |
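An illustrative call, assuming a context that includes `Blazer::BaseHelper`; on Rails 4.1+ the helper simply delegates to `json_escape`, which leaves double quotes intact.
blazer_json_escape(%({"tag":"<script>"}))
#=> '{"tag":"\u003cscript\u003e"}' (double quotes preserved)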

repo: activeadmin/activeadmin
path: lib/active_admin/namespace.rb
func_name: ActiveAdmin.Namespace.register
language: ruby
code:

    def register(resource_class, options = {}, &block)
      config = find_or_build_resource(resource_class, options)

      # Register the resource
      register_resource_controller(config)
      parse_registration_block(config, &block) if block_given?
      reset_menu!

      # Dispatch a registration event
      ActiveSupport::Notifications.publish ActiveAdmin::Resource::RegisterEvent, config

      # Return the config
      config
    end

docstring:
    Register a resource into this namespace. The preferred method to access
    this is to use the global registration ActiveAdmin.register, which
    delegates to the proper namespace instance.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/namespace.rb#L65-L78
partition: train
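
usage sketch (editor's addition; the model name is illustrative):
registrations normally go through the global entry point, which delegates
to this method.

    ActiveAdmin.register Post do
      permit_params :title, :body
    end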

repo: activeadmin/activeadmin
path: lib/active_admin/namespace.rb
func_name: ActiveAdmin.Namespace.build_menu
language: ruby
code:

    def build_menu(name = DEFAULT_MENU)
      @menus.before_build do |menus|
        menus.menu name do |menu|
          yield menu
        end
      end
    end

docstring:
    Add a callback to be run when we build the menu.

    @param [Symbol] name The name of the menu. Default: :default
    @yield [ActiveAdmin::Menu] The block to be run when the menu is built
    @return [void]
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/namespace.rb#L135-L141
partition: train
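
usage sketch (editor's addition; labels and URLs are illustrative), as called
from the initializer:

    config.namespace :admin do |admin|
      admin.build_menu do |menu|
        menu.add label: "Blog", url: "/admin/posts", priority: 0
      end
    end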

repo: activeadmin/activeadmin
path: lib/active_admin/namespace.rb
func_name: ActiveAdmin.Namespace.add_logout_button_to_menu
language: ruby
code:

    def add_logout_button_to_menu(menu, priority = 20, html_options = {})
      if logout_link_path
        html_options = html_options.reverse_merge(method: logout_link_method || :get)

        menu.add id: 'logout', priority: priority, html_options: html_options,
                 label: -> { I18n.t 'active_admin.logout' },
                 url: -> { render_or_call_method_or_proc_on self, active_admin_namespace.logout_link_path },
                 if: :current_active_admin_user?
      end
    end

docstring:
    The default logout menu item.

    @param [ActiveAdmin::MenuItem] menu The menu to add the logout link to
    @param [Fixnum] priority The numeric priority for the order in which it appears
    @param [Hash] html_options An options hash to pass along to link_to
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/namespace.rb#L149-L157
partition: train
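
usage sketch (editor's addition): the usual way to reposition the default
logout item is to re-add it inside the utility navigation menu from the
initializer.

    config.namespace :admin do |admin|
      admin.build_menu :utility_navigation do |menu|
        admin.add_logout_button_to_menu menu, 1, class: "logout"
      end
    end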

repo: activeadmin/activeadmin
path: lib/active_admin/namespace.rb
func_name: ActiveAdmin.Namespace.add_current_user_to_menu
language: ruby
code:

    def add_current_user_to_menu(menu, priority = 10, html_options = {})
      if current_user_method
        menu.add id: 'current_user', priority: priority, html_options: html_options,
                 label: -> { display_name current_active_admin_user },
                 url: -> { auto_url_for(current_active_admin_user) },
                 if: :current_active_admin_user?
      end
    end

docstring:
    The default user session menu item.

    @param [ActiveAdmin::MenuItem] menu The menu to add the logout link to
    @param [Fixnum] priority The numeric priority for the order in which it appears
    @param [Hash] html_options An options hash to pass along to link_to
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/namespace.rb#L165-L172
partition: train
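
usage sketch (editor's addition): typically paired with the logout button in
the same utility navigation callback.

    config.namespace :admin do |admin|
      admin.build_menu :utility_navigation do |menu|
        admin.add_current_user_to_menu menu, 0
      end
    end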

repo: activeadmin/activeadmin
path: lib/active_admin/page_dsl.rb
func_name: ActiveAdmin.PageDSL.content
language: ruby
code:

    def content(options = {}, &block)
      config.set_page_presenter :index, ActiveAdmin::PagePresenter.new(options, &block)
    end

docstring:
    Page content.

    The block should define the view using Arbre.

    Example:

      ActiveAdmin.register "My Page" do
        content do
          para "Sweet!"
        end
      end
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/page_dsl.rb#L17-L19
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/resource_collection.rb
func_name: ActiveAdmin.ResourceCollection.find_resource
language: ruby
code:

    def find_resource(obj)
      resources.detect do |r|
        r.resource_name.to_s == obj.to_s
      end || resources.detect do |r|
        r.resource_class.to_s == obj.to_s
      end ||
        if obj.respond_to? :base_class
          resources.detect { |r| r.resource_class.to_s == obj.base_class.to_s }
        end
    end

docstring:
    Finds a resource based on the resource name, resource class, or base class.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/resource_collection.rb#L34-L43
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/application.rb
func_name: ActiveAdmin.Application.register
language: ruby
code:

    def register(resource, options = {}, &block)
      ns = options.fetch(:namespace) { default_namespace }
      namespace(ns).register resource, options, &block
    end

docstring:
    Registers a brand new configuration for the given resource.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/application.rb#L63-L66
partition: train
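
usage sketch (editor's addition; model and namespace names are illustrative):
the :namespace option routes the registration to a non-default namespace.

    ActiveAdmin.register Comment, namespace: :super_admin do
      actions :index, :show
    end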

repo: activeadmin/activeadmin
path: lib/active_admin/application.rb
func_name: ActiveAdmin.Application.namespace
language: ruby
code:

    def namespace(name)
      name ||= :root

      namespace = namespaces[name] ||= begin
        namespace = Namespace.new(self, name)
        ActiveSupport::Notifications.publish ActiveAdmin::Namespace::RegisterEvent, namespace
        namespace
      end

      yield(namespace) if block_given?

      namespace
    end

docstring:
    Creates a namespace for the given name.

    Yields the namespace if a block is given.

    @return [Namespace] the new or existing namespace
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/application.rb#L73-L85
partition: train
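
usage sketch (editor's addition; the setting shown is illustrative):

    ActiveAdmin.application.namespace(:admin) do |ns|
      ns.site_title = "My Admin"
    end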

repo: activeadmin/activeadmin
path: lib/active_admin/application.rb
func_name: ActiveAdmin.Application.register_page
language: ruby
code:

    def register_page(name, options = {}, &block)
      ns = options.fetch(:namespace) { default_namespace }
      namespace(ns).register_page name, options, &block
    end

docstring:
    Register a page.

    @param name [String] The page name
    @option [Hash] Accepts option :namespace.
    @yield The registration block.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/application.rb#L93-L96
partition: train
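
usage sketch (editor's addition):

    ActiveAdmin.register_page "Dashboard" do
      content do
        para "Welcome!"
      end
    end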

repo: activeadmin/activeadmin
path: lib/active_admin/application.rb
func_name: ActiveAdmin.Application.load!
language: ruby
code:

    def load!
      unless loaded?
        ActiveSupport::Notifications.publish BeforeLoadEvent, self # before_load hook
        files.each { |file| load file }                            # load files
        namespace(default_namespace)                               # init AA resources
        ActiveSupport::Notifications.publish AfterLoadEvent, self  # after_load hook
        @@loaded = true
      end
    end

docstring:
    Loads all ruby files that are within the load_paths setting.
    To reload everything simply call `ActiveAdmin.unload!`.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/application.rb#L112-L120
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/application.rb
func_name: ActiveAdmin.Application.routes
language: ruby
code:

    def routes(rails_router)
      load!
      Router.new(router: rails_router, namespaces: namespaces).apply
    end

docstring:
    Creates all the necessary routes for the ActiveAdmin configurations.

    Use this within the routes.rb file:

      Application.routes.draw do |map|
        ActiveAdmin.routes(self)
      end

    @param rails_router [ActionDispatch::Routing::Mapper]
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/application.rb#L140-L143
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/application.rb
func_name: ActiveAdmin.Application.attach_reloader
language: ruby
code:

    def attach_reloader
      Rails.application.config.after_initialize do |app|
        ActiveSupport::Reloader.after_class_unload do
          ActiveAdmin.application.unload!
        end

        admin_dirs = {}

        load_paths.each do |path|
          admin_dirs[path] = [:rb]
        end

        routes_reloader = app.config.file_watcher.new([], admin_dirs) do
          app.reload_routes!
        end

        app.reloaders << routes_reloader

        ActiveSupport::Reloader.to_prepare do
          # Rails might have reloaded the routes for other reasons (e.g.
          # routes.rb has changed), in which case Active Admin would have been
          # loaded via the `ActiveAdmin.routes` call in `routes.rb`.
          #
          # Otherwise, we should check if any of the admin files have changed
          # and force the routes to reload if necessary. This would again cause
          # Active Admin to load via `ActiveAdmin.routes`.
          #
          # Finally, if Active Admin is still not loaded at this point, then we
          # would need to load it manually.
          unless ActiveAdmin.application.loaded?
            routes_reloader.execute_if_updated
            ActiveAdmin.application.load!
          end
        end
      end
    end

docstring:
    Hook into the Rails code reloading mechanism so that things are reloaded
    properly in development mode.

    If any of the app files (e.g. models) have changed, we need to reload all
    the admin files. If the admin files themselves have changed, we need to
    regenerate the routes as well.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/application.rb#L188-L223
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/form_builder.rb
func_name: ActiveAdmin.HasManyBuilder.extract_custom_settings!
language: ruby
code:

    def extract_custom_settings!(options)
      @heading = options.key?(:heading) ? options.delete(:heading) : default_heading
      @sortable_column = options.delete(:sortable)
      @sortable_start = options.delete(:sortable_start) || 0
      @new_record = options.key?(:new_record) ? options.delete(:new_record) : true
      @destroy_option = options.delete(:allow_destroy)
      options
    end

docstring:
    Remove options that should not render as attributes.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/form_builder.rb#L69-L76
partition: train
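
usage sketch (editor's addition; association and input names are
illustrative): each option stripped above maps to a `has_many` setting in a
resource form.

    form do |f|
      f.has_many :posts, heading: "Project posts",
                         sortable: :position, sortable_start: 1,
                         allow_destroy: true, new_record: "Add a post" do |p|
        p.input :title
      end
    end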

repo: activeadmin/activeadmin
path: lib/active_admin/form_builder.rb
func_name: ActiveAdmin.HasManyBuilder.render_has_many_form
language: ruby
code:

    def render_has_many_form(form_builder, parent, &block)
      index = parent && form_builder.send(:parent_child_index, parent)
      template.concat template.capture { yield(form_builder, index) }
      template.concat has_many_actions(form_builder, "".html_safe)
    end

docstring:
    Renders the Formtastic inputs then appends ActiveAdmin delete and sort actions.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/form_builder.rb#L101-L105
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/form_builder.rb
func_name: ActiveAdmin.HasManyBuilder.js_for_has_many
language: ruby
code:

    def js_for_has_many(class_string, &form_block)
      assoc_name = assoc_klass.model_name
      placeholder = "NEW_#{assoc_name.to_s.underscore.upcase.gsub(/\//, '_')}_RECORD"
      opts = {
        for: [assoc, assoc_klass.new],
        class: class_string,
        for_options: { child_index: placeholder }
      }
      html = template.capture { __getobj__.send(:inputs_for_nested_attributes, opts, &form_block) }
      text = new_record.is_a?(String) ? new_record : I18n.t('active_admin.has_many_new', model: assoc_name.human)

      template.link_to text, '#', class: "button has_many_add", data: {
        html: CGI.escapeHTML(html).html_safe, placeholder: placeholder
      }
    end

docstring:
    Capture the ADD JS.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/form_builder.rb#L158-L172
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/router.rb
func_name: ActiveAdmin.Router.define_resources_routes
language: ruby
code:

    def define_resources_routes
      resources = namespaces.flat_map { |n| n.resources.values }

      resources.each do |config|
        define_resource_routes(config)
      end
    end

docstring:
    Defines the routes for each resource.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/router.rb#L30-L35
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/router.rb
func_name: ActiveAdmin.Router.define_actions
language: ruby
code:

    def define_actions(config)
      router.member do
        config.member_actions.each { |action| build_action(action) }
      end

      router.collection do
        config.collection_actions.each { |action| build_action(action) }
        router.post :batch_action if config.batch_actions_enabled?
      end
    end

docstring:
    Defines member and collection actions.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/router.rb#L75-L84
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/callbacks.rb
func_name: ActiveAdmin.Callbacks.run_callback
language: ruby
code:

    def run_callback(method, *args)
      case method
      when Symbol
        send(method, *args)
      when Proc
        instance_exec(*args, &method)
      else
        raise "Please register with callbacks using a symbol or a block/proc."
      end
    end

docstring:
    Simple callback system. Implements before and after callbacks for
    use within the controllers.

    We didn't use the ActiveSupport callbacks because they do not support
    passing in any arbitrary object into the callback method (which we
    need to do).
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/callbacks.rb#L14-L23
partition: train
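
usage sketch (editor's addition; this is an internal helper, and the callback
names are illustrative): both registration styles the case statement accepts.

    run_callback(:normalize_attributes, resource)  # Symbol -> send
    run_callback(->(r) { r.touch }, resource)      # Proc   -> instance_exec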

repo: activeadmin/activeadmin
path: lib/active_admin/resource_dsl.rb
func_name: ActiveAdmin.ResourceDSL.permit_params
language: ruby
code:

    def permit_params(*args, &block)
      param_key = config.param_key.to_sym
      belongs_to_param = config.belongs_to_param
      create_another_param = :create_another if config.create_another

      controller do
        define_method :permitted_params do
          permitted_params =
            active_admin_namespace.permitted_params +
            Array.wrap(belongs_to_param) +
            Array.wrap(create_another_param)

          params.permit(*permitted_params, param_key => block ? instance_exec(&block) : args)
        end

        private :permitted_params
      end
    end

docstring:
    Keys included in the `permitted_params` setting are automatically whitelisted.

    Either

      permit_params :title, :author, :body, tags: []

    Or

      permit_params do
        defaults = [:title, :body]
        if current_user.admin?
          defaults + [:author]
        else
          defaults
        end
      end
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/resource_dsl.rb#L63-L80
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/resource_dsl.rb
func_name: ActiveAdmin.ResourceDSL.index
language: ruby
code:

    def index(options = {}, &block)
      options[:as] ||= :table
      config.set_page_presenter :index, ActiveAdmin::PagePresenter.new(options, &block)
    end

docstring:
    Configure the index page for the resource.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/resource_dsl.rb#L83-L86
partition: train
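
usage sketch (editor's addition; model and columns are illustrative):

    ActiveAdmin.register Post do
      index do
        selectable_column
        column :title
        column :published_at
        actions
      end
    end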

repo: activeadmin/activeadmin
path: lib/active_admin/resource_dsl.rb
func_name: ActiveAdmin.ResourceDSL.show
language: ruby
code:

    def show(options = {}, &block)
      config.set_page_presenter :show, ActiveAdmin::PagePresenter.new(options, &block)
    end

docstring:
    Configure the show page for the resource.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/resource_dsl.rb#L89-L91
partition: train
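
usage sketch (editor's addition; model and attributes are illustrative):

    ActiveAdmin.register Post do
      show title: :title do
        attributes_table :title, :body, :published_at
      end
    end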

repo: activeadmin/activeadmin
path: lib/active_admin/resource_dsl.rb
func_name: ActiveAdmin.ResourceDSL.csv
language: ruby
code:

    def csv(options = {}, &block)
      options[:resource] = config
      config.csv_builder = CSVBuilder.new(options, &block)
    end

docstring:
    Configure the CSV format.

    For example:

      csv do
        column :name
        column("Author") { |post| post.author.full_name }
      end

      csv col_sep: ";", force_quotes: true do
        column :name
      end
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/resource_dsl.rb#L110-L114
partition: train

repo: activeadmin/activeadmin
path: lib/active_admin/resource_dsl.rb
func_name: ActiveAdmin.ResourceDSL.action
language: ruby
code:

    def action(set, name, options = {}, &block)
      warn "Warning: method `#{name}` already defined" if controller.method_defined?(name)

      set << ControllerAction.new(name, options)
      title = options.delete(:title)

      controller do
        before_action(only: [name]) { @page_title = title } if title
        define_method(name, &block || Proc.new {})
      end
    end

docstring:
    Member Actions give you the functionality of defining both the
    action and the route directly from your ActiveAdmin registration
    block.

    For example:

      ActiveAdmin.register Post do
        member_action :comments do
          @post = Post.find(params[:id])
          @comments = @post.comments
        end
      end

    Will create a new controller action `comments` and will hook it up to
    the named route (comments_admin_post_path) /admin/posts/:id/comments

    You can treat everything within the block as a standard Rails controller
    action.
sha: 0759c8dcf97865748c9344459162ac3c7e65a6cd
url: https://github.com/activeadmin/activeadmin/blob/0759c8dcf97865748c9344459162ac3c7e65a6cd/lib/active_admin/resource_dsl.rb#L135-L145
partition: train

repo: Shopify/shopify_api
path: lib/shopify_api/resources/product.rb
func_name: ShopifyAPI.Product.price_range
language: ruby
code:

    def price_range
      prices = variants.collect(&:price).collect(&:to_f)
      format = "%0.2f"

      if prices.min != prices.max
        "#{format % prices.min} - #{format % prices.max}"
      else
        format % prices.min
      end
    end

docstring:
    Compute the price range.
sha: 2e069578fcaa93188c4f5a919a76df7b3e2e26ef
url: https://github.com/Shopify/shopify_api/blob/2e069578fcaa93188c4f5a919a76df7b3e2e26ef/lib/shopify_api/resources/product.rb#L7-L15
partition: train
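
usage sketch (editor's addition; the id and prices are illustrative):

    product = ShopifyAPI::Product.find(632910392)
    product.price_range
    # => "10.00 - 25.00" when variant prices differ, "10.00" when they match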

repo: sds/overcommit
path: lib/overcommit/configuration_loader.rb
func_name: Overcommit.ConfigurationLoader.load_file
language: ruby
code:

    def load_file(file)
      config = self.class.load_from_file(file, default: false, logger: @log)
      config = self.class.default_configuration.merge(config)

      if @options.fetch(:verify) { config.verify_signatures? }
        verify_signatures(config)
      end

      config
    rescue Overcommit::Exceptions::ConfigurationSignatureChanged
      raise
    rescue StandardError => error
      raise Overcommit::Exceptions::ConfigurationError,
            "Unable to load configuration from '#{file}': #{error}",
            error.backtrace
    end

docstring:
    Loads a configuration, ensuring it extends the default configuration.
sha: 35d60adb41da942178b789560968e3ad030b0ac7
url: https://github.com/sds/overcommit/blob/35d60adb41da942178b789560968e3ad030b0ac7/lib/overcommit/configuration_loader.rb#L64-L79
partition: train

repo: sds/overcommit
path: lib/overcommit/hook_context/base.rb
func_name: Overcommit::HookContext.Base.filter_directories
language: ruby
code:

    def filter_directories(modified_files)
      modified_files.reject do |file|
        File.directory?(file) && !Overcommit::Utils::FileUtils.symlink?(file)
      end
    end

docstring:
    Filter out directories. This could happen when changing a symlink to a
    directory as part of an amendment, since the symlink will still appear as
    a file, but the actual working tree will have a directory.
sha: 35d60adb41da942178b789560968e3ad030b0ac7
url: https://github.com/sds/overcommit/blob/35d60adb41da942178b789560968e3ad030b0ac7/lib/overcommit/hook_context/base.rb#L133-L137
partition: train