Dataset columns: code (string, 26 to 124k chars), docstring (string, 23 to 125k chars), func_name (string, 1 to 98 chars), language (stringclasses, 1 value), repo (string, 5 to 53 chars), path (string, 7 to 151 chars), url (string, 50 to 211 chars), license (stringclasses, 7 values).
def with_pt_checksum_user(username='pt-checksum')
  password = DB.random_password
  create_user username, password
  grant_privileges username, '*', 'PROCESS', 'REPLICATION CLIENT', 'REPLICATION SLAVE'
  grant_privileges username, app_schema, 'ALL PRIVILEGES'
  begin
    yield username, password
  rescue
    drop_user username
    raise
  end
  drop_user username
end
Creates a temporary user for use by pt-table-checksum, yields to the supplied block, and then drops the user. The user will have a randomly-generated 50-character password and elevated permissions (ALL PRIVILEGES on the application schema, plus a few global privileges) since these are necessary to run the tool. The block is passed the username and the randomly-generated password.
with_pt_checksum_user
ruby
tumblr/jetpants
plugins/upgrade_helper/db.rb
https://github.com/tumblr/jetpants/blob/master/plugins/upgrade_helper/db.rb
Apache-2.0
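A minimal usage sketch of the block form (assuming db is a Jetpants DB node; the block body is illustrative):

# Hypothetical caller: the temporary user exists only inside the block and
# is dropped on exit, even if the block raises.
db.with_pt_checksum_user do |username, password|
  puts "temporary checksum user #{username} is ready"
end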
def tcpdump!(duration=30, interface=false)
  interface ||= Jetpants.private_interface
  output "Using tcpdump to capture sample of MySQL traffic for #{duration} seconds"
  tcpdump_options = "-i #{interface} -s 65535 -x -n -q -tttt 'port 3306 and tcp[1] & 7 == 2 and tcp[3] & 7 == 2'"
  outfile = "#{Jetpants.export_location}/#{hostname}.dumpfile"
  ssh_cmd "tcpdump #{tcpdump_options} > #{outfile} & export DUMP_PID=$! && sleep #{duration} && kill $DUMP_PID"
  output "Completed capturing traffic sample"
  "#{hostname}.dumpfile"
end
Captures MySQL traffic with tcpdump for the specified amount of time, in seconds. The dumpfile will be saved to #{Jetpants.export_location} with filename #{hostname}.dumpfile, and the filename portion will be returned by this method. Not all traffic will be included; this uses a sampling method by Devananda van der Veen described at http://www.mysqlperformanceblog.com/2011/04/18/how-to-use-tcpdump-on-very-busy-hosts/. Requires that tcpdump is available in root's PATH, and assumes root's shell is bash or supports equivalent syntax. Currently only works if mysqld is running on port 3306. Warning: tcpdump can be taxing on the server, and can generate rather large amounts of output! It will also overwrite any previous file at the destination path!
tcpdump!
ruby
tumblr/jetpants
plugins/upgrade_helper/db.rb
https://github.com/tumblr/jetpants/blob/master/plugins/upgrade_helper/db.rb
Apache-2.0
def dumpfile_to_slowlog(tcpdump_output_file_path, delete_tcpdumpfile=true)
  pt_query_digest_version = `pt-query-digest --version`.to_s.split(' ').last.chomp rescue '0.0.0'
  raise "pt-query-digest executable is not available on the host" unless $?.exitstatus == 1
  slowlog_file_path = tcpdump_output_file_path.sub('.dumpfile', '') + '.slowlog'
  if pt_query_digest_version.to_f >= 2.2
    ssh_cmd "pt-query-digest #{tcpdump_output_file_path} --type tcpdump --no-report --output slowlog >#{slowlog_file_path}"
  else
    ssh_cmd "pt-query-digest #{tcpdump_output_file_path} --type tcpdump --no-report --print >#{slowlog_file_path}"
  end
  ssh_cmd "rm #{tcpdump_output_file_path}" if delete_tcpdumpfile
  slowlog_file_path = filter_slowlog(slowlog_file_path)
  slowlog_file_path
end
Converts tcpdump output into slowlog format using pt-query-digest. Requires that pt-query-digest is installed and in root's PATH. Returns the full path to the slowlog. Deletes the tcpdump output file afterwards unless delete_tcpdumpfile is false. This is in Host instead of DB because it may be preferable to run this on the host running Jetpants, as opposed to the DB where the dumpfile came from, since pt-query-digest may be taxing to run on the server.
dumpfile_to_slowlog
ruby
tumblr/jetpants
plugins/upgrade_helper/host.rb
https://github.com/tumblr/jetpants/blob/master/plugins/upgrade_helper/host.rb
Apache-2.0
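A sketch of how tcpdump! and dumpfile_to_slowlog might compose into a capture-and-convert workflow (assuming db is a Jetpants DB node and jetpants_host is the Host running Jetpants; both names are illustrative):

# Hypothetical workflow: sample 60 seconds of traffic on the DB, then convert
# the dump to slowlog format. The dumpfile is deleted by default.
dumpfile_name = db.tcpdump!(60)
dump_path     = "#{Jetpants.export_location}/#{dumpfile_name}"
slowlog_path  = jetpants_host.dumpfile_to_slowlog(dump_path)
puts "slowlog ready at #{slowlog_path}"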
def checksum_tables options={}
  schema = master.app_schema
  success = false
  output_lines = []

  # check if already running, or a previous run died
  previous_run = collins_checksum_running
  previous_run = nil if previous_run == ''
  if previous_run
    run_data = JSON.parse(previous_run.downcase) # hash with 'from_host', 'from_pid', 'timestamp'
    previous_host = run_data['from_host'].to_host
    previous_pid = run_data['from_pid'] or die 'No previous pid found in previous rundata?'
    still_running = previous_host.pid_running?(previous_pid, 'pt-table-checksum')
    raise "Checksum already in progress from #{previous_host}, pid=#{previous_pid}" if still_running
    output "Previous failed run detected, will use --resume parameter"
  end

  # Determine what to pass to --max-load
  master.output "Polling for normal max threads_running, please wait"
  max_threads_running = master.max_threads_running
  limit_threads_running = [(max_threads_running * 1.2).ceil, 20].max
  master.output "Found max threads_running=#{max_threads_running}, will use limit of #{limit_threads_running}"

  # Operate with a temporary user that has elevated permissions
  master.with_pt_checksum_user do |username, password|
    # Build command line
    command_line = ['pt-table-checksum',
      '--no-check-replication-filters',
      "--databases #{schema}",
      "--host #{master.ip}",
      "--port #{master.port}",
      "--max-load Threads_running:#{limit_threads_running}",
      "--replicate #{schema}.checksums",
      "--replicate-database #{schema}",
      "--user #{username}",
      "--password #{password}"
    ]
    command_line << '--nocheck-plan' if options[:no_check_plan]
    command_line << "--chunk-time #{options[:chunk_time]}" if options[:chunk_time]
    command_line << "--chunk-size-limit #{options[:chunk_size_limit]}" if options[:chunk_size_limit]
    command_line << ['--tables', options[:tables].join(',')].join(' ') if (options[:tables] && !options[:tables].empty?)
    command_line << '--resume' if previous_run
    command_line = command_line.join ' '

    # Spawn the process
    Open3.popen3(command_line) do |stdin, stdout, stderr, wait_thread|
      exit_code = nil
      pid = wait_thread.pid
      puts "Running pt-table-checksum targeting #{master}, pid on Jetpants host is #{pid}"
      self.collins_checksum_running = {
        'from_host' => Host.local.ip,
        'from_pid'  => pid,
        'timestamp' => Time.now.to_i,
      }.to_json

      # Display STDERR output in real-time, via a separate thread
      Thread.new do
        begin
          stderr.each {|line| puts line}
        rescue IOError, Interrupt
          nil
        end
      end

      # Capture STDOUT and buffer it; since this is the main thread, also
      # watch out for broken pipe or ctrl-c
      begin
        stdout.each {|line| output_lines << line}
        exit_code = wait_thread.value.to_i
      rescue IOError, Interrupt => ex
        puts "Caught exception #{ex.message}"
        exit_code = 130 # by unix convention, return 128 + SIGINT
      end

      # Dump out stdout: first anything we buffered on our end, plus anything
      # that Perl or the OS had buffered on its end
      puts
      output_lines.each {|line| puts line}
      unless stdout.eof?
        stdout.each {|line| puts line} rescue nil
      end
      puts
      puts "Checksum completed with exit code #{exit_code}.\n"
      success = (exit_code == 0)

      # Run again with --replicate-check-only to display ALL diffs, including ones from
      # prior runs of the tool.
      puts 'Verifying all results via --replicate-check-only...'
      output, diff_success = `#{command_line} --replicate-check-only`, $?.success?
      if diff_success
        puts 'No diffs found in any tables.'
        puts output
      else
        puts 'Found diffs:'
        puts output
        success = false
      end

      # Drop the checksums table, but only if there were no diffs
      if success
        output "Dropping table #{schema}.checksums..."
        master.connect(user: username, pass: password)
        master.query('DROP TABLE checksums')
        output "Table dropped."
        master.disconnect
        self.collins_checksum_running = ''
      else
        output 'Keeping checksums table in place for your review.'
        output 'Please manually drop it when done.'
      end
      puts
    end # popen3
  end # with_pt_checksum_user
  success
end
Runs pt-table-checksum on the pool. Returns true if no problems were found, false otherwise. If problems were found, the 'checksums' table is left in the pool; the user must review it and drop it manually.
checksum_tables
ruby
tumblr/jetpants
plugins/upgrade_helper/pool.rb
https://github.com/tumblr/jetpants/blob/master/plugins/upgrade_helper/pool.rb
Apache-2.0
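A usage sketch (assuming pool is a Jetpants pool object; the option values and table names are illustrative):

# Hypothetical invocation: checksum two tables with a 0.5s chunk time.
ok = pool.checksum_tables(chunk_time: 0.5, tables: %w[users posts])
puts(ok ? 'no diffs found' : 'diffs found; review the checksums table')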
def compare_queries(slowlog_path, silent_run_first, *compare_nodes)
  if compare_nodes.size == 0
    compare_nodes = standby_slaves
  else
    compare_nodes.flatten!
    raise "Supplied nodes must all be in this pool" unless compare_nodes.all? {|n| n == master || n.master == master}
  end

  pt_upgrade_version = `pt-upgrade --version`.to_s.split(' ').last.chomp rescue '0.0.0'
  raise "pt-upgrade executable is not available on the host" unless $?.exitstatus == 1

  # We need to create a temporary SUPER user on the nodes to compare
  # Also attempt to silence warning 1592 about unsafe-for-replication statements if
  # using Percona Server 5.5.10+ which supports this.
  username = 'pt-upgrade'
  password = DB.random_password
  remove_suppress_1592 = []
  compare_nodes.each do |node|
    node.create_user username, password
    node.grant_privileges username, '*', 'SUPER'
    node.grant_privileges username, node.app_schema, 'ALL PRIVILEGES'
    if pt_upgrade_version.to_f >= 2.2
      node.mysql_root_cmd "SET SESSION sql_log_bin = 0; CREATE DATABASE IF NOT EXISTS percona_schema;"
      node.grant_privileges username, 'percona_schema', 'ALL PRIVILEGES'
      node.mysql_root_cmd "SET SESSION sql_log_bin = 0; USE percona_schema;CREATE TABLE IF NOT EXISTS pt_upgrade ( id INT NOT NULL PRIMARY KEY );"
    end

    # We only want to try this if (a) the node supports log_warnings_suppress,
    # and (b) the node isn't already suppressing warning 1592
    if node.global_variables[:log_warnings_suppress] == ''
      node.mysql_root_cmd "SET GLOBAL log_warnings_suppress = '1592'"
      remove_suppress_1592 << node
    end
  end

  node_text = compare_nodes.map {|s| s.to_s + ' (v' + s.normalized_version(3) + ')'}.join ' vs '
  dsn_text = compare_nodes.map {|n| "h=#{n.ip},P=#{n.port},u=#{username},p=#{password},D=#{n.app_schema}"}.join ' '
  display_buffer_pool_hit_rate(compare_nodes)

  # Do silent run if requested (to populate buffer pools)
  if silent_run_first
    output "Doing a silent run of pt-upgrade with slowlog #{slowlog_path} to populate buffer pool."
    output "Comparing nodes #{node_text}..."
    if pt_upgrade_version.to_f >= 2.2
      stdout, exit_code = `pt-upgrade --set-vars wait_timeout=10000 #{slowlog_path} #{dsn_text} 2>&1`, $?.to_i
    else
      stdout, exit_code = `pt-upgrade --report=hosts,stats --set-vars wait_timeout=10000 --compare query_times,results #{slowlog_path} #{dsn_text} 2>&1`, $?.to_i
    end
    output "pt-upgrade silent run completed with exit code #{exit_code}"
    puts
    puts
  end

  display_buffer_pool_hit_rate(compare_nodes)

  # Run pt-upgrade for real. Note that we only compare query times and results, NOT warnings,
  # due to issues with warning 1592 causing a huge amount of difficult-to-parse output.
  output "Running pt-upgrade with slowlog #{slowlog_path}"
  output "Comparing nodes #{node_text}..."
  if pt_upgrade_version.to_f >= 2.2
    stdout, exit_code = `pt-upgrade --set-vars wait_timeout=10000 #{slowlog_path} #{dsn_text} 2>&1`, $?.to_i
  else
    stdout, exit_code = `pt-upgrade --report=hosts,stats --set-vars wait_timeout=10000 --compare query_times,results #{slowlog_path} #{dsn_text} 2>&1`, $?.to_i
  end
  output stdout
  puts
  display_buffer_pool_hit_rate(compare_nodes)
  output "pt-upgrade completed with exit code #{exit_code}"

  # Drop the SUPER user and re-enable logging of warning 1592
  compare_nodes.each do |node|
    node.mysql_root_cmd "SET SESSION sql_log_bin = 0; DROP DATABASE IF EXISTS percona_schema;" if pt_upgrade_version.to_f >= 2.2
    node.drop_user username
  end
  remove_suppress_1592.each {|node| node.mysql_root_cmd "SET GLOBAL log_warnings_suppress = ''"}
end
Uses pt-upgrade to compare query performance and resultsets among nodes in a pool. Supply params:
* a full path to a slowlog file
* a boolean indicating whether or not you want to do an initial silent run (results discarded) to populate the buffer pools on the nodes
* two or more nodes, or no nodes if you want to default to using the pool's standby slaves
Requires that pt-upgrade is in root's PATH on the node running Jetpants.
compare_queries
ruby
tumblr/jetpants
plugins/upgrade_helper/pool.rb
https://github.com/tumblr/jetpants/blob/master/plugins/upgrade_helper/pool.rb
Apache-2.0
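A usage sketch (assuming pool is a Jetpants pool and node_a/node_b are slaves in that pool; all names are illustrative):

# Hypothetical invocation: warm the buffer pools with a silent run first,
# then compare query times and results across the two nodes.
pool.compare_queries('/tmp/db101.slowlog', true, node_a, node_b)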
def branched_upgrade_prep(upgrade_shard_master_ip)
  raise "Shard #{self} in wrong state to perform this action! expected :ready, found #{@state}" unless @state == :ready
  raise "Not enough standby slaves of this shard!" unless standby_slaves.size >= slaves_layout[:standby_slave]

  source = slave_for_clone
  spares_needed = {'standby' => slaves_layout[:standby_slave] + 1, 'backup' => slaves_layout[:backup_slave]}

  # Array to hold all the target nodes
  targets = []

  unless upgrade_shard_master_ip.empty?
    upgrade_shard_master = upgrade_shard_master_ip.to_db
    upgrade_shard_master.claim! if upgrade_shard_master.is_spare?
    spares_needed['standby'] = spares_needed['standby'] - 1
    like_node = upgrade_shard_master
    targets << upgrade_shard_master
  else
    like_node = source
  end

  spares_needed.each do |role, needed|
    next if needed == 0
    available = Jetpants.topology.count_spares(role: "#{role}_slave".to_sym, like: like_node, version: Plugin::UpgradeHelper.new_version)
    raise "Not enough spare machines with role of #{role} slave! Requested #{needed} but only have #{available} available." if needed > available
  end

  spares_needed.each do |role, needed|
    next if needed == 0
    targets.concat Jetpants.topology.claim_spares(needed, role: "#{role}_slave".to_sym, like: like_node, version: Plugin::UpgradeHelper.new_version)
  end

  # Disable fast shutdown on the source
  source.mysql_root_cmd 'SET GLOBAL innodb_fast_shutdown = 0'

  # Flag the nodes as needing upgrade, which will get triggered when
  # enslave_siblings restarts them
  targets.each {|t| t.needs_upgrade = true}

  # Remove ib_lru_dump if present on targets
  targets.concurrent_each {|t| t.ssh_cmd "rm -rf #{t.mysql_directory}/ib_lru_dump"}

  source.enslave_siblings!(targets)
  targets.concurrent_each {|t| t.resume_replication; t.catch_up_to_master}
  source.pool.sync_configuration

  # Make the 1st new slave be the "future master" which the other new
  # slaves will replicate from
  future_master = targets.shift
  future_master.pause_replication_with *targets
  targets.concurrent_each do |slave|
    slave.change_master_to future_master
    slave.resume_replication
    slave.catch_up_to_master
  end
  future_master.resume_replication
  future_master.catch_up_to_master
end
Builds a set of upgraded slaves, and then makes one of the new slaves become the master for the other new slaves
branched_upgrade_prep
ruby
tumblr/jetpants
plugins/upgrade_helper/shard.rb
https://github.com/tumblr/jetpants/blob/master/plugins/upgrade_helper/shard.rb
Apache-2.0
def branched_upgrade_move_reads
  raise "Shard #{self} in wrong state to perform this action! expected :ready, found #{@state}" unless @state == :ready
  future_master = nil
  slaves.each do |s|
    future_master = s if s.version_cmp(@master) == 1 && s.slaves.size == slaves_layout[:standby_slave] + slaves_layout[:backup_slave]
  end
  raise "Shard #{self} does not have correct hierarchical replication setup to proceed" unless future_master
  @master = future_master
  @state = :child
  sync_configuration
end
Hack the pool configuration to send reads to the new master, but still send writes to the old master (they'll replicate over)
branched_upgrade_move_reads
ruby
tumblr/jetpants
plugins/upgrade_helper/shard.rb
https://github.com/tumblr/jetpants/blob/master/plugins/upgrade_helper/shard.rb
Apache-2.0
def branched_upgrade_move_writes
  raise "Shard #{self} in wrong state to perform this action! expected :child, found #{@state}" unless @state == :child
  @master.disable_read_only!
  @state = :needs_cleanup
  sync_configuration
end
Move writes over to the new master
branched_upgrade_move_writes
ruby
tumblr/jetpants
plugins/upgrade_helper/shard.rb
https://github.com/tumblr/jetpants/blob/master/plugins/upgrade_helper/shard.rb
Apache-2.0
def initialize(arguments = {}, &block)
  @verified = false
  @warned = false
  @opaque_id_prefix = arguments[:opaque_id_prefix] || nil
  api_key(arguments) if arguments[:api_key]
  setup_cloud(arguments) if arguments[:cloud_id]
  set_user_agent!(arguments) unless sent_user_agent?(arguments)
  set_content_type!(arguments)
  @transport = Elastic::Transport::Client.new(arguments, &block)
end
Create a client connected to an Elasticsearch cluster. @param [Hash] arguments - initializer arguments @option arguments [String] :cloud_id - The Cloud ID to connect to Elastic Cloud @option arguments [String, Hash] :api_key Use API Key Authentication, either the base64 encoding of `id` and `api_key` joined by a colon as a String, or a hash with the `id` and `api_key` values. @option arguments [String] :opaque_id_prefix set a prefix for X-Opaque-Id when initializing the client. This will be prepended to the id you set before each request if you're using X-Opaque-Id @option arguments [Hash] :headers Custom HTTP Request Headers
initialize
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch.rb
Apache-2.0
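A minimal connection sketch (the URL, credentials, and prefix below are placeholders, not real values):

require 'elasticsearch'

client = Elasticsearch::Client.new(
  host: 'https://localhost:9200',              # hypothetical cluster address
  api_key: { id: 'my_id', api_key: 'my_key' }, # hypothetical credentials
  opaque_id_prefix: 'my-app_'
)
client.info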
def encode(api_key)
  credentials = [api_key[:id], api_key[:api_key]].join(':')
  [credentials].pack('m0')
end
Encode credentials for the Authorization Header Credentials is the base64 encoding of id and api_key joined by a colon @see https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html
encode
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch.rb
Apache-2.0
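For example, pack('m0') produces strict (unwrapped) base64, equivalent to Base64.strict_encode64:

encode(id: 'foo', api_key: 'bar') # => "Zm9vOmJhcg==", i.e. base64("foo:bar")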
def initialize(client, index, params = {})
  @client = client
  @index = index
  @params = params
end
Create a BulkHelper @param [Elasticsearch::Client] client Instance of Elasticsearch client to use. @param [String] index Index on which to perform the Bulk actions. @param [Hash] params Parameters to re-use in every bulk call
initialize
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
Apache-2.0
def ingest(docs, params = {}, body = {}, &block)
  ingest_docs = docs.map { |doc| { index: { _index: @index, data: doc } } }
  if (slice = params.delete(:slice))
    ingest_docs.each_slice(slice) do |items|
      ingest(items.map { |item| item[:index][:data] }, params, &block)
    end
  else
    bulk_request(ingest_docs, params, &block)
  end
end
Index documents using the Bulk API. @param [Array<Hash>] docs The documents to be indexed. @param [Hash] params Parameters to use in the bulk ingestion. See the official Elastic documentation for Bulk API for parameters to send to the Bulk API. @option params [Integer] slice number of documents to send to the Bulk API for each batch of ingestion. @param block [Block] Optional block to run after ingesting a batch of documents. @yieldparam response [Elasticsearch::Transport::Response] The response object from calling the Bulk API. @yieldparam ingest_docs [Array<Hash>] The collection of documents sent in the bulk request.
ingest
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
Apache-2.0
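A usage sketch (assuming an existing client; the index name and documents are illustrative):

require 'elasticsearch/helpers/bulk_helper'

bulk_helper = Elasticsearch::Helpers::BulkHelper.new(client, 'books')
docs = [
  { title: 'Snow Crash', author: 'Neal Stephenson' },
  { title: 'The Dispossessed', author: 'Ursula K. Le Guin' }
]
# Send the documents in batches of 100 per Bulk API call:
bulk_helper.ingest(docs, slice: 100) do |response, items|
  puts "ingested #{items.size} docs, errors: #{response.body['errors']}"
end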
def delete(ids, params = {}, body = {})
  delete_docs = ids.map { |id| { delete: { _index: @index, _id: id } } }
  @client.bulk({ body: delete_docs }.merge(params.merge(@params)))
end
Delete documents using the Bulk API @param [Array] ids Array of ids of documents to delete. @param [Hash] params Parameters to send to bulk delete.
delete
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
Apache-2.0
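Deleting by id with the same helper (the ids are illustrative):

bulk_helper.delete(%w[id1 id2 id3])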
def update(docs, params = {}, body = {}, &block)
  ingest_docs = docs.map do |doc|
    { update: { _index: @index, _id: doc.delete('id'), data: { doc: doc } } }
  end
  if (slice = params.delete(:slice))
    ingest_docs.each_slice(slice) { |items| update(items, params, &block) }
  else
    bulk_request(ingest_docs, params, &block)
  end
end
Update documents using the Bulk API @param [Array<Hash>] docs (Required) The documents to be updated. @option params [Integer] slice number of documents to send to the Bulk API for each batch of updates. @param block [Block] Optional block to run after ingesting a batch of documents. @yieldparam response [Elasticsearch::Transport::Response] The response object from calling the Bulk API. @yieldparam ingest_docs [Array<Hash>] The collection of documents sent in the bulk request.
update
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
Apache-2.0
def ingest_json(file, params = {}, &block)
  data = JSON.parse(File.read(file))
  if (keys = params.delete(:keys))
    keys = keys.split(',') if keys.is_a?(String)
    data = data.dig(*keys)
  end
  ingest(data, params, &block)
end
Ingest data directly from a JSON file @param [String] file (Required) The file path. @param [Hash] params Parameters to use in the bulk ingestion. @option params [Integer] slice number of documents to send to the Bulk API for each batch of updates. @option params [Array|String] keys If the data needs to be extracted from nested keys in the JSON file, the keys can be passed in with this parameter to find it. E.g.: If the data in the parsed JSON Hash is found in +json_parsed['data']['items']+, keys would be passed like this (as an Array): +bulk_helper.ingest_json(file, { keys: ['data', 'items'] })+ or as a String: +bulk_helper.ingest_json(file, { keys: 'data, items' })+ @yieldparam response [Elasticsearch::Transport::Response] The response object from calling the Bulk API. @yieldparam ingest_docs [Array<Hash>] The collection of documents sent in the bulk request.
ingest_json
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch/helpers/bulk_helper.rb
Apache-2.0
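Continuing the BulkHelper sketch above, ingesting from a nested JSON file might look like this (the file name and key path are illustrative):

# Hypothetical layout: the documents live under data.items in the JSON file.
bulk_helper.ingest_json('data.json', keys: ['data', 'items'], slice: 250)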
def initialize(client, index, body, scroll = '1m')
  @index = index
  @client = client
  @scroll = scroll
  @body = body
end
Create a ScrollHelper @param [Elasticsearch::Client] client (Required) Instance of Elasticsearch client to use. @param [String] index (Required) Index on which to perform the Bulk actions. @param [Hash] body Body parameters to re-use in every scroll request @param [String] scroll Specify how long a consistent view of the index should be maintained for scrolled search (defaults to '1m')
initialize
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch/helpers/scroll_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch/helpers/scroll_helper.rb
Apache-2.0
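A usage sketch (assuming an existing client; the index and query are illustrative):

require 'elasticsearch/helpers/scroll_helper'

scroll_helper = Elasticsearch::Helpers::ScrollHelper.new(
  client, 'books', { size: 500, query: { match_all: {} } }
)
# The helper is enumerable, so documents stream page by page:
scroll_helper.each do |doc|
  puts doc['_source']
end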
def each(&block)
  until (docs = results).empty?
    docs.each(&block)
  end
  clear
end
Implementation of +each+ for Enumerable module inclusion @yieldparam document [Hash] yields a document found in the search hits.
each
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch/helpers/scroll_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch/helpers/scroll_helper.rb
Apache-2.0
def results
  if @scroll_id
    scroll_request
  else
    initial_search
  end
rescue StandardError => e
  raise e
end
Results from a scroll. Can be called repeatedly (e.g. in a loop) to get the scroll pages.
results
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch/helpers/scroll_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch/helpers/scroll_helper.rb
Apache-2.0
def clear
  @client.clear_scroll(body: { scroll_id: @scroll_id }) if @scroll_id
  @scroll_id = nil
end
Clears the scroll and resets the inner documents collection
clear
ruby
elastic/elasticsearch-ruby
elasticsearch/lib/elasticsearch/helpers/scroll_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch/lib/elasticsearch/helpers/scroll_helper.rb
Apache-2.0
def initialize(file_name, client, features_to_skip = [])
  @name = file_name
  @client = client
  begin
    documents = YAML.load_stream(File.new(file_name))
  rescue Psych::SyntaxError => e
    raise e unless e.message.include?('found unexpected \':\'')

    message = "Exception found when parsing YAML in #{file_name}: #{e.message}"
    logger.error message
    raise SkipTestsException, message
  rescue StandardError => e
    logger.error e
    logger.error "Filename : #{@name}"
  end
  @test_definitions = documents.reject { |doc| doc['setup'] || doc['teardown'] }
  @setup = documents.find { |doc| doc['setup'] }
  skip_entire_test_file?(file_name) if @setup
  @teardown = documents.find { |doc| doc['teardown'] }
  @features_to_skip = REST_API_YAML_SKIP_FEATURES + features_to_skip
end
Initialize a single test file. @example Create a test file object. TestFile.new(file_name) @param [ String ] file_name The name of the test file. @param [ Client ] client An instance of the client @param [ Array<Symbol> ] features_to_skip The names of features to skip. @since 6.1.0
initialize
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file.rb
Apache-2.0
def tests
  @test_definitions.collect do |test_definition|
    Test.new(self, test_definition)
  end
end
Get a list of tests in the test file. @example Get the list of tests test_file.tests @return [ Array<Test> ] A list of Test objects. @since 6.2.0
tests
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file.rb
Apache-2.0
def setup
  return unless @setup

  actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }
  run_actions_and_retry(actions)
  self
end
Run the setup tasks defined for a single test file. @example Run the setup tasks. test_file.setup @return [ self ] @since 6.2.0
setup
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file.rb
Apache-2.0
def teardown
  return unless @teardown

  actions = @teardown['teardown'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }
  run_actions_and_retry(actions)
  self
end
Run the teardown tasks defined for a single test file. @example Run the teardown tasks. test_file.teardown @return [ self ] @since 6.2.0
teardown
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file.rb
Apache-2.0
def run_actions_and_retry(actions)
  count = 0
  loop do
    actions.delete_if do |action|
      begin
        action.execute(client)
        true
      rescue Elastic::Transport::Transport::Errors::RequestTimeout,
             Net::ReadTimeout, # TODO: Replace this if we change adapters
             Elastic::Transport::Transport::Errors::ServiceUnavailable => e
        # The action sometimes gets the cluster in a recovering state, so we
        # retry a few times and then raise an exception if it's still
        # happening
        count += 1
        sleep 10
        logger.debug(
          "The server responded with an #{e.class} error. Retrying action - (#{count})"
        )
        raise e if count > 11
        false
      end
    end
    break if actions.empty?
  end
end
Helper function to run actions. If the server returns an error, wait a bit and retry a few times before giving up.
run_actions_and_retry
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file.rb
Apache-2.0
def initialize(definition)
  @definition = definition
  @retries = 0
end
Initialize an Action object. @example Create an action object: Action.new("xpack.watcher.get_watch" => { "id" => "my_watch" }) @param [ Hash ] definition The action definition. @since 6.2.0
initialize
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/action.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/action.rb
Apache-2.0
def execute(client, test = nil)
  @definition.each.inject(client) do |client, (method_chain, args)|
    if method_chain.match?('_internal')
      perform_internal(method_chain, args, client, test)
    else
      chain = method_chain.split('.')
      # If we have a method nested in a namespace, client becomes the
      # client/namespace. Eg for `indices.resolve_index`, `client =
      # client.indices` and then we call `resolve_index` on `client`.
      if chain.size > 1
        client = chain[0...-1].inject(client) do |shadow_client, method|
          shadow_client.send(method)
        end
      end
      _method = chain[-1]
      perform_action(_method, args, client, test)
    end
  end
end
Execute the action. The method returns the client, in case the action created a new client with header settings. @example Execute the action. action.execute(client, test) @param [ Elasticsearch::Client ] client The client to use to execute the action. @param [ Test ] test The test containing this action. Necessary for caching variables. @return [ Elasticsearch::Client ] The client. It will be a new one, not the one passed in, if the action is to set headers. @since 6.2.0
execute
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/action.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/action.rb
Apache-2.0
def perform_internal(method, args, client, test)
  es_version = test.cached_values['es_version'] unless test.nil?
  case method
  when '_internal.update_desired_nodes'
    http = 'PUT'
    if (history_id = args.delete('history_id')).match?(/\s+/)
      require 'erb'
      history_id = ERB::Util.url_encode(history_id)
    end
    path = "/_internal/desired_nodes/#{history_id}/#{args.delete('version')}"
    body = args.delete('body')
    # Replace $es_version with actual value:
    if body['nodes']
      body['nodes'].map do |node|
        node['node_version']&.gsub!('$es_version', es_version) if node['node_version'] && es_version
      end
    end
  when '_internal.delete_desired_nodes'
    http = 'DELETE'
    path = '/_internal/desired_nodes/'
    body = args.delete('body')
  when /_internal\.get_([a-z_]+)/
    http = 'GET'
    path = case Regexp.last_match(1)
           when 'desired_nodes'
             '/_internal/desired_nodes/_latest'
           when 'desired_balance'
             '/_internal/desired_balance'
           end
    body = nil
  when '_internal.health'
    path = if args['feature']
             "_internal/_health/#{args.delete('feature')}/"
           else
             '_internal/_health'
           end
    http = 'GET'
    body = args.delete('body')
  when '_internal.prevalidate_node_removal'
    path = '/_internal/prevalidate_node_removal'
    http = 'POST'
    body = args.delete('body')
  end
  args = prepare_arguments(args, test)
  @response = Elasticsearch::API::Response.new(client.perform_request(http, path, args, body))
  client
end
Executes operations not implemented by elasticsearch-api, such as _internal
perform_internal
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/action.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/action.rb
Apache-2.0
def initialize(test)
  @actions = []
  @exception = nil
  @response = nil
  @variables = {}
  @test = test
end
Initialize a TaskGroup object. @example Create a TaskGroup TaskGroup.new(test) @param [ Test ] test The test this task group is part of. @since 6.2.0
initialize
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def add_action(action)
  @actions << action if ACTIONS.any? { |a| action[a] }
  self
end
Add an action to the task group definition. @example Add an action task_group.add_action(action) @param [ Hash ] action The hash representation of an action. @return [ self ] @since 6.2.0
add_action
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def run(client)
  # Allow the actions to be executed only once.
  return if @executed

  @executed = true
  do_actions.inject(client) do |_client, action|
    action.execute(_client, test)
    # Cache the result of the action, if a set action is defined.
    set_variable(action)
    transform_and_set_variable(action)
    _client
  end
  self
rescue => ex
  raise ex unless catch_exception?

  # Cache the exception raised as a result of the operation, if the task group has a 'catch' defined.
  @exception = ex
end
Run the actions in the task group. @example Run the actions task_group.run(client) @param [ Elasticsearch::Client ] client The client to use to run the actions. @return [ self ] @since 6.2.0
run
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def response
  @response ||= begin
    if do_actions.any? { |a| a.yaml_response? }
      YAML.load(do_actions[-1].response.body)
    else
      do_actions[-1].response
    end
  end
end
Consider the response of interest the one resulting from the last action. @return [ Hash ] The response from the last action. @since 6.2.0
response
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def expected_exception_message
  @expected_exception_message ||= begin
    if do_definitions = @actions.group_by { |a| a.keys.first }['do']
      if catch_exception = do_definitions.find { |a| a['do']['catch'] }
        catch_exception['do']['catch']
      end
    end
  end
end
The expected exception message. @return [ String ] The expected exception message. @since 6.2.0
expected_exception_message
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def match_clauses
  @match_actions ||= @actions.group_by { |a| a.keys.first }['match']
end
The match clauses. @return [ Array<Hash> ] The match clauses. @since 6.2.0
match_clauses
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def true_clauses
  @true_clauses ||= @actions.group_by { |a| a.keys.first }['is_true']
end
The true match clauses. @return [ Array<Hash> ] The true match clauses. @since 6.2.0
true_clauses
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def false_clauses
  @false_clauses ||= @actions.group_by { |a| a.keys.first }['is_false']
end
The false match clauses. @return [ Array<Hash> ] The false match clauses. @since 6.2.0
false_clauses
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def gte_clauses
  @gte_clauses ||= @actions.group_by { |a| a.keys.first }['gte']
end
The gte clauses. @return [ Array<Hash> ] The gte clauses. @since 6.2.0
gte_clauses
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def gt_clauses
  @gt_clauses ||= @actions.group_by { |a| a.keys.first }['gt']
end
The gt clauses. @return [ Array<Hash> ] The gt clauses. @since 6.2.0
gt_clauses
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def lte_clauses
  @lte_clauses ||= @actions.group_by { |a| a.keys.first }['lte']
end
The lte clauses. @return [ Array<Hash> ] The lte clauses. @since 6.2.0
lte_clauses
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def lt_clauses
  @lt_clauses ||= @actions.group_by { |a| a.keys.first }['lt']
end
The lt clauses. @return [ Array<Hash> ] The lt clauses. @since 6.2.0
lt_clauses
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def length_match_clauses
  @match_length ||= @actions.group_by { |a| a.keys.first }['length']
end
The field length match clauses. @return [ Array<Hash> ] The field length match clauses. @since 6.2.0
length_match_clauses
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/task_group.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/task_group.rb
Apache-2.0
def find_value_in_document(chain, document)
  return document[chain] unless chain.is_a?(Array)
  return document[chain[0]] unless chain.size > 1

  # a number can be a string key in a Hash or indicate an element in a list
  if document.is_a?(Hash)
    find_value_in_document(chain[1..-1], document[chain[0].to_s]) if document[chain[0].to_s]
  elsif document[chain[0]]
    find_value_in_document(chain[1..-1], document[chain[0]]) if document[chain[0]]
  end
end
Given a list of keys, find the value in a recursively nested document. @param [ Array<String> ] chain The list of nested document keys. @param [ Hash ] document The document to find the value in. @return [ Object ] The value at the nested key. @since 6.2.0
find_value_in_document
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
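For instance, calling the helper directly with a hypothetical document shaped like the docstring's examples:

doc = { 'jobs' => [{ 'node' => { 'name' => 'node-1' } }] }
find_value_in_document(['jobs', 0, 'node', 'name'], doc) # => "node-1"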
def split_and_parse_key(key)
  key.split(/(?<!\\)\./).reject(&:empty?).map do |key_part|
    case key_part
    when /^\.\$/ # For keys in the form of .$key
      key_part.gsub(/^\./, '')
    when /\A[-+]?[0-9]+\z/
      key_part.to_i
    else
      key_part.gsub('\\', '')
    end
  end.reject { |k| k == '$body' }
end
Given a string representing a nested document key using dot notation, split it, keeping escaped dots as part of a key name and replacing numerics with a Ruby Integer. For example: "joe.metadata.2.key2" => ['joe', 'metadata', 2, 'key2'] "jobs.0.node.attributes.ml\\.enabled" => ["jobs", 0, "node", "attributes", "ml\\.enabled"] @param [ String ] key The nested document key string in dot notation. @return [ Array<Object> ] A list of the nested keys. @since 6.2.0
split_and_parse_key
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
def initialize(test_file, test_definition)
  @test_file = test_file
  @file_basename = test_file.name.gsub("#{YAML_FILES_DIRECTORY}/", '')
  @description = test_definition.keys.first
  @definition = test_definition[description].select { |doc| !doc.key?('skip') }
  @definition.delete_if { |doc| doc['skip'] }
  @cached_values = {}
  skip_definitions = test_definition[description].select { |doc| doc['skip'] }.compact
  @skip = skip_definitions unless skip_definitions.empty?
end
Initialize the Test object. @example Create a test object Test.new(file, definition) @param [ TestFile ] test_file The test file object this test belongs to. @param [ Hash ] test_definition A hash corresponding to the parsed YAML containing the test definition. @since 6.2.0
initialize
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
def task_groups
  @task_groups ||= begin
    @definition.each_with_index.inject([]) do |task_groups, (action, i)|
      # the action has a catch, it's a singular task group
      if action['do'] && action['do']['catch']
        task_groups << TaskGroup.new(self)
      elsif action['do'] && i > 0 && is_a_validation?(@definition[i-1])
        task_groups << TaskGroup.new(self)
      elsif i == 0
        task_groups << TaskGroup.new(self)
      end
      task_groups[-1].add_action(action) && task_groups
    end
  end
end
Get the list of task groups in this test. @example test.task_groups @return [ Array<TaskGroup> ] The list of task groups. @since 6.2.0
task_groups
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
def cache_value(cache_key, value)
  @cached_values[cache_key] = value
  @cached_values
end
Cache a value on this test object. @example test.cache_value(cache_key, value) @param [ String ] cache_key The cache key for the value. @param [ Object ] value The value to cache. @return [ Hash ] The cached values. @since 6.2.0
cache_value
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
def get_cached_value(key)
  case key
  when String
    key =~ /^\$/ ? @cached_values.fetch(key.gsub(/[\$\{\}]/, ''), key) : key
  when Hash
    key.inject({}) do |hash, (k, v)|
      k = k.to_s if [Float, Integer].include? k.class
      if v.is_a?(String)
        hash.merge(@cached_values.fetch(k.gsub(/[\$\{\}]/, ''), k) => @cached_values.fetch(v.gsub(/[\$\{\}]/, ''), v))
      else
        hash.merge(@cached_values.fetch(k.gsub(/[\$\{\}]/, ''), k) => v)
      end
    end
  when Array
    key.collect do |k|
      k.is_a?(String) ? @cached_values.fetch(k.gsub(/[\$\{\}]/, ''), k) : k
    end
  else
    key
  end
end
Get a cached value. @example test.get_cached_value('$watch_count_active') @param [ String ] key The key of the cached value. @return [ Hash ] The cached value at the key or the key if it's not found. @since 6.2.0
get_cached_value
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
def run(client)
  task_groups.each { |task_group| task_group.run(client) }
  self
end
Run all the tasks in this test. @example test.run(client) @param [ Elasticsearch::Client ] client The client to use when executing operations. @return [ self ] @since 6.2.0
run
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
def skip_test?(client, features_to_skip = test_file.features_to_skip)
  return true if pre_defined_skip?

  if @skip
    @skip.collect { |s| s['skip'] }.any? do |skip_definition|
      contains_features_to_skip?(features_to_skip, skip_definition) || test_file.skip_version?(client, skip_definition)
    end
  end
end
Determine whether this test should be skipped, given a list of unsupported features. @example test.skip_test?(['warnings']) @param [ Array<String> ] features_to_skip A list of the features to skip. @return [ true, false ] Whether this test should be skipped, given a list of unsupported features. @since 6.2.0
skip_test?
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
def inject_master_node_id(expected_key)
  if cached_values['master']
    expected_key.gsub(/\$master/, cached_values['master'])
  else
    expected_key
  end
end
Replace the `$master` substring in a key with the cached master node's id. @param [ String ] expected_key The expected key, containing the substring `$master` that needs to be replaced. See test xpack/10_basic.yml @return [ String ] The altered key. @since 7.2.0
inject_master_node_id
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
def skip_version?(client, skip_definition)
  return true if skip_definition['version'] == 'all'

  range_partition = /\s*-\s*/
  if versions = skip_definition['version'] && skip_definition['version'].partition(range_partition)
    low, high = __parse_versions(versions)
    range = low..high
    begin
      server_version = client.info['version']['number']
    rescue
      warn('Could not determine Elasticsearch version when checking if test should be skipped.')
    end
    range.cover?(server_version)
  end
end
Given the server version and the skip definition version, returns whether a test should be skipped. Returns true if the server version falls within the range of the test's skip definition version.
skip_version?
ruby
elastic/elasticsearch-ruby
elasticsearch-api/api-spec-testing/test_file/test.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/api-spec-testing/test_file/test.rb
Apache-2.0
def escape(string)
  return string if string == '*'

  ERB::Util.url_encode(string.to_s)
end
URL-escape a string @example escape('foo/bar') # => 'foo%2Fbar' escape('bar^bam') # => 'bar%5Ebam' @api private
escape
ruby
elastic/elasticsearch-ruby
elasticsearch-api/lib/elasticsearch/api/utils.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/lib/elasticsearch/api/utils.rb
Apache-2.0
def listify(*list)
  options = list.last.is_a?(Hash) ? list.pop : {}
  escape = options[:escape]
  Array(list)
    .flat_map { |e| e.respond_to?(:split) ? e.split(',') : e }
    .flatten
    .compact
    .map { |e| escape == false ? e : escape(e) }
    .join(',')
end
Create a "list" of values from arguments, ignoring nil values and encoding special characters. @example Create a list from array listify(['A','B']) # => 'A,B' @example Create a list from arguments listify('A','B') # => 'A,B' @example Escape values listify('foo','bar^bam') # => 'foo,bar%5Ebam' @example Do not escape the values listify('foo','bar^bam', escape: false) # => 'foo,bar^bam' @api private
listify
ruby
elastic/elasticsearch-ruby
elasticsearch-api/lib/elasticsearch/api/utils.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/lib/elasticsearch/api/utils.rb
Apache-2.0
def pathify(*segments)
  Array(segments)
    .flatten
    .compact
    .reject { |s| s.to_s.strip.empty? }
    .join('/')
    .squeeze('/')
end
Create a path (URL part) from arguments, ignoring nil values and empty strings. @example Create a path from array pathify(['foo', '', nil, 'bar']) # => 'foo/bar' @example Create a path from arguments pathify('foo', '', nil, 'bar') # => 'foo/bar' # @example Encode special characters pathify(['foo', 'bar^bam']) # => 'foo/bar%5Ebam' @api private
pathify
ruby
elastic/elasticsearch-ruby
elasticsearch-api/lib/elasticsearch/api/utils.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/lib/elasticsearch/api/utils.rb
Apache-2.0
def bulkify(payload)
  operations = %w[index create delete update]

  case
  # Hashes with `:data`
  when payload.any? { |d| d.is_a?(Hash) && d.values.first.is_a?(Hash) && operations.include?(d.keys.first.to_s) && (d.values.first[:data] || d.values.first['data']) }
    payload = payload
      .inject([]) do |sum, item|
        operation, meta = item.to_a.first
        meta = meta.clone
        data = meta.delete(:data) || meta.delete('data')
        sum << { operation => meta }
        sum << data if data
        sum
      end
      .map { |item| Elasticsearch::API.serializer.dump(item) }
    payload << '' unless payload.empty?
  # Array of strings
  when payload.all? { |d| d.is_a? String }
    payload << ''
  # Header/Data pairs
  else
    payload = payload.map { |item| Elasticsearch::API.serializer.dump(item) }
    payload << ''
  end

  payload = payload.join("\n")
end
Convert an array of payloads into Elasticsearch `header\ndata` format Supports various different formats of the payload: Array of Strings, Header/Data pairs, or the convenience "combined" format where data is passed along with the header in a single item. Elasticsearch::API::Utils.bulkify [ { :index => { :_index => 'myindexA', :_type => 'mytype', :_id => '1', :data => { :title => 'Test' } } }, { :update => { :_index => 'myindexB', :_type => 'mytype', :_id => '2', :data => { :doc => { :title => 'Update' } } } } ] # => {"index":{"_index":"myindexA","_type":"mytype","_id":"1"}} # => {"title":"Test"} # => {"update":{"_index":"myindexB","_type":"mytype","_id":"2"}} # => {"doc":{"title":"Update"}}
bulkify
ruby
elastic/elasticsearch-ruby
elasticsearch-api/lib/elasticsearch/api/utils.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/lib/elasticsearch/api/utils.rb
Apache-2.0
def extract_parts(arguments, _valid_parts = [])
  Hash[arguments].reduce([]) do |sum, item|
    k, v = item
    v.is_a?(TrueClass) ? sum << k.to_s : sum << v
  end
end
Extracts the valid parts of the URL from the arguments @note Mutates the `arguments` argument, to prevent failures in `__validate_and_extract_params`. @param arguments [Hash] Hash of arguments to verify and extract, **with symbolized keys** @param valid_parts [Array<Symbol>] An array of symbols with valid keys @return [Array<String>] Valid parts of the URL as an array of strings @example Extract parts extract_parts { :foo => true }, [:foo, :bar] # => [:foo] @api private
extract_parts
ruby
elastic/elasticsearch-ruby
elasticsearch-api/lib/elasticsearch/api/utils.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/lib/elasticsearch/api/utils.rb
Apache-2.0
def rescue_from_not_found(&block)
  yield
rescue StandardError => e
  if e.class.to_s =~ /NotFound/ || e.message =~ /Not\s*Found/i
    false
  else
    raise e
  end
end
Calls the given block, rescuing from `StandardError`. Primary use case is the `:ignore` parameter for API calls. Returns `false` if exception contains NotFound in its class name or message, else re-raises the exception. @yield [block] A block of code to be executed with exception handling. @api private
rescue_from_not_found
ruby
elastic/elasticsearch-ruby
elasticsearch-api/lib/elasticsearch/api/utils.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/lib/elasticsearch/api/utils.rb
Apache-2.0
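A usage sketch (the get call, index, and id are illustrative):

# Returns false instead of raising when the document is missing.
found = rescue_from_not_found do
  client.get(index: 'books', id: 'missing-id')
end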
def termvector(arguments = {})
  warn '[DEPRECATION] `termvector` is deprecated. Please use the plural version, `termvectors` instead.'
  termvectors(arguments.merge(endpoint: '_termvector'))
end
Deprecated: Use the plural version, {#termvectors}
termvector
ruby
elastic/elasticsearch-ruby
elasticsearch-api/lib/elasticsearch/api/actions/termvectors.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/lib/elasticsearch/api/actions/termvectors.rb
Apache-2.0
def help(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'cat.help' }

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  method = Elasticsearch::API::HTTP_GET
  path   = '_cat'
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Get CAT help. Get help for the CAT APIs. @option arguments [Hash] :headers Custom HTTP headers @see https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat
help
ruby
elastic/elasticsearch-ruby
elasticsearch-api/lib/elasticsearch/api/actions/cat/help.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/lib/elasticsearch/api/actions/cat/help.rb
Apache-2.0
def get_node_stats(arguments = {})
  request_opts = { endpoint: arguments[:endpoint] || 'transform.get_node_stats' }

  arguments = arguments.clone
  headers = arguments.delete(:headers) || {}

  body = nil

  method = Elasticsearch::API::HTTP_GET
  path   = '_transform/_node_stats'
  params = {}

  Elasticsearch::API::Response.new(
    perform_request(method, path, params, body, headers, request_opts)
  )
end
Retrieves transform usage information for transform nodes. @option arguments [Hash] :headers Custom HTTP headers @see https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html
get_node_stats
ruby
elastic/elasticsearch-ruby
elasticsearch-api/lib/elasticsearch/api/actions/transform/get_node_stats.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/lib/elasticsearch/api/actions/transform/get_node_stats.rb
Apache-2.0
def specific_params
  super(@module_namespace.first, @method_name)
end
Function that adds the listified h param code
specific_params
ruby
elastic/elasticsearch-ruby
elasticsearch-api/utils/thor/endpoint_spec.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/utils/thor/endpoint_spec.rb
Apache-2.0
def parse_required_parts(spec)
  required = []
  return required if @endpoint_name == 'tasks.get'

  required << 'body' if (spec['body'] && spec['body']['required'])
  # Get required variables from paths:
  req_variables = parse_path_variables.inject(:&) # find intersection
  required << req_variables unless req_variables.empty?
  required.flatten
end
Find parts that are definitely required and should raise an error if they're not present
parse_required_parts
ruby
elastic/elasticsearch-ruby
elasticsearch-api/utils/thor/endpoint_spec.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/utils/thor/endpoint_spec.rb
Apache-2.0
def specific_params(namespace, method_name)
  params = []
  if H_PARAMS.include?(method_name) && namespace == 'cat'
    if method_name == 'nodes'
      params << 'params[:h] = Utils.__listify(params[:h], escape: false) if params[:h]'
    else
      params << 'params[:h] = Utils.__listify(params[:h]) if params[:h]'
    end
  end
  params
end
Function that adds the listified h param code
specific_params
ruby
elastic/elasticsearch-ruby
elasticsearch-api/utils/thor/endpoint_specifics.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/utils/thor/endpoint_specifics.rb
Apache-2.0
def files
  json_files = Dir.entries(SRC_PATH)

  json_files.reject do |file|
    File.extname(file) != '.json' || File.basename(file) == '_common.json'
  end.map { |file| "#{SRC_PATH}#{file}" }
end
Only get JSON files and remove hidden files
files
ruby
elastic/elasticsearch-ruby
elasticsearch-api/utils/thor/generator/files_helper.rb
https://github.com/elastic/elasticsearch-ruby/blob/master/elasticsearch-api/utils/thor/generator/files_helper.rb
Apache-2.0
def load!(controllers_path:)
  controllers(controllers_path: controllers_path)
end
Initialize the autoloaders with a given controllers path @param [String] controllers_path The path to Gruf Controllers
load!
ruby
bigcommerce/gruf
lib/gruf/autoloaders.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/autoloaders.rb
MIT
def each
  yield controllers
end
Enumerate across the managed set of autoloaders
each
ruby
bigcommerce/gruf
lib/gruf/autoloaders.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/autoloaders.rb
MIT
def controllers(controllers_path: nil)
  controllers_mutex do
    @controllers ||= ::Gruf::Controllers::Autoloader.new(path: controllers_path || ::Gruf.controllers_path)
  end
end
Lazily instantiate and memoize the Gruf Controllers autoloader in a thread-safe manner @return [::Gruf::Controllers::Autoloader]
controllers
ruby
bigcommerce/gruf
lib/gruf/autoloaders.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/autoloaders.rb
MIT
def controllers_mutex(&block)
  @controllers_mutex ||= begin
    require 'monitor'
    Monitor.new
  end
  @controllers_mutex.synchronize(&block)
end
Handle mutations to the controllers autoloader in a thread-safe manner
controllers_mutex
ruby
bigcommerce/gruf
lib/gruf/autoloaders.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/autoloaders.rb
MIT
def initialize(service:, options: {}, client_options: {})
  @base_klass = service
  @service_klass = "#{base_klass}::Service".constantize
  @opts = options || {}
  @opts[:password] = @opts.fetch(:password, '').to_s
  @opts[:hostname] = @opts.fetch(:hostname, Gruf.default_client_host)
  @opts[:channel_credentials] = @opts.fetch(:channel_credentials, Gruf.default_channel_credentials)
  @error_factory = Gruf::Client::ErrorFactory.new
  client_options[:timeout] = parse_timeout(client_options[:timeout]) if client_options.key?(:timeout)
  client = "#{service}::Stub".constantize.new(@opts[:hostname], build_ssl_credentials, **client_options)
  super(client)
end
# Initialize the client and setup the stub @param [Module] service The namespace of the client Stub that is desired to load @param [Hash] options A hash of options for the client @option options [String] :password The password for basic authentication for the service. @option options [String] :hostname The hostname of the service. Defaults to linkerd. @param [Hash] client_options A hash of options to pass to the gRPC client stub
initialize
ruby
bigcommerce/gruf
lib/gruf/client.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/client.rb
MIT
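Constructing a client might look like the sketch below; `Demo::ThingService` is a hypothetical namespace generated from a .proto file, and the hostname and credentials are invented:

require 'gruf'

client = ::Gruf::Client.new(
  service: ::Demo::ThingService,            # hypothetical generated service
  options: {
    hostname: 'grpc.mydomain.example:9001', # where the service listens
    username: 'grpc',                       # feeds build_metadata's Basic auth
    password: 's3cret'
  },
  client_options: {
    timeout: 10                             # normalized by parse_timeout
  }
)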
def call(request_method, params = {}, metadata = {}, opts = {}, &block)
  request_method = request_method.to_sym
  req = if params.respond_to?(:to_proto) || streaming_request?(request_method)
          params
        else
          request_object(request_method, params)
        end
  md = build_metadata(metadata)
  call_sig = call_signature(request_method)

  unless call_sig
    raise NotImplementedError, "The method #{request_method} has not been implemented in this service."
  end

  resp, operation = execute(call_sig, req, md, opts, &block)
  raise @error_factory.from_exception(resp.result) unless resp.success?

  Gruf::Response.new(operation: operation, message: resp.result, execution_time: resp.time)
end
# Call the client's method with given params @param [String|Symbol] request_method The method that is being requested on the service @param [Hash] params (Optional) A hash of parameters that will be inserted into the gRPC request message that is required for the given above call @param [Hash] metadata (Optional) A hash of metadata key/values that are transported with the client request @param [Hash] opts (Optional) A hash of options to send to the gRPC request_response method @return [Gruf::Response] The response from the server @raise [Gruf::Client::Error|GRPC::BadStatus] If an error occurs, an exception will be raised according to the error type that was returned
call
ruby
bigcommerce/gruf
lib/gruf/client.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/client.rb
MIT
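A hedged usage sketch for `call`, assuming the client above and an RPC named `GetMyThing` whose request message has an `id` field (both invented):

begin
  response = client.call(:GetMyThing, { id: 123 }, { 'request-id' => 'abc' })
  puts response.message.inspect  # the protobuf response message
  puts response.execution_time   # timing captured by Gruf::Timer
rescue Gruf::Client::Error => e
  puts e.error.inspect           # deserialized application error
rescue GRPC::BadStatus => e
  puts "gRPC failure: #{e.code} #{e.details}"
end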
def execute(call_sig, req, metadata, opts = {}, &block)
  operation = nil
  result = Gruf::Timer.time do
    opts[:return_op] = true
    opts[:metadata] = metadata
    operation = send(call_sig, req, opts, &block)
    operation.execute
  end
  [result, operation]
end
# Execute the given request to the service @param [Symbol] call_sig The call signature being executed @param [Object] req (Optional) The protobuf request message to send @param [Hash] metadata (Optional) A hash of metadata key/values that are transported with the client request @param [Hash] opts (Optional) A hash of options to send to the gRPC request_response method @return [Array<Gruf::Timer::Result, GRPC::ActiveCall::Operation>]
execute
ruby
bigcommerce/gruf
lib/gruf/client.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/client.rb
MIT
def request_object(request_method, params = {})
  desc = rpc_desc(request_method)
  desc&.input ? desc.input.new(params) : nil
end
# Get the appropriate protobuf request message for the given request method on the service being called @param [Symbol] request_method The method name being called on the remote service @param [Hash] params (Optional) A hash of parameters that will populate the request object @return [Object, NilClass] The instantiated request message that corresponds to the method being called, or nil if the method has no input type
request_object
ruby
bigcommerce/gruf
lib/gruf/client.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/client.rb
MIT
def call_signature(request_method)
  desc = rpc_desc(request_method)
  desc&.name ? desc.name.to_s.underscore.to_sym : nil
end
# Properly find the appropriate call signature for the GRPC::GenericService given the request method name @return [Symbol]
call_signature
ruby
bigcommerce/gruf
lib/gruf/client.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/client.rb
MIT
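The underscore conversion comes from ActiveSupport (which gruf depends on); a quick sketch of the mapping for a hypothetical RPC name:

require 'active_support/core_ext/string/inflections'

# An rpc_desc named :GetMyThing resolves to the snake_case stub method:
p 'GetMyThing'.underscore.to_sym # => :get_my_thing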
def build_metadata(metadata = {})
  unless opts[:password].empty?
    username = opts.fetch(:username, 'grpc').to_s
    username = username.empty? ? '' : "#{username}:"
    auth_string = Base64.encode64("#{username}#{opts[:password]}")
    metadata[:authorization] = "Basic #{auth_string}".tr("\n", '')
  end
  metadata
end
# Build a sanitized, authenticated metadata hash for the given request @param [Hash] metadata A base metadata hash to build from @return [Hash] The compiled metadata hash that is ready to be transported over the wire
build_metadata
ruby
bigcommerce/gruf
lib/gruf/client.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/client.rb
MIT
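The resulting header can be verified in isolation; the credentials below are made up:

require 'base64'

username = 'grpc'
password = 's3cret'

# Base64.encode64 appends a newline, hence the tr("\n", '') in build_metadata
auth = Base64.encode64("#{username}:#{password}")
p "Basic #{auth}".tr("\n", '') # => "Basic Z3JwYzpzM2NyZXQ="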
def build_ssl_credentials
  return opts[:channel_credentials] if opts[:channel_credentials]

  cert = nil
  if opts[:ssl_certificate_file]
    cert = File.read(opts[:ssl_certificate_file]).to_s.strip
  elsif opts[:ssl_certificate]
    cert = opts[:ssl_certificate].to_s.strip
  end
  cert ? GRPC::Core::ChannelCredentials.new(cert) : :this_channel_is_insecure
end
# Build the SSL/TLS credentials for the outbound gRPC request @return [Symbol|GRPC::Core::ChannelCredentials] The generated SSL credentials for the outbound gRPC request :nocov:
build_ssl_credentials
ruby
bigcommerce/gruf
lib/gruf/client.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/client.rb
MIT
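Feeding a certificate through the client options might look like this sketch; the service and path are hypothetical, and leaving both ssl options unset yields an insecure channel (`:this_channel_is_insecure`):

client = ::Gruf::Client.new(
  service: ::Demo::ThingService, # hypothetical generated service
  options: {
    hostname: 'grpc.mydomain.example:9001',
    # Read by build_ssl_credentials and wrapped in
    # GRPC::Core::ChannelCredentials:
    ssl_certificate_file: '/etc/ssl/grpc/server.crt' # hypothetical path
  }
)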
def error_deserializer_class
  if Gruf.error_serializer
    Gruf.error_serializer.is_a?(Class) ? Gruf.error_serializer : Gruf.error_serializer.to_s.constantize
  else
    Gruf::Serializers::Errors::Json
  end
end
:nocov: # Return the specified error deserializer class by the configuration @return [Class] The proper error deserializer class. Defaults to JSON.
error_deserializer_class
ruby
bigcommerce/gruf
lib/gruf/client.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/client.rb
MIT
def parse_timeout(timeout)
  if timeout.nil?
    GRPC::Core::TimeConsts::ZERO
  elsif timeout.is_a?(GRPC::Core::TimeSpec)
    timeout
  elsif timeout.is_a?(Numeric) # rubocop:disable Lint/DuplicateBranch
    timeout
  elsif timeout.respond_to?(:to_f)
    timeout.to_f
  else
    raise ArgumentError, 'timeout is not a valid value: does not respond to to_f'
  end
end
# Handle various timeout values and prevent improper value setting @see GRPC::Core::TimeConsts#from_relative_time @param [mixed] timeout @return [Float] @return [GRPC::Core::TimeSpec]
parse_timeout
ruby
bigcommerce/gruf
lib/gruf/client.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/client.rb
MIT
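Per the branches above, the timeout option accepts several shapes; a sketch with invented values:

# Numeric values pass through untouched:
client = ::Gruf::Client.new(
  service: ::Demo::ThingService,   # hypothetical service
  client_options: { timeout: 5 }
)

# Other accepted forms, per parse_timeout:
#   nil                      -> GRPC::Core::TimeConsts::ZERO
#   a GRPC::Core::TimeSpec   -> passed through
#   '2.5' (responds to to_f) -> coerced to 2.5
#   anything else            -> ArgumentError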
def configure
  yield self
end
# Yield self for ruby-style initialization @yields [Gruf::Configuration] The configuration object for gruf @return [Gruf::Configuration] The configuration object for gruf
configure
ruby
bigcommerce/gruf
lib/gruf/configuration.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/configuration.rb
MIT
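Typical ruby-style initialization, e.g. in an app initializer; the binding URL is an example value:

require 'gruf'

Gruf.configure do |c|
  c.server_binding_url = '0.0.0.0:9003' # example binding
end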
def options
  opts = {}
  VALID_CONFIG_KEYS.each_key do |k|
    opts.merge!(k => send(k))
  end
  opts
end
# Return the current configuration options as a Hash @return [Hash] The configuration for gruf, represented as a Hash
options
ruby
bigcommerce/gruf
lib/gruf/configuration.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/configuration.rb
MIT
def reset
  VALID_CONFIG_KEYS.each do |k, v|
    send(:"#{k}=", v)
  end
  self.server_binding_url = "#{::ENV.fetch('GRPC_SERVER_HOST', '0.0.0.0')}:#{::ENV.fetch('GRPC_SERVER_PORT', 9_001)}"
  self.interceptors = ::Gruf::Interceptors::Registry.new
  self.hooks = ::Gruf::Hooks::Registry.new
  self.root_path = ::Rails.root.to_s.chomp('/') if defined?(::Rails)
  determine_loggers
  self.ssl_crt_file = "#{root_path}config/ssl/#{environment}.crt"
  self.ssl_key_file = "#{root_path}config/ssl/#{environment}.key"
  cp = ::ENV.fetch('GRUF_CONTROLLERS_PATH', 'app/rpc').to_s
  self.controllers_path = root_path.to_s.empty? ? cp : "#{root_path}/#{cp}"
  self.backtrace_on_error = ::ENV.fetch('GRPC_BACKTRACE_ON_ERROR', 0).to_i.positive?
  self.rpc_server_options = {
    max_waiting_requests: ::ENV.fetch('GRPC_SERVER_MAX_WAITING_REQUESTS', GRPC::RpcServer::DEFAULT_MAX_WAITING_REQUESTS).to_i,
    pool_size: ::ENV.fetch('GRPC_SERVER_POOL_SIZE', GRPC::RpcServer::DEFAULT_POOL_SIZE).to_i,
    pool_keep_alive: ::ENV.fetch('GRPC_SERVER_POOL_KEEP_ALIVE', GRPC::Pool::DEFAULT_KEEP_ALIVE).to_i,
    poll_period: ::ENV.fetch('GRPC_SERVER_POLL_PERIOD', GRPC::RpcServer::DEFAULT_POLL_PERIOD).to_i,
    connect_md_proc: nil,
    server_args: {}
  }
  self.use_default_interceptors = ::ENV.fetch('GRUF_USE_DEFAULT_INTERCEPTORS', 1).to_i.positive?
  if use_default_interceptors
    if defined?(::Rails)
      interceptors.use(::Gruf::Interceptors::Rails::Reloader, reloader: Rails.application.reloader)
    end
    interceptors.use(::Gruf::Interceptors::ActiveRecord::ConnectionReset)
    interceptors.use(::Gruf::Interceptors::Instrumentation::OutputMetadataTimer)
  end
  self.health_check_enabled = ::ENV.fetch('GRUF_HEALTH_CHECK_ENABLED', 0).to_i.positive?
  options
end
# Set the default configuration onto the extended class @return [Hash] options The reset options hash
reset
ruby
bigcommerce/gruf
lib/gruf/configuration.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/configuration.rb
MIT
def environment
  if defined?(::Rails)
    ::Rails.env.to_s
  else
    ENV.fetch('RACK_ENV') { ENV.fetch('RAILS_ENV', 'development') }.to_s
  end
end
# Automatically determine environment @return [String] The current Ruby environment
environment
ruby
bigcommerce/gruf
lib/gruf/configuration.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/configuration.rb
MIT
def initialize(args = {})
  @field_errors = []
  @metadata = {}
  args.each do |k, v|
    send(:"#{k}=", v) if respond_to?(k)
  end
end
# Initialize the error, setting default values @param [Hash] args (Optional) An optional hash of arguments that will set fields on the error object
initialize
ruby
bigcommerce/gruf
lib/gruf/error.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/error.rb
MIT
def add_field_error(field_name, error_code, message = '')
  @field_errors << Errors::Field.new(field_name, error_code, message)
end
# Add a field error to this error package @param [Symbol] field_name The field name for the error @param [Symbol] error_code The application error code for the error; e.g. :job_not_found @param [String] message The application error message for the error; e.g. "Job not found with ID 123"
add_field_error
ruby
bigcommerce/gruf
lib/gruf/error.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/error.rb
MIT
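Assembling an error might look like this sketch; the codes, fields, and messages are invented:

error = Gruf::Error.new(
  code: :invalid_argument,
  app_code: :job_invalid,
  message: 'Job failed validation'
)
error.add_field_error(:title, :blank, 'Title cannot be blank')
error.add_field_error(:run_at, :past, 'Run time must be in the future')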
def set_debug_info(detail, stack_trace = [])
  @debug_info = Errors::DebugInfo.new(detail, stack_trace)
end
# Set the debugging information for the error message @param [String] detail The detailed message generated by the exception @param [Array<String>] stack_trace An array of strings that represents the exception backtrace generated by the service
set_debug_info
ruby
bigcommerce/gruf
lib/gruf/error.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/error.rb
MIT
def serialize
  serializer = serializer_class.new(self)
  serializer.serialize.to_s
end
# Serialize the error for transport @return [String] The serialized error message
serialize
ruby
bigcommerce/gruf
lib/gruf/error.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/error.rb
MIT
def attach_to_call(active_call)
  metadata[Gruf.error_metadata_key.to_sym] = serialize if Gruf.append_server_errors_to_trailing_metadata
  return self if metadata.empty? || !active_call || !active_call.respond_to?(:output_metadata)

  # Check if we've overflown the maximum size of output metadata. If so,
  # log a warning and replace the metadata with something smaller to avoid
  # resource exhausted errors.
  if metadata.inspect.size > MAX_METADATA_SIZE
    code = METADATA_SIZE_EXCEEDED_CODE
    msg = METADATA_SIZE_EXCEEDED_MSG
    logger.warn "#{code}: #{msg} Original error: #{to_h.inspect}"
    err = Gruf::Error.new(code: :internal, app_code: code, message: msg)
    return err.attach_to_call(active_call)
  end

  active_call.output_metadata.update(metadata)
  self
end
# Update the trailing metadata on the given gRPC call, including the error payload if configured to do so. @param [GRPC::ActiveCall] active_call The marshalled gRPC call @return [Error] Return the error itself after updating metadata on the given gRPC call. In the case of a metadata overflow error, we replace the current error with a new one that won't cause a low-level http2 error.
attach_to_call
ruby
bigcommerce/gruf
lib/gruf/error.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/error.rb
MIT
def fail!(active_call)
  raise attach_to_call(active_call).grpc_error
end
# Fail the current gRPC call with the given error, properly attaching it to the call and raising the appropriate gRPC BadStatus code. @param [GRPC::ActiveCall] active_call The marshalled gRPC call @return [GRPC::BadStatus] The gRPC BadStatus code this error is mapped to
fail!
ruby
bigcommerce/gruf
lib/gruf/error.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/error.rb
MIT
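In a server-side handler that has the `GRPC::ActiveCall` in scope, failing the request could look like this sketch (names invented; gruf controllers normally wrap this behind their own `fail!` helper):

error = Gruf::Error.new(
  code: :not_found,
  app_code: :thing_not_found,
  message: 'Thing 123 not found'
)

# attach_to_call serializes the error into trailing metadata, then
# grpc_error maps :not_found to the matching GRPC::BadStatus subclass,
# which fail! raises to terminate the call.
error.fail!(active_call)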
def to_h
  {
    code: code,
    app_code: app_code,
    message: message,
    field_errors: field_errors.map(&:to_h),
    debug_info: debug_info.to_h
  }
end
# Return the error represented in Hash form @return [Hash] The error as a hash
to_h
ruby
bigcommerce/gruf
lib/gruf/error.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/error.rb
MIT
def grpc_error
  md = @metadata || {}
  @grpc_error = grpc_class.new(message, **md)
end
# Return the appropriately mapped GRPC::BadStatus error object for this error @return [GRPC::BadStatus]
grpc_error
ruby
bigcommerce/gruf
lib/gruf/error.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/error.rb
MIT
def serializer_class
  if Gruf.error_serializer
    Gruf.error_serializer.is_a?(Class) ? Gruf.error_serializer : Gruf.error_serializer.to_s.constantize
  else
    Gruf::Serializers::Errors::Json
  end
end
# Return the error serializer being used for gruf @return [Gruf::Serializers::Errors::Base]
serializer_class
ruby
bigcommerce/gruf
lib/gruf/error.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/error.rb
MIT
def initialize(pool_size: DEFAULT_POOL_SIZE, max_waiting_requests: DEFAULT_MAX_WAITING_REQUESTS,
               poll_period: DEFAULT_POLL_PERIOD, pool_keep_alive: Pool::DEFAULT_KEEP_ALIVE,
               connect_md_proc: nil, server_args: {}, interceptors: [],
               event_listener_proc: nil)
  # Call the base class initializer
  super(
    pool_size: pool_size,
    max_waiting_requests: max_waiting_requests,
    poll_period: poll_period,
    pool_keep_alive: pool_keep_alive,
    connect_md_proc: connect_md_proc,
    server_args: server_args,
    interceptors: interceptors
  )

  # Save event listener for later
  @event_listener_proc = event_listener_proc
end
# Add an event_listener_proc that, if supplied, will be called when interesting events happen in the server.
initialize
ruby
bigcommerce/gruf
lib/gruf/instrumentable_grpc_server.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/instrumentable_grpc_server.rb
MIT
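Wiring a listener directly might look like this sketch; the proc simply logs the two events the overrides below emit:

listener = lambda do |event|
  # event is a Symbol, e.g. :thread_pool_exhausted or :unimplemented
  warn "gRPC server event: #{event}"
end

server = Gruf::InstrumentableGrpcServer.new(
  pool_size: 10, # example sizing; other args fall back to the GRPC defaults
  event_listener_proc: listener
)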
def notify(event)
  return if @event_listener_proc.nil? || !@event_listener_proc.respond_to?(:call)

  @event_listener_proc.call(event)
end
# Notify the event listener of something interesting
notify
ruby
bigcommerce/gruf
lib/gruf/instrumentable_grpc_server.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/instrumentable_grpc_server.rb
MIT
def available?(an_rpc)
  super.tap do |obj|
    notify(:thread_pool_exhausted) unless obj
  end
end
# Hook into the thread pool availability check for monitoring
available?
ruby
bigcommerce/gruf
lib/gruf/instrumentable_grpc_server.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/instrumentable_grpc_server.rb
MIT
def implemented?(an_rpc)
  super.tap do |obj|
    notify(:unimplemented) unless obj
  end
end
# Hook into the method implementation check for monitoring
implemented?
ruby
bigcommerce/gruf
lib/gruf/instrumentable_grpc_server.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/instrumentable_grpc_server.rb
MIT
def initialize(operation:, message:, execution_time: nil)
  @operation = operation
  @message = message
  @metadata = operation.metadata
  @trailing_metadata = operation.trailing_metadata
  @deadline = operation.deadline
  @cancelled = operation.cancelled?
  @execution_time = execution_time || 0.0
end
# Initialize a response object with the given gRPC operation @param [GRPC::ActiveCall::Operation] operation The given operation for the current call @param [Object] message The protobuf response message @param [Float] execution_time The amount of time that the response took to occur
initialize
ruby
bigcommerce/gruf
lib/gruf/response.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/response.rb
MIT
def message
  @message ||= @operation.execute
end
# Return the message returned by the request @return [Object] The protobuf response message
message
ruby
bigcommerce/gruf
lib/gruf/response.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/response.rb
MIT
def initialize(opts = {})
  @options = opts || {}
  @interceptors = opts.fetch(:interceptor_registry, Gruf.interceptors)
  @interceptors = Gruf::Interceptors::Registry.new unless @interceptors.is_a?(Gruf::Interceptors::Registry)
  @services = nil
  @started = false
  @hostname = opts.fetch(:hostname, Gruf.server_binding_url)
  @event_listener_proc = opts.fetch(:event_listener_proc, Gruf.event_listener_proc)
end
# Initialize the server and load and setup the services @param [Hash] opts
initialize
ruby
bigcommerce/gruf
lib/gruf/server.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/server.rb
MIT
def server
  server_mutex do
    @server ||= begin
      # For backward compatibility, we allow these options to be passed directly
      # in the Gruf::Server options, or via Gruf.rpc_server_options.
      server_options = {
        pool_size: options.fetch(:pool_size, Gruf.rpc_server_options[:pool_size]),
        max_waiting_requests: options.fetch(:max_waiting_requests, Gruf.rpc_server_options[:max_waiting_requests]),
        poll_period: options.fetch(:poll_period, Gruf.rpc_server_options[:poll_period]),
        pool_keep_alive: options.fetch(:pool_keep_alive, Gruf.rpc_server_options[:pool_keep_alive]),
        connect_md_proc: options.fetch(:connect_md_proc, Gruf.rpc_server_options[:connect_md_proc]),
        server_args: options.fetch(:server_args, Gruf.rpc_server_options[:server_args])
      }

      server = if @event_listener_proc
                 server_options[:event_listener_proc] = @event_listener_proc
                 Gruf::InstrumentableGrpcServer.new(**server_options)
               else
                 GRPC::RpcServer.new(**server_options)
               end

      @port = server.add_http2_port(@hostname, ssl_credentials)
      # do not reference `services` any earlier than this method, as it allows autoloading to take effect
      # and load services into `Gruf.services` as late as possible, which gives us flexibility with different
      # execution paths (such as vanilla ruby, grape, multiple Rails versions, etc). The autoloaders are
      # initially loaded in `Gruf::Cli::Executor` _directly_ before the gRPC services are loaded into the gRPC
      # server, to allow for loading services as late as possible in the execution chain.
      services.each { |s| server.handle(s) }
      server
    end
  end
end
# @return [GRPC::RpcServer] The GRPC server running
server
ruby
bigcommerce/gruf
lib/gruf/server.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/server.rb
MIT
def add_service(klass)
  raise ServerAlreadyStartedError if @started

  @services << klass unless services.include?(klass)
end
:nocov: # Add a gRPC service stub to be served by gruf @param [Class] klass @raise [ServerAlreadyStartedError] if the server is already started
add_service
ruby
bigcommerce/gruf
lib/gruf/server.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/server.rb
MIT
def add_interceptor(klass, opts = {})
  raise ServerAlreadyStartedError if @started

  @interceptors.use(klass, opts)
end
# Add an interceptor to the server @param [Class] klass The Interceptor to add to the registry @param [Hash] opts A hash of options for the interceptor @raise [ServerAlreadyStartedError] if the server is already started
add_interceptor
ruby
bigcommerce/gruf
lib/gruf/server.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/server.rb
MIT
def insert_interceptor_before(before_class, interceptor_class, opts = {})
  raise ServerAlreadyStartedError if @started

  @interceptors.insert_before(before_class, interceptor_class, opts)
end
# Insert an interceptor before another in the currently registered order of execution @param [Class] before_class The interceptor that you want to add the new interceptor before @param [Class] interceptor_class The Interceptor to add to the registry @param [Hash] opts A hash of options for the interceptor
insert_interceptor_before
ruby
bigcommerce/gruf
lib/gruf/server.rb
https://github.com/bigcommerce/gruf/blob/master/lib/gruf/server.rb
MIT
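Registering services and interceptors ahead of start might look like the following; the service stub and interceptor classes are hypothetical:

server = Gruf::Server.new(hostname: '0.0.0.0:9003')

server.add_service(::Demo::ThingService::Service)            # hypothetical stub
server.add_interceptor(MyAuthInterceptor, realm: 'internal') # hypothetical
server.insert_interceptor_before(MyAuthInterceptor, RequestLogger)

# Each of these raises ServerAlreadyStartedError once the server is running.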