_id
stringlengths 2
6
| title
stringlengths 9
130
| partition
stringclasses 3
values | text
stringlengths 66
10.5k
| language
stringclasses 1
value | meta_information
dict |
---|---|---|---|---|---|
q3800
|
RightScale.VolumeManagementHelper.attach_planned_volume
|
train
|
# Attaches a planned volume to this instance via the storage valet API.
#
# Snapshots the pre-attachment disk/volume lists (once) so that a later call
# to manage_volume_device_assignment can detect exactly what changed, then
# issues a retryable attach request.
#
# === Parameters
# mapping(Hash):: planned volume state; reads :volume_id and :device_name,
#   updates :management_status and :attempts in place
#
# === Block
# yields (no args) on success, or on a retryable failure so the caller can
# continue its polling loop
def attach_planned_volume(mapping)
  # preserve the initial list of disks/volumes before attachment for comparison later.
  vm = RightScale::Platform.volume_manager
  InstanceState.planned_volume_state.disks ||= vm.disks
  InstanceState.planned_volume_state.volumes ||= vm.volumes

  # attach.
  payload = {:agent_identity => @agent_identity, :volume_id => mapping[:volume_id], :device_name => mapping[:device_name]}
  Log.info("Attaching volume #{mapping[:volume_id]}.")
  req = RetryableRequest.new("/storage_valet/attach_volume", payload, :retry_delay => VolumeManagement::VOLUME_RETRY_SECONDS)

  req.callback do |res|
    # don't set :volume_status here as that should only be queried
    mapping[:management_status] = 'attached'
    mapping[:attempts] = nil
    yield if block_given?
  end

  req.errback do |res|
    # volume could already be attaching or have been deleted
    # which we can't see because of latency; go around again
    # and check state of volume later.
    Log.error("Failed to attach volume #{mapping[:volume_id]} (#{res})")
    mapping[:attempts] ||= 0
    mapping[:attempts] += 1
    # retry indefinitely so long as core api instructs us to retry or else fail after max attempts.
    if mapping[:attempts] >= VolumeManagement::MAX_VOLUME_ATTEMPTS
      strand("Exceeded maximum of #{VolumeManagement::MAX_VOLUME_ATTEMPTS} attempts attaching volume #{mapping[:volume_id]} with error: #{res}")
    else
      yield if block_given?
    end
  end

  req.run
end
|
ruby
|
{
"resource": ""
}
|
q3801
|
RightScale.VolumeManagementHelper.manage_volume_device_assignment
|
train
|
# Formats/onlines a newly attached disk and assigns it to the planned mount
# point, comparing against the disk/volume snapshot taken before attachment.
#
# === Parameters
# mapping(Hash):: planned volume state; must have :management_status == 'attached';
#   :management_status/:attempts are updated in place
#
# === Block
# yields on success or on a retryable deviation from the expected disk changes
#
# === Raise
# VolumeManagement::UnexpectedState:: when the volume is not in the attached state
def manage_volume_device_assignment(mapping)
  # only managed volumes should be in an attached state ready for assignment.
  unless 'attached' == mapping[:management_status]
    raise VolumeManagement::UnexpectedState.new("The volume #{mapping[:volume_id]} was in an unexpected managed state: #{mapping.inspect}")
  end

  # check for changes in disks.
  last_disks = InstanceState.planned_volume_state.disks
  last_volumes = InstanceState.planned_volume_state.volumes
  vm = RightScale::Platform.volume_manager
  current_disks = vm.disks
  current_volumes = vm.volumes

  # correctly managing device assignment requires expecting precise changes
  # to disks and volumes. any deviation from this requires a retry.
  succeeded = false
  if new_disk = find_distinct_item(current_disks, last_disks, :index)
    # if the new disk has no partitions, then we will format and assign device.
    if vm.partitions(new_disk[:index]).empty?
      # FIX: ignore multiple mount points for simplicity and only create
      # a single primary partition for the first mount point.
      # if we had the UI for it, then the user would probably specify
      # partition sizes as a percentage of disk size and associate those with
      # mount points formatted optionally specifying file system, label, etc.
      @audit.append_info("Creating primary partition and formatting \"#{mapping[:mount_points].first}\".")
      vm.format_disk(new_disk[:index], mapping[:mount_points].first)
      succeeded = true
    else
      # FIX: ignoring multiple existing partitions on a disk (which should
      # result in multiple new volumes appearing when the disk comes online)
      # for simplicity until we have a UI supporting multiple mount points.
      @audit.append_info("Preparing \"#{mapping[:mount_points].first}\" for use.")
      new_volume = find_distinct_item(current_volumes, last_volumes, :device)
      unless new_volume
        # the disk may simply not be online yet; online it and re-check.
        vm.online_disk(new_disk[:index])
        current_volumes = vm.volumes
        new_volume = find_distinct_item(current_volumes, last_volumes, :device)
      end
      if new_volume
        # prefer selection by existing device because it is more reliable in Windows 2003 case.
        unless new_volume[:device] && (0 == new_volume[:device].casecmp(mapping[:mount_points].first))
          device_or_index_to_select = new_volume[:device] || new_volume[:index]
          vm.assign_device(device_or_index_to_select, mapping[:mount_points].first)
        end
        succeeded = true
      end
    end
  end

  # retry only if still not assigned.
  if succeeded
    # volume is (finally!) assigned to correct device name.
    mapping[:management_status] = 'assigned'
    mapping[:attempts] = nil

    # reset cached volumes/disks for next attempt (to attach), if any.
    InstanceState.planned_volume_state.disks = nil
    InstanceState.planned_volume_state.volumes = nil

    # continue.
    yield if block_given?
  else
    mapping[:attempts] ||= 0
    mapping[:attempts] += 1
    if mapping[:attempts] >= VolumeManagement::MAX_VOLUME_ATTEMPTS
      strand("Exceeded maximum of #{VolumeManagement::MAX_VOLUME_ATTEMPTS} attempts waiting for volume #{mapping[:volume_id]} to be in a managable state.")
    else
      yield if block_given?
    end
  end
rescue Exception => e
  strand(e)
end
|
ruby
|
{
"resource": ""
}
|
q3802
|
RightScale.VolumeManagementHelper.merge_planned_volume_mappings
|
train
|
# Merges the latest planned volume mappings from the core API with the
# mappings remembered from the previous pass, reusing (and mutating) the
# previous mapping hash when a volume is already known.
#
# === Parameters
# last_mappings(Array):: mapping hashes from the previous query (may be empty)
# current_planned_volumes(Array):: latest planned volumes from the core API
#
# === Return
# results(Array):: merged mapping hashes
#
# === Raise
# VolumeManagement::InvalidResponse:: when a planned volume fails validation
# VolumeManagement::UnsupportedMountPoint:: when a mount point is not attachable
def merge_planned_volume_mappings(last_mappings, current_planned_volumes)
  results = []
  vm = RightScale::Platform.volume_manager

  # merge latest mappings with last mappings, if any.
  current_planned_volumes.each do |planned_volume|
    # FIX: the original interpolated the local 'mapping' here before it was
    # assigned, raising NameError instead of the intended error message
    # (also corrects the "Reponse" typo in that message).
    raise VolumeManagement::InvalidResponse.new("Response for volume mapping was invalid: #{planned_volume.inspect}") unless planned_volume.is_valid?
    if mount_point = planned_volume.mount_points.find { |mp| false == vm.is_attachable_volume_path?(mp) }
      raise VolumeManagement::UnsupportedMountPoint.new("Cannot mount a volume using \"#{mount_point}\".")
    end
    mapping = {:volume_id => planned_volume.volume_id,
               :device_name => planned_volume.device_name,
               :volume_status => planned_volume.volume_status,
               :mount_points => planned_volume.mount_points.dup}
    if last_mapping = last_mappings.find { |last| last[:volume_id] == mapping[:volume_id] }
      # if device name or mount point(s) have changed then we must start
      # over (we can't prevent the user from doing this).
      if last_mapping[:device_name] != mapping[:device_name] || last_mapping[:mount_points] != mapping[:mount_points]
        last_mapping[:device_name] = mapping[:device_name]
        last_mapping[:mount_points] = mapping[:mount_points].dup
        last_mapping[:management_status] = nil
      end
      last_mapping[:volume_status] = mapping[:volume_status]
      mapping = last_mapping
    end
    results << mapping
  end

  # preserve any last mappings which do not appear in current mappings by
  # assuming that they are 'detached' to support a limitation of the initial
  # query implementation.
  last_mappings.each do |last_mapping|
    already_merged = results.find { |m| m[:volume_id] == last_mapping[:volume_id] }
    unless already_merged
      last_mapping[:volume_status] = 'detached'
      results << last_mapping
    end
  end
  results
end
|
ruby
|
{
"resource": ""
}
|
q3803
|
RightScale.LoginManager.supported_by_platform?
|
train
|
# True when managed login can be enforced here: Linux platform, an existing
# 'rightscale' account, and the managed_login_enable feature flag turned on.
def supported_by_platform?
  # avoid calling user_exists? on unsupported platform(s)
  return false unless RightScale::Platform.linux?
  LoginUserManager.user_exists?('rightscale') && FeatureConfigManager.feature_enabled?('managed_login_enable')
end
|
ruby
|
{
"resource": ""
}
|
q3804
|
RightScale.LoginManager.update_policy
|
train
|
# Applies a new login policy when the platform supports managed login.
#
# === Parameters
# new_policy(LoginPolicy):: policy to apply
# agent_identity(String):: serialized agent identity
#
# === Block
# yields audit content describing the applied policy
#
# === Return
# false when unsupported on this platform, true otherwise
def update_policy(new_policy, agent_identity)
  if supported_by_platform?
    update_users(new_policy.users, agent_identity, new_policy) do |audit_content|
      yield audit_content if block_given?
    end
    true
  else
    false
  end
end
|
ruby
|
{
"resource": ""
}
|
q3805
|
RightScale.LoginManager.get_key_prefix
|
train
|
# Builds the authorized_keys "command=" prefix that forces a login through
# rs_thunk with the user's identity attached.
#
# === Parameters
# username(String):: login name
# email(String):: user's email address
# uuid(String):: user's unique id
# superuser(Boolean):: whether to pass --superuser
# profile_data(String|nil):: optional profile blob, shell-escaped when present
#
# === Return
# prefix(String):: the command="..." option string (with trailing space)
def get_key_prefix(username, email, uuid, superuser, profile_data = nil)
  profile_opt = ""
  if profile_data
    escaped = Shellwords.escape(profile_data).gsub('"', '\\"')
    profile_opt = " --profile #{escaped}"
  end
  superuser_opt = superuser ? " --superuser" : ""
  %Q{command="rs_thunk --username #{username} --uuid #{uuid}#{superuser_opt} --email #{email}#{profile_opt}" }
end
|
ruby
|
{
"resource": ""
}
|
q3806
|
RightScale.LoginManager.get_ssh_host_keys
|
train
|
# Reads the SSH host key paths configured in sshd_config and returns the
# contents of the corresponding public key files.
#
# === Return
# keys(Array):: contents of every existing "<HostKey path>.pub" file
def get_ssh_host_keys()
  # Try to read the sshd_config file first
  keys = File.readlines(
    File.join(RightScale::Platform.filesystem.ssh_cfg_dir, 'sshd_config')).map do |l|
    key = nil
    /^\s*HostKey\s+([^ ].*)/.match(l) { |m| key = m.captures[0] }
    key
  end.compact

  # If the config file was empty, try these defaults
  keys = SSH_DEFAULT_KEYS if keys.empty?

  # Public keys live alongside the private keys with a '.pub' extension;
  # read each one that exists. File.exist? replaces File.exists?, which was
  # deprecated and removed in Ruby 3.2.
  keys.map { |k| k = "#{k}.pub"; File.exist?(k) ? File.read(k) : nil }.compact
end
|
ruby
|
{
"resource": ""
}
|
q3807
|
RightScale.LoginManager.update_users
|
train
|
# Updates the login policy's users, retrieving any public keys that are not
# already cached in the stored instance state, then finalizes the policy.
#
# === Parameters
# users(Array):: users from the new login policy (mutated in place)
# agent_identity(String):: serialized agent identity
# new_policy(LoginPolicy):: policy being applied
#
# === Block
# yields the human-readable audit content produced by finalize_policy
#
# === Return
# true:: always (missing-key retrieval completes asynchronously)
def update_users(users, agent_identity, new_policy)
  # Create cache of public keys from stored instance state
  # but there won't be any on initial launch
  public_keys_cache = {}
  if old_policy = InstanceState.login_policy
    public_keys_cache = old_policy.users.inject({}) do |keys, user|
      user.public_key_fingerprints ||= user.public_keys.map { |key| fingerprint(key, user.username) }
      user.public_keys.zip(user.public_key_fingerprints).each { |(k, f)| keys[f] = k if f }
      keys
    end
  end

  # See if there are any missing keys and if so, send a request to retrieve them
  # Then make one more pass to populate any missing keys and reject any that are still not populated
  unless (missing = populate_public_keys(users, public_keys_cache)).empty?
    payload = {:agent_identity => agent_identity, :public_key_fingerprints => missing.map { |(u, f)| f }}
    request = RightScale::RetryableRequest.new("/key_server/retrieve_public_keys", payload)
    request.callback do |public_keys|
      if public_keys
        # second pass: drop any keys still missing after the server replied
        missing = populate_public_keys(users, public_keys, remove_if_missing = true)
        finalize_policy(new_policy, agent_identity, users, missing.map { |(u, f)| u }.uniq) do |audit_content|
          yield audit_content
        end
      end
    end
    request.errback do |error|
      # retrieval failed: finalize with whatever keys could be resolved locally
      Log.error("Failed to retrieve public keys for users #{missing.map { |(u, f)| u.username }.uniq.inspect} (#{error})")
      missing = populate_public_keys(users, {}, remove_if_missing = true)
      finalize_policy(new_policy, agent_identity, users, missing.map { |(u, f)| u }.uniq) do |audit_content|
        yield audit_content
      end
    end
    request.run
  else
    finalize_policy(new_policy, agent_identity, users, missing.map { |(u, f)| u }.uniq) do |audit_content|
      yield audit_content
    end
  end
  true
end
|
ruby
|
{
"resource": ""
}
|
q3808
|
RightScale.LoginManager.finalize_policy
|
train
|
# Applies a fully populated login policy: reconciles system accounts,
# rewrites the rightscale authorized_keys file, tags the instance as
# restricted, and schedules the next policy expiry.
#
# === Parameters
# new_policy(LoginPolicy):: policy to persist in instance state
# agent_identity(String):: serialized agent identity
# new_policy_users(Array):: users with populated public keys
# missing(Array):: users whose public keys could not be populated
#
# === Block
# yields a human-readable description of the policy (e.g. for an audit entry)
#
# === Return
# true:: always
def finalize_policy(new_policy, agent_identity, new_policy_users, missing)
  manage_existing_users(new_policy_users)

  user_lines = login_users_to_authorized_keys(new_policy_users)

  InstanceState.login_policy = new_policy

  write_keys_file(user_lines, RIGHTSCALE_KEYS_FILE, { :user => 'rightscale', :group => 'rightscale' })

  # tag the instance as having restricted login (only once per agent run)
  unless @login_policy_tag_set
    tags = [RESTRICTED_TAG]
    AgentTagManager.instance.add_tags(tags)
    @login_policy_tag_set = true
  end

  # Schedule a timer to handle any expiration that is planned to happen in the future
  schedule_expiry(new_policy, agent_identity)

  # Yield a human-readable description of the policy, e.g. for an audit entry
  yield describe_policy(new_policy_users, new_policy_users.select { |u| u.superuser }, missing)

  true
end
|
ruby
|
{
"resource": ""
}
|
q3809
|
RightScale.LoginManager.populate_public_keys
|
train
|
# Fills in user public keys from a fingerprint => key cache and reports
# which keys remain unresolved. Mutates users (and the users array itself,
# via reject!) in place.
#
# === Parameters
# users(Array):: users whose public_keys/public_key_fingerprints are updated in place
# public_keys_cache(Hash):: fingerprint => public key
# remove_if_missing(Boolean):: when true, unresolvable keys are dropped and
#   users left with no keys at all are rejected from the array
#
# === Return
# missing(Array):: [user, fingerprint] pairs that could not be resolved
def populate_public_keys(users, public_keys_cache, remove_if_missing = false)
  missing = []
  users.reject! do |user|
    reject = false

    # Create any missing fingerprints from the public keys so that fingerprints
    # are as populated as possible
    user.public_key_fingerprints ||= user.public_keys.map { |key| fingerprint(key, user.username) }
    user.public_key_fingerprints = user.public_keys.zip(user.public_key_fingerprints).map do |(k, f)|
      f || fingerprint(k, user.username)
    end

    # Where possible use cache of old public keys to populate any missing ones
    public_keys = user.public_keys.zip(user.public_key_fingerprints).inject([]) do |keys, (k, f)|
      if f
        if k ||= public_keys_cache[f]
          keys << k
        else
          if remove_if_missing
            Log.error("Failed to obtain public key with fingerprint #{f.inspect} for user #{user.username}, " +
                      "removing it from login policy")
          else
            # NOTE(review): k is nil here, so this appends nil — apparently to
            # keep the keys array positionally aligned with the fingerprints
            # until a later retry; confirm downstream tolerates nil entries.
            keys << k
          end
          missing << [user, f]
        end
      else
        # no fingerprint could be computed for this key; it is dropped
        # unconditionally (not appended to keys)
        Log.error("Failed to obtain public key with fingerprint #{f.inspect} for user #{user.username}, " +
                  "removing it from login policy")
      end
      keys
    end

    # Reject user if none of its public keys could be populated
    # This will not happen unless remove_if_missing is true
    if public_keys.empty?
      reject = true
    else
      user.public_keys = public_keys
    end
    reject
  end
  missing
end
|
ruby
|
{
"resource": ""
}
|
q3810
|
RightScale.LoginManager.fingerprint
|
train
|
# Computes the fingerprint of an SSH public key, returning nil (and logging)
# on any failure so that one bad key never aborts policy processing.
def fingerprint(public_key, username)
  return nil unless public_key
  LoginUser.fingerprint(public_key)
rescue Exception => e
  Log.error("Failed to create public key fingerprint for user #{username}", e)
  nil
end
|
ruby
|
{
"resource": ""
}
|
q3811
|
RightScale.LoginManager.load_keys
|
train
|
# Loads and parses an authorized_keys file.
#
# === Parameters
# path(String):: path of the keys file to read
#
# === Return
# keys(Array):: [algorithm, key, comment] triples for each parseable entry
def load_keys(path)
  file_lines = read_keys_file(path)

  keys = []
  # Iterate with each: the original used map but discarded its result,
  # accumulating into keys as a side effect.
  file_lines.each do |l|
    components = LoginPolicy.parse_public_key(l)
    if components
      # preserve algorithm, key and comments; discard options (the 0th element)
      keys << [components[1], components[2], components[3]]
    elsif l !~ COMMENT
      RightScale::Log.error("Malformed (or not SSH2) entry in authorized_keys file: #{l}")
    end
  end
  keys
end
|
ruby
|
{
"resource": ""
}
|
q3812
|
RightScale.LoginManager.describe_policy
|
train
|
# Builds a short human-readable summary of the login policy for auditing.
#
# === Parameters
# users(Array):: all authorized users
# superusers(Array):: subset of users with superuser privileges
# missing(Array):: users whose public keys could not be obtained
#
# === Return
# summary(String):: audit text
def describe_policy(users, superusers, missing = [])
  normal_count = (users - superusers).size
  summary = "#{users.size} authorized users (#{normal_count} normal, #{superusers.size} superuser).\n"
  unless missing.empty?
    summary << "Public key missing for #{missing.map { |u| u.username }.join(", ")}.\n"
  end
  summary
end
|
ruby
|
{
"resource": ""
}
|
q3813
|
RightScale.LoginManager.login_users_to_authorized_keys
|
train
|
# Converts the policy's users into sorted authorized_keys lines, one per
# public key, skipping users whose access has already expired.
#
# === Parameters
# new_users(Array):: users from the new policy
#
# === Return
# lines(Array):: sorted authorized_keys entries
def login_users_to_authorized_keys(new_users)
  now = Time.now
  lines = new_users.flat_map do |user|
    next [] unless user.expires_at.nil? || user.expires_at > now
    user.public_keys.map do |key|
      "#{get_key_prefix(user.username, user.common_name, user.uuid, user.superuser, user.profile_data)} #{key}"
    end
  end
  lines.sort
end
|
ruby
|
{
"resource": ""
}
|
q3814
|
RightScale.LoginManager.manage_existing_users
|
train
|
# Reconciles system accounts with the new policy's user list: disables
# accounts that dropped out of the policy and (re)manages the rest,
# disabling any whose grant has expired.
#
# === Parameters
# new_policy_users(Array):: users from the policy being applied
def manage_existing_users(new_policy_users)
  now = Time.now

  # index the previously known users by uuid (empty on first launch)
  previous = {}
  if InstanceState.login_policy
    InstanceState.login_policy.users.each do |user|
      previous[user.uuid] = user
    end
  end

  current = {}
  new_policy_users.each do |user|
    current[user.uuid] = user
  end

  added = current.keys - previous.keys
  removed = previous.keys - current.keys
  stayed = current.keys & previous.keys

  # users no longer in the policy are disabled (accounts are never deleted)
  removed.each do |k|
    begin
      user = current[k] || previous[k]
      LoginUserManager.manage_user(user.uuid, user.superuser, :disable => true)
    rescue Exception => e
      RightScale::Log.error("Failed to disable user '#{user.uuid}'", e) unless e.is_a?(ArgumentError)
    end
  end

  (added + stayed).each do |k|
    begin
      user = current[k] || previous[k]
      # an expired grant disables the account rather than removing it
      disable = !!(user.expires_at) && (now >= user.expires_at)
      LoginUserManager.manage_user(user.uuid, user.superuser, :disable => disable)
    rescue Exception => e
      RightScale::Log.error("Failed to manage existing user '#{user.uuid}'", e) unless e.is_a?(ArgumentError)
    end
  end
rescue Exception => e
  RightScale::Log.error("Failed to manage existing users", e)
end
|
ruby
|
{
"resource": ""
}
|
q3815
|
RightScale.LoginManager.write_keys_file
|
train
|
# Writes an authorized_keys file with a warning header, locking down the
# permissions of both the file (0600) and its directory (0700).
#
# === Parameters
# keys(Array):: authorized_keys lines to write
# keys_file(String):: destination path (parent directories are created)
# chown_params(Hash|nil):: optional :user/:group to chown the tree to
#
# === Return
# true:: always
def write_keys_file(keys, keys_file, chown_params = nil)
  keys_dir = File.dirname(keys_file)
  FileUtils.mkdir_p(keys_dir)
  FileUtils.chmod(0700, keys_dir)

  header = [
    "#" * 78,
    "# USE CAUTION WHEN EDITING THIS FILE BY HAND",
    "# This file is generated based on the RightScale dashboard permission",
    "# 'server_login'. You can add trusted public keys to the file, but",
    "# it is regenerated every 24 hours and keys may be added or removed",
    "# without notice if they correspond to a dashboard user.",
    "#",
    "# Instead of editing this file, you probably want to do one of the",
    "# following:",
    "# - Edit dashboard permissions (Settings > Account > Users)",
    "# - Change your personal public key (Settings > User > SSH)",
    "#"
  ]

  File.open(keys_file, 'w') do |f|
    header.each { |line| f.puts(line) }
    keys.each { |key| f.puts(key) }
  end

  FileUtils.chmod(0600, keys_file)
  FileUtils.chown_R(chown_params[:user], chown_params[:group], keys_dir) if chown_params
  true
end
|
ruby
|
{
"resource": ""
}
|
q3816
|
SadPanda.Helpers.frequencies_for
|
train
|
# Counts occurrences of each word.
#
# The original called words.count(word) once per element, which is O(n^2);
# a single accumulation pass is O(n) and yields identical counts.
#
# @param words [Array<String>] tokens to tally
# @return [Hash] word => number of occurrences
def frequencies_for(words)
  words.each_with_object(Hash.new(0)) { |word, frequencies| frequencies[word] += 1 }
end
|
ruby
|
{
"resource": ""
}
|
q3817
|
SadPanda.Helpers.stems_for
|
train
|
# Reduces each word to its English stem using the Lingua stemmer.
#
# NOTE(review): map! mutates the caller's array in place (and returns it);
# confirm callers depend on that before switching to a non-destructive map.
def stems_for(words)
  stemmer = Lingua::Stemmer.new(language: 'en')
  words.map! { |word| stemmer.stem(word) }
end
|
ruby
|
{
"resource": ""
}
|
q3818
|
SadPanda.Helpers.emojies_in
|
train
|
# Collects every sad or happy emoticon occurrence found in the text.
def emojies_in(text)
  found = []
  (sad_emojies + happy_emojies).each do |emoji|
    found << text.scan(emoji)
  end
  found.flatten
end
|
ruby
|
{
"resource": ""
}
|
q3819
|
SadPanda.Helpers.sanitize
|
train
|
# Normalizes raw text to lowercase letters and single spaces for mood scoring.
#
# NOTE(review): the first substitution already strips every character outside
# [a-zA-Z ], so the URL pattern below can no longer match characters such as
# ':' or '/'; presumably the ordering drifted over time — confirm before
# reordering these rules.
def sanitize(text)
  # keep only letters and spaces (destructive on the caller's string)
  text.gsub!(/[^a-z ]/i, '')
  # attempt to strip URL-shaped fragments (see ordering note above)
  text.gsub!(/((([A-Za-z]{3,9}:(?:\/\/)?)(?:[-;:&=\+\$,\w]+@)?[A-Za-z0-9.-]+|(?:www.|[-;:&=\+\$,\w]+@)[A-Za-z0-9.-]+)((?:\/[\+~%\/.\w-_]*)?\??(?:[-\+=&;%@.\w_]*)#?(?:[\w]*))?)/, '')
  # drop words containing all of the letters h, t, t, p (leftover "http" words)
  text.gsub!(/(?=\w*h)(?=\w*t)(?=\w*t)(?=\w*p)\w*/, '')
  # collapse runs of whitespace to a single space
  text.gsub!(/\s\s+/, ' ')
  # return a lowercased copy (note: 'text' itself is left un-downcased)
  text.downcase
end
|
ruby
|
{
"resource": ""
}
|
q3820
|
RightScale.AuditLogFormatter.call
|
train
|
# Formats one audit log entry as "HH:MM:SS: message\n", normalizing the
# message and masking sensitive inputs before emitting it.
def call(severity, time, progname, msg)
  timestamp = time.strftime("%H:%M:%S")
  "#{timestamp}: #{hide_inputs(msg2str(msg))}\n"
end
|
ruby
|
{
"resource": ""
}
|
q3821
|
RightScale.AuditLogger.is_filtered?
|
train
|
# True when the message matches one of the filters configured for the
# given severity; false when no filters exist or none match.
def is_filtered?(severity, message)
  filters = MESSAGE_FILTERS[severity]
  return false unless filters
  filters.any? { |filter| filter =~ message }
end
|
ruby
|
{
"resource": ""
}
|
q3822
|
RightScale.SystemConfigurator.configure_root_access
|
train
|
# Installs the SSH public key from the VS_SSH_PUBLIC_KEY environment
# variable into root's authorized_keys, when one is present.
#
# === Parameters
# options(Hash):: unused; kept for interface compatibility
#
# === Return
# nil when no key is available, otherwise the update_authorized_keys result
def configure_root_access(options = {})
  public_key = ENV['VS_SSH_PUBLIC_KEY'].to_s.strip
  # to_s guarantees a String, so only the empty check is needed
  # (the original also tested nil?, which could never be true here)
  if public_key.empty?
    puts "No public SSH key found in metadata"
    return
  end
  update_authorized_keys(public_key)
end
|
ruby
|
{
"resource": ""
}
|
q3823
|
RightScale.SystemConfigurator.update_authorized_keys
|
train
|
# Appends the given public key to root's authorized_keys unless it is
# already present, and enforces private (0600) permissions on the file.
#
# === Parameters
# public_key(String):: the SSH public key line to install
#
# === Return
# true:: always
def update_authorized_keys(public_key)
  auth_key_file = "/root/.ssh/authorized_keys"
  FileUtils.mkdir_p(File.dirname(auth_key_file)) # make sure the directory exists

  key_exists = false
  # File.exist? replaces File.exists?, deprecated and removed in Ruby 3.2.
  if File.exist?(auth_key_file)
    File.open(auth_key_file, "r") do |file|
      # each_line yields lines WITH the trailing newline; the original
      # compared the raw line so duplicate detection never fired and the
      # key was re-appended on every run. Compare the chomped line instead.
      file.each_line { |line| key_exists = true if line.chomp == public_key }
    end
  end

  if key_exists
    puts "Public ssh key for root already exists in #{auth_key_file}"
  else
    puts "Appending public ssh key to #{auth_key_file}"
    File.open(auth_key_file, "a") { |f| f.puts(public_key) }
  end

  # make sure it's private
  FileUtils.chmod(0600, auth_key_file)
  true
end
|
ruby
|
{
"resource": ""
}
|
q3824
|
RightScale.RightLinkAgentController.run_command
|
train
|
# Prints a status message and sends a named command to the local instance
# agent, echoing each response line.
#
# === Parameters
# message(String):: status text shown before the command runs
# command(String):: command name to send
#
# === Return
# true on success, false when the command failed or timed out
def run_command(message, command)
  puts message
  # non-verbose send with a 100 second time limit
  send_command({ :name => command }, false, 100) { |r| puts r }
  true
rescue SystemExit
  raise
rescue Exception => e
  $stderr.puts Log.format("Failed or else time limit was exceeded, confirm that local instance is still running", e, :trace)
  false
end
|
ruby
|
{
"resource": ""
}
|
q3825
|
RightScale.CloudUtilities.can_contact_metadata_server?
|
train
|
# Tests TCP reachability of a metadata server without blocking the process.
#
# === Parameters
# addr(String):: server address
# port(Integer):: server port
# timeout(Integer):: seconds to wait for the non-blocking connect
#
# === Return
# connected(Boolean):: true when the connect completed within the timeout
def can_contact_metadata_server?(addr, port, timeout=2)
  t = Socket.new(Socket::Constants::AF_INET, Socket::Constants::SOCK_STREAM, 0)
  saddr = Socket.pack_sockaddr_in(port, addr)
  connected = false

  begin
    # a non-blocking connect normally raises EINPROGRESS immediately
    t.connect_nonblock(saddr)
  rescue Errno::EINPROGRESS
    # wait (up to timeout) for the socket to become writable
    r, w, e = IO::select(nil, [t], nil, timeout)
    if !w.nil?
      connected = true
    else
      begin
        # retry; EISCONN means the connection completed in the meantime
        t.connect_nonblock(saddr)
      rescue Errno::EISCONN
        t.close
        connected = true
      rescue SystemCallError
      end
    end
  rescue SystemCallError
  end

  # NOTE(review): the socket is only closed on the EISCONN path; the other
  # paths leak the descriptor until GC — consider closing in an ensure block.
  connected
end
|
ruby
|
{
"resource": ""
}
|
q3826
|
RightScale.CloudUtilities.split_metadata
|
train
|
# Splits a delimited metadata blob into a hash of trimmed name/value pairs.
#
# === Parameters
# data(String|nil):: raw metadata (nil is treated as empty)
# splitter(String|Regexp):: separator between pairs
# name_value_delimiter(String):: separator between name and value
#
# === Return
# hash(Hash):: name => value, skipping malformed pairs
def split_metadata(data, splitter, name_value_delimiter = '=')
  data.to_s.split(splitter).each_with_object({}) do |pair, result|
    name, value = pair.split(name_value_delimiter, 2)
    result[name.strip] = value.strip if name && value
  end
end
|
ruby
|
{
"resource": ""
}
|
q3827
|
RightScale.ResultsMock.success_results
|
train
|
# Builds a successful Result envelope addressed from/to this mock agent.
#
# === Parameters
# content:: payload wrapped in OperationResult.success
# reply_to(String):: token identifying the original requester
#
# === Return
# Result:: success result keyed by this mock's @agent_id
def success_results(content = nil, reply_to = '*test*1')
  Result.new(AgentIdentity.generate, reply_to,
             { @agent_id => OperationResult.success(content) }, @agent_id)
end
|
ruby
|
{
"resource": ""
}
|
q3828
|
FutureProof.ThreadPool.perform
|
train
|
# Starts the pool's worker threads unless some are already alive.
#
# Each worker pops entries off @queue until it sees the :END_OF_WORK
# sentinel. A job is a two-element array — job[0] is the callable (passed as
# the block) and job[1] its argument list — whose outcome is pushed into
# @values (started before the workers spin up).
def perform
  unless @threads.any? { |t| t.alive? }
    @values.start!
    @size.times do
      @threads << Thread.new do
        while job = @queue.pop
          if job == :END_OF_WORK
            break
          else
            # splat the stored args and run the stored block
            @values.push *job[1], &job[0]
          end
        end
      end
    end
  end
end
|
ruby
|
{
"resource": ""
}
|
q3829
|
ProcToAst.Parser.parse
|
train
|
# Parses the source beginning at filename:linenum into an AST node.
#
# Lines are accumulated one at a time and re-parsed until they form a valid
# expression; trailing-comma fragments are retried via trim_and_retry.
# The file is opened with a block so the handle is closed even on the early
# returns (the original block-less File.open leaked the descriptor).
#
# @param filename [String] file containing the proc source
# @param linenum [Integer] 1-based line where the proc begins
# @return [Parser::AST::Node] the parsed node
# @raise [Parser::SyntaxError] when no parseable fragment is found
def parse(filename, linenum)
  @filename, @linenum = filename, linenum
  buf = []
  File.open(filename, "rb") do |file|
    file.each_with_index do |line, index|
      next if index < linenum - 1
      buf << line
      begin
        # returning from inside the block still closes the file (ensure)
        return do_parse(buf.join)
      rescue ::Parser::SyntaxError
        node = trim_and_retry(buf)
        return node if node
      end
    end
  end
  fail(::Parser::SyntaxError, 'Unknown error')
end
|
ruby
|
{
"resource": ""
}
|
q3830
|
ProcToAst.Parser.trim_and_retry
|
train
|
# Retries a parse of the accumulated lines wrapped in a dummy method call,
# stripping any trailing comma from the last line (handles inner Array,
# Hash or argument-list fragments). Returns nil when parsing still fails.
def trim_and_retry(buf)
  lines = buf.dup
  lines[-1] = lines[-1].gsub(/,\s*$/, "")
  do_parse("a(#{lines.join})") # wrap dummy method
rescue ::Parser::SyntaxError
end
|
ruby
|
{
"resource": ""
}
|
q3831
|
ErrbitGitlabPlugin.IssueTracker.errors
|
train
|
# Validates the tracker configuration.
#
# === Return
# Hash:: {} when the configuration is valid, otherwise {:base => "sentence"}
#   describing the first failure class encountered
def errors
  errs = []

  # Make sure that every field is filled out
  # (plain each: the original used each_with_object with an unused accumulator)
  self.class.fields.except(:project_id).each do |field_name, field_options|
    errs << "#{field_options[:label]} must be present" if options[field_name].blank?
  end

  # We can only perform the other tests if the necessary values are at least present
  return {:base => errs.to_sentence} unless errs.empty?

  # Check if the given endpoint actually exists
  unless gitlab_endpoint_exists?(options[:endpoint])
    errs << 'No Gitlab installation was found under the given URL'
    return {:base => errs.to_sentence}
  end

  # Check if a user by the given token exists
  unless gitlab_user_exists?(options[:endpoint], options[:api_token])
    errs << 'No user with the given API token was found'
    return {:base => errs.to_sentence}
  end

  # Check if there is a project with the given name on the server
  unless gitlab_project_id(options[:endpoint], options[:api_token], options[:path_with_namespace])
    errs << "A project named '#{options[:path_with_namespace]}' could not be found on the server.
        Please make sure to enter it exactly as it appears in your address bar in Gitlab (case sensitive)"
    return {:base => errs.to_sentence}
  end

  {}
end
|
ruby
|
{
"resource": ""
}
|
q3832
|
ErrbitGitlabPlugin.IssueTracker.gitlab_endpoint_exists?
|
train
|
# Checks whether a Gitlab API endpoint answers at the given URL.
#
# Probes the endpoint with a deliberately bogus token ('Iamsecret'): a live
# Gitlab rejects it with Unauthorized, which therefore counts as proof the
# installation exists; any other error means nothing answered like Gitlab.
def gitlab_endpoint_exists?(gitlab_url)
  with_gitlab(gitlab_url, 'Iamsecret') do |g|
    g.user
  end
rescue Gitlab::Error::Unauthorized
  true
rescue Exception
  false
end
|
ruby
|
{
"resource": ""
}
|
q3833
|
ErrbitGitlabPlugin.IssueTracker.gitlab_user_exists?
|
train
|
# True when the private token authenticates successfully against the
# Gitlab installation at the given URL.
def gitlab_user_exists?(gitlab_url, private_token)
  with_gitlab(gitlab_url, private_token) { |client| client.user }
  true
rescue Gitlab::Error::Unauthorized
  false
end
|
ruby
|
{
"resource": ""
}
|
q3834
|
ErrbitGitlabPlugin.IssueTracker.with_gitlab
|
train
|
# Yields a configured Gitlab API client for the given endpoint and token,
# defaulting both to this tracker's stored configuration.
def with_gitlab(gitlab_url = options[:endpoint], private_token = options[:api_token])
  client = Gitlab.client(
    endpoint: gitlab_endpoint(gitlab_url),
    private_token: private_token,
    user_agent: 'Errbit User Agent'
  )
  yield client
end
|
ruby
|
{
"resource": ""
}
|
q3835
|
RightScale.MetadataWriter.create_full_path
|
train
|
# Resolves file_name to its full output path, creating any missing parent
# directories along the way.
def create_full_path(file_name)
  result = full_path(file_name)
  parent = File.dirname(result)
  FileUtils.mkdir_p(parent)
  result
end
|
ruby
|
{
"resource": ""
}
|
q3836
|
RightScale.MetadataWriter.write_file
|
train
|
# Serializes the metadata (via to_s) into this writer's output file,
# creating it with the default file mode.
def write_file(metadata)
  path = create_full_path(@file_name_prefix)
  File.open(path, "w", DEFAULT_FILE_MODE) { |file| file.write(metadata.to_s) }
end
|
ruby
|
{
"resource": ""
}
|
q3837
|
RightScale.Cloud.write_metadata
|
train
|
# Writes cloud and/or user metadata through the configured writers.
#
# === Parameters
# kind(Symbol|String):: WILDCARD (both), :user_metadata or :cloud_metadata
#
# === Return
# ActionResult:: success, or exitstatus 1 carrying the raised exception
def write_metadata(kind = WILDCARD)
  options = @options.dup
  kind = kind.to_sym
  if kind == WILDCARD || kind == :user_metadata
    # Both "blue-skies" cloud and "wrap instance" behave the same way, they lay down a
    # file in a predefined location (/var/spool/rightscale/user-data.txt on linux,
    # C:\ProgramData\RightScale\spool\rightscale\user-data.txt on windows. In both
    # cases this userdata has *lower* precedence than cloud data. On a start/stop
    # action where userdata is updated, we want the NEW userdata, not the old. So
    # if cloud-based values exists, than we always use those.
    api_source = RightScale::MetadataSources::RightScaleApiMetadataSource.new(options)
    cloud_userdata_raw = ""
    if api_source.source_exists?
      extra_userdata_raw = api_source.get()
      if (name == "azure")
        # Azure is a special case -- we don't want to run the cloud userdata fetcher again
        # as we can't update userdata anyways and it will currently fail as written
        extra_userdata_raw = get_updated_userdata(extra_userdata_raw)
        cloud_userdata_raw = extra_userdata_raw
      elsif (name == "rackspace")
        # Rackspace is another type of special case, for different reasons.
        # The "wait_for_instance_ready" function on rackspace will get stuck in an
        # infinite loops waiting for the userdata file to appear in the wrap instance
        # or blue-skies cases. Since we don't support start/stop on rackspace anyways
        # we can just skip the whole fetching of updated userdata to avoid this
        # infinite loop scenario, instead just always using the blue-skies/wrap
        # data that's on disk. The downside is that you better delete that data
        # before a rebundle or it won't work. See rackspace/wait_for_instance_ready.rb
        # counterpart code as well
        cloud_userdata_raw = extra_userdata_raw
      else
        cloud_userdata_raw = userdata_raw
        # fall back to the on-disk (api source) data unless the cloud
        # userdata already looks populated (contains an RS_rn_id token)
        unless cloud_userdata_raw =~ /RS_rn_id/i
          cloud_userdata_raw = extra_userdata_raw
        end
      end
    else
      cloud_userdata_raw = userdata_raw
    end
    cloud_userdata = parse_userdata(cloud_userdata_raw)
    # Raw userdata is a special exception and gets its own writer
    raw_writer = metadata_writers(:user_metadata).find { |writer| writer.kind_of?(RightScale::MetadataWriters::RawMetadataWriter) }
    raw_writer.write(cloud_userdata_raw)
    unless cloud_userdata.empty?
      metadata_writers(:user_metadata).each { |writer| writer.write(cloud_userdata) }
    end
  end
  if kind == WILDCARD || kind == :cloud_metadata
    cloud_metadata = metadata
    unless cloud_metadata.empty?
      metadata_writers(:cloud_metadata).each { |writer| writer.write(cloud_metadata) }
    end
  end
  return ActionResult.new
rescue Exception => e
  return ActionResult.new(:exitstatus => 1, :error => "ERROR: #{e.message}", :exception => e)
ensure
  finish()
end
|
ruby
|
{
"resource": ""
}
|
q3838
|
RightScale.Cloud.clear_state
|
train
|
# Removes the output directories of all metadata writers.
#
# Failures are collected so that every directory gets a removal attempt;
# the last failure (if any) is then re-raised and converted into an error
# ActionResult by the method-level rescue.
#
# === Return
# ActionResult:: success, or exitstatus 1 carrying the raised exception
def clear_state
  output_dir_paths = []
  [:user_metadata, :cloud_metadata].each do |kind|
    # set-union so a directory shared by both kinds is only removed once
    output_dir_paths |= metadata_writers(kind).map { |w| w.output_dir_path }
  end
  last_exception = nil
  output_dir_paths.each do |output_dir_path|
    begin
      FileUtils.rm_rf(output_dir_path) if File.directory?(output_dir_path)
    rescue Exception => e
      last_exception = e
    end
  end
  # NOTE(review): fail() with a message raises RuntimeError, discarding the
  # original exception class and backtrace — confirm whether that is intended.
  fail(last_exception.message) if last_exception
  return ActionResult.new
rescue Exception => e
  return ActionResult.new(:exitstatus => 1, :error => "ERROR: #{e.message}", :exception => e)
end
|
ruby
|
{
"resource": ""
}
|
q3839
|
RightScale.Cloud.metadata_writers
|
train
|
# Returns the metadata writers for the given kind, constructing and
# memoizing them on first use.
#
# === Parameters
# kind(Symbol):: :user_metadata or :cloud_metadata
#
# === Return
# writers(Array):: instantiated writers for that kind
def metadata_writers(kind)
  return @metadata_writers[kind] if @metadata_writers && @metadata_writers[kind]
  @metadata_writers ||= {}
  @metadata_writers[kind] ||= []
  options = @options.dup
  options[:kind] = kind
  if kind == :user_metadata
    options[:formatted_path_prefix] = "RS_"
    options[:output_dir_path] ||= RightScale::AgentConfig.cloud_state_dir
    options[:file_name_prefix] = "user-data"
    options[:generation_command] = nil
  elsif kind == :cloud_metadata
    options[:formatted_path_prefix] = "#{abbreviation.upcase}_"
    options[:output_dir_path] ||= RightScale::AgentConfig.cloud_state_dir
    options[:file_name_prefix] = "meta-data"
    options[:generation_command] = cloud_metadata_generation_command if generates_metadata_cache?
  end
  begin
    writers_dir_path = File.join(File.dirname(__FILE__), 'metadata_writers')
    # dynamically register every writer script in the directory, deriving the
    # class name from the script file name (e.g. raw_metadata_writer.rb).
    pattern = File.join(writers_dir_path, '*.rb')
    Dir[pattern].each do |writer_script_path|
      writer_name = File.basename(writer_script_path, '.rb')
      require writer_script_path
      writer_class_name = writer_name.split(/[_ ]/).map {|w| w.capitalize}.join
      # NOTE(review): eval of a name derived from bundled file names; the
      # input is trusted, but Module#const_get would avoid eval entirely.
      writer_class = eval("RightScale::MetadataWriters::#{writer_class_name}")
      @metadata_writers[kind] << writer_class.new(options)
    end
  end
  @metadata_writers[kind]
end
|
ruby
|
{
"resource": ""
}
|
q3840
|
RightScale.Cloud.cloud_metadata_generation_command
|
train
|
# Builds the shell command used to (re)generate the cloud metadata cache
# by invoking the right_link 'cloud' executable with the write action.
def cloud_metadata_generation_command
  interpreter = File.normalize_path(AgentConfig.ruby_cmd)
  script = File.normalize_path(Gem.bin_path('right_link', 'cloud'))
  "#{interpreter} #{script} --action write_cloud_metadata"
end
|
ruby
|
{
"resource": ""
}
|
q3841
|
RightScale.Cloud.relative_to_script_path
|
train
|
# Resolves a possibly-relative path against the directory of the cloud
# script, normalizing backslashes to forward slashes first. Absolute
# paths are returned unchanged (apart from slash normalization).
def relative_to_script_path(path)
  normalized = path.gsub("\\", '/')
  return normalized if normalized == File.expand_path(normalized)
  File.normalize_path(File.join(File.dirname(@script_path), normalized))
end
|
ruby
|
{
"resource": ""
}
|
q3842
|
RightScale.CookState.dev_log_level
|
train
|
# Returns the log level requested via the instance's log-level tag, or nil
# when the tag is absent or names an unrecognized level.
def dev_log_level
  raw = tag_value(LOG_LEVEL_TAG)
  return nil unless raw
  level = raw.downcase.to_sym
  [:debug, :info, :warn, :error, :fatal].include?(level) ? level : nil
end
|
ruby
|
{
"resource": ""
}
|
q3843
|
RightScale.CookState.use_cookbooks_path?
|
train
|
# True when a dev cookbooks path is configured and every configured
# directory exists and contains at least one real entry.
#
# Fixes the original emptiness test, which compared Dir.entries(path)
# against the literal ['.', '..'] and therefore depended on the order in
# which the OS returned directory entries.
def use_cookbooks_path?
  paths = cookbooks_path
  return false unless paths
  paths.all? do |path|
    path && File.directory?(path) && !(Dir.entries(path) - ['.', '..']).empty?
  end
end
|
ruby
|
{
"resource": ""
}
|
q3844
|
RightScale.CookState.update
|
train
|
# Merges another state object (and explicit overrides) into this cook state,
# then persists the result via save_state.
#
# === Parameters
# state_to_merge:: object duck-typed like CookState (startup_tags, reboot?,
#   log_level, ...); only the accessors it responds to are merged
# overrides(Hash):: values that win over state_to_merge
#   (:startup_tags, :reboot, :log_file, :log_level)
#
# === Return
# true:: always
def update(state_to_merge, overrides = {})
  # only merge state if state to be merged has values
  @startup_tags = state_to_merge.startup_tags if state_to_merge.respond_to?(:startup_tags)
  @reboot = state_to_merge.reboot? if state_to_merge.respond_to?(:reboot?)
  @log_level = state_to_merge.log_level if state_to_merge.respond_to?(:log_level)

  # NOTE(review): log_file is called with state_to_merge.value as argument,
  # unlike the plain accessors above — confirm the expected duck type here.
  if state_to_merge.respond_to?(:log_file) && state_to_merge.respond_to?(:value)
    @log_file = state_to_merge.log_file(state_to_merge.value)
  end

  @startup_tags = overrides[:startup_tags] if overrides.has_key?(:startup_tags)
  @reboot = overrides[:reboot] if overrides.has_key?(:reboot)
  @log_file = overrides[:log_file] if overrides.has_key?(:log_file)

  # check the log level again after the startup_tags have been updated or
  # overridden.
  if overrides.has_key?(:log_level)
    @log_level = overrides[:log_level]
  elsif tagged_log_level = dev_log_level
    @log_level = tagged_log_level
  end

  save_state
  true
end
|
ruby
|
{
"resource": ""
}
|
q3845
|
RightScale.CookState.save_state
|
train
|
# Persists the current cook state to STATE_FILE as JSON.
#
# === Return
# true:: always
def save_state
  # start with all state that is always saved
  state_to_save = { 'startup_tags' => startup_tags,
                    'reboot' => reboot?,
                    'log_level' => log_level }
  # only save a log file if one is defined
  if log_file
    state_to_save['log_file'] = log_file
  end
  # only persist the fact we downloaded cookbooks if we are in dev mode
  if download_once?
    state_to_save['has_downloaded_cookbooks'] = has_downloaded_cookbooks?
  end
  RightScale::JsonUtilities::write_json(RightScale::CookState::STATE_FILE, state_to_save)
  true
end
|
ruby
|
{
"resource": ""
}
|
q3846
|
RightScale.CookState.load_state
|
train
|
# Restores cook state from STATE_FILE when it exists; a missing file leaves
# the current in-memory state untouched.
#
# === Return
# true:: always
def load_state
  return true unless File.file?(STATE_FILE)

  state = RightScale::JsonUtilities::read_json(STATE_FILE)
  @log_level = state['log_level'] || Logger::INFO
  Log.info("Initializing CookState from #{STATE_FILE} with #{state.inspect}") if @log_level == Logger::DEBUG
  @has_downloaded_cookbooks = state['has_downloaded_cookbooks']
  @startup_tags = state['startup_tags'] || []
  @reboot = state['reboot']
  @log_file = state['log_file'] # nil if not in state loaded from disk
  true
end
|
ruby
|
{
"resource": ""
}
|
q3847
|
RightScale.CloudFactory.register
|
train
|
# Register a cloud definition script under the given cloud name.
#
# === Parameters
# cloud_name(String|Symbol):: name to register the cloud under
# cloud_script_path(String):: path to the cloud definition script
#
# === Return
# true:: Always return true
def register(cloud_name, cloud_script_path)
  normalized_path = File.normalize_path(cloud_script_path)
  registered_type(cloud_name.to_s, normalized_path)
  true
end
|
ruby
|
{
"resource": ""
}
|
q3848
|
RightScale.CloudFactory.registered_script_path
|
train
|
# Look up the registered definition script path for a cloud name.
#
# === Return
# (String):: script path
#
# === Raise
# UnknownCloud:: if no script is registered under the given name
def registered_script_path(cloud_name)
  path = registered_type(cloud_name)
  raise UnknownCloud.new("Unknown cloud: #{cloud_name}") unless path
  path
end
|
ruby
|
{
"resource": ""
}
|
q3849
|
RightScale.CloudFactory.create
|
train
|
# Factory method: instantiate the cloud object registered under a name.
#
# === Parameters
# cloud_name(String|Symbol):: registered cloud name; UNKNOWN_CLOUD_NAME selects the default
# options(Hash):: creation options; :logger is required
#
# === Return
# cloud(Object):: new cloud instance, extended by any registered scripts
#
# === Raise
# ArgumentError:: on missing arguments or unloadable cloud class
# UnknownCloud:: if no cloud is registered or no default can be determined
def create(cloud_name, options)
  raise ArgumentError.new("cloud_name is required") if cloud_name.to_s.empty?
  raise ArgumentError.new("options[:logger] is required") unless logger = options[:logger]
  raise UnknownCloud.new("No cloud definitions available.") unless @names_to_script_paths
  cloud_name = cloud_name.to_sym
  cloud_name = default_cloud_name if UNKNOWN_CLOUD_NAME == cloud_name
  raise UnknownCloud.new("Unable to determine a default cloud") if UNKNOWN_CLOUD_NAME == cloud_name
  cloud_script_path = registered_script_path(cloud_name)
  # dup so the caller's options hash is not mutated below
  options = options.dup
  options[:name] ||= cloud_name.to_s
  options[:script_path] = cloud_script_path
  cloud = nil
  begin
    require cloud_script_path
    # e.g. :ec2 maps to RightScale::Clouds::Ec2
    cloud_classname = cloud_name.to_s.capitalize
    cloud_class = eval("RightScale::Clouds::#{cloud_classname}")
    cloud = cloud_class.new(options)
  rescue LoadError => e
    raise ArgumentError, "Could not load Cloud class for #{cloud_name}, #{e}"
  end
  extend_cloud_by_scripts(cloud, logger)
  return cloud
end
|
ruby
|
{
"resource": ""
}
|
q3850
|
RightScale.CloudFactory.default_cloud_name
|
train
|
# Read the default cloud name from the cloud file on disk.
#
# === Return
# (String):: persisted cloud name, or UNKNOWN_CLOUD_NAME when the file is
#   missing or blank
def default_cloud_name
  path = RightScale::AgentConfig.cloud_file_path
  contents = File.file?(path) ? File.read(path).strip : nil
  contents.to_s.empty? ? UNKNOWN_CLOUD_NAME : contents
end
|
ruby
|
{
"resource": ""
}
|
q3851
|
RightScale.MultiThreadBundleQueue.busy?
|
train
|
# Whether any per-thread queue is currently busy.
#
# === Return
# (Boolean):: true if at least one thread queue reports busy
def busy?
  @mutex.synchronize do
    @thread_name_to_queue.values.any? { |queue| queue.busy? }
  end
end
|
ruby
|
{
"resource": ""
}
|
q3852
|
RightScale.MultiThreadBundleQueue.push_to_thread_queue
|
train
|
# Route a bundle context to the queue for its thread name, creating the
# queue on demand. Queue creation/lookup happens under the mutex; the push
# and (re)activation happen outside it.
#
# === Parameters
# context(Object):: bundle context; may expose #thread_name, otherwise the
#   default thread name is used
#
# === Return
# true:: Always return true
def push_to_thread_queue(context)
  thread_name = context.respond_to?(:thread_name) ? context.thread_name : ::RightScale::AgentConfig.default_thread_name
  queue = nil
  @mutex.synchronize do
    queue = @thread_name_to_queue[thread_name]
    unless queue
      # continuation for when thread-named queue is finally closed.
      queue = create_thread_queue(thread_name) { push(THREAD_QUEUE_CLOSED_BUNDLE) }
      @thread_name_to_queue[thread_name] = queue
    end
  end
  # push context to selected thread queue
  queue.push(context)
  # always (re)activate in case an individual thread queue died unexpectedly.
  # has no effect if already active.
  queue.activate
  true
end
|
ruby
|
{
"resource": ""
}
|
q3853
|
RightScale.MultiThreadBundleQueue.groom_thread_queues
|
train
|
# Drop any thread queues that are no longer active.
#
# === Return
# (Boolean):: true if at least one active queue remains after grooming
def groom_thread_queues
  @mutex.synchronize do
    @thread_name_to_queue.delete_if { |_, queue| queue.active? == false }
    @thread_name_to_queue.empty? == false
  end
end
|
ruby
|
{
"resource": ""
}
|
q3854
|
RightScale.MultiThreadBundleQueue.close_thread_queues
|
train
|
# Request close on every still-active thread queue.
#
# === Return
# (Boolean):: true if any queue was active (and so had close requested)
def close_thread_queues
  @mutex.synchronize do
    active_queues = @thread_name_to_queue.values.select { |queue| queue.active? }
    active_queues.each { |queue| queue.close }
    active_queues.empty? == false
  end
end
|
ruby
|
{
"resource": ""
}
|
q3855
|
RightScale.InstanceAuthClient.create_http_client
|
train
|
# (Re)create the balanced HTTP client used for authorization requests,
# embedding the token credentials into the API URL.
#
# === Return
# (RightScale::BalancedHttpClient):: newly created client (also stored in @http_client)
def create_http_client
  auth_url = URI.parse(@api_url)
  auth_url.user = @token_id.to_s
  auth_url.password = @token
  settings = {
    :api_version => API_VERSION,
    :open_timeout => DEFAULT_OPEN_TIMEOUT,
    :request_timeout => DEFAULT_REQUEST_TIMEOUT,
    :non_blocking => @non_blocking }
  @http_client = RightScale::BalancedHttpClient.new(auth_url.to_s, settings)
end
|
ruby
|
{
"resource": ""
}
|
q3856
|
RightScale.InstanceAuthClient.get_authorized
|
train
|
# Obtain an OAuth2 access token from the RightApi, retrying on transient
# failures and following a bounded number of redirects. On success stores
# the token and expiry, updates URLs/mode from the response, transitions to
# :authorized and fires any communicated callbacks.
#
# === Return
# true:: Always return true
#
# === Raise
# Exceptions::Unauthorized:: if credentials are rejected
# BalancedHttpClient::NotResponding:: after exhausting retries
# CommunicationModeSwitch:: if the server switches communication mode
def get_authorized
  retries = redirects = 0
  # remember the original URL so it can be restored if redirect handling fails
  api_url = @api_url
  begin
    Log.info("Getting authorized via #{@api_url}")
    params = {
      :grant_type => "client_credentials",
      :account_id => @account_id,
      :r_s_version => AgentConfig.protocol_version,
      :right_link_version => RightLink.version }
    response = @http_client.post("/oauth2", params,
      :headers => @other_headers,
      :open_timeout => DEFAULT_OPEN_TIMEOUT,
      :request_timeout => DEFAULT_REQUEST_TIMEOUT)
    response = SerializationHelper.symbolize_keys(response)
    @access_token = response[:access_token]
    @expires_at = Time.now + response[:expires_in]
    update_urls(response)
    self.state = :authorized
    @communicated_callbacks.each { |callback| callback.call } if @communicated_callbacks
  rescue BalancedHttpClient::NotResponding
    # bounded retry with a fixed delay; re-raise once exhausted
    if (retries += 1) > MAX_RETRIES
      ErrorTracker.log(self, "Exceeded maximum authorization retries (#{MAX_RETRIES})")
    else
      sleep(RETRY_INTERVAL)
      retry
    end
    raise
  rescue HttpExceptions::MovedPermanently, HttpExceptions::Found => e
    # follow redirects up to MAX_REDIRECTS; restore original URL on failure
    if (redirects += 1) > MAX_REDIRECTS
      ErrorTracker.log(self, "Exceeded maximum redirects (#{MAX_REDIRECTS})")
    elsif redirected(e)
      retry
    end
    @api_url = api_url
    raise
  rescue HttpExceptions::Unauthorized => e
    self.state = :unauthorized
    @access_token = nil
    @expires_at = Time.now
    raise Exceptions::Unauthorized.new(e.http_body, e)
  end
  true
rescue BalancedHttpClient::NotResponding, Exceptions::Unauthorized, CommunicationModeSwitch, Exceptions::ConnectivityFailure
  raise
rescue StandardError => e
  ErrorTracker.log(self, "Failed authorizing", e)
  self.state = :failed
  raise
end
|
ruby
|
{
"resource": ""
}
|
q3857
|
RightScale.InstanceAuthClient.renew_authorization
|
train
|
# Schedule (or immediately perform, when wait == 0) a renewal of the OAuth
# authorization, then reschedule itself. Backs off on connectivity failures
# and repeated unauthorized responses; switches to reconnect polling when
# the token has effectively expired and the server is not responding.
#
# === Parameters
# wait(Integer|nil):: seconds to wait before renewing; nil derives a wait
#   from the remaining token lifetime (or 0 when not authorized)
#
# === Return
# true:: Always return true
def renew_authorization(wait = nil)
  wait ||= (state == :authorized) ? ((@expires_at - Time.now).to_i / RENEW_FACTOR) : 0
  # an explicit immediate renewal cancels any pending timer
  if @renew_timer && wait == 0
    @renew_timer.cancel
    @renew_timer = nil
  end
  unless @renew_timer
    @renew_timer = EM_S::Timer.new(wait) do
      @renew_timer = nil
      previous_state = state
      begin
        get_authorized
        renew_authorization
      rescue Exceptions::ConnectivityFailure => e
        # exponential backoff capped at MAX_RENEW_TIME
        if wait > 0
          renew_authorization([(wait * RENEW_FACTOR), MAX_RENEW_TIME].min)
        else
          renew_authorization(MIN_RENEW_TIME)
        end
      rescue BalancedHttpClient::NotResponding => e
        if (expires_in = (@expires_at - Time.now).to_i) > MIN_RENEW_TIME
          renew_authorization(expires_in / RENEW_FACTOR)
        else
          self.state = :expired
          reconnect
        end
      rescue Exceptions::Unauthorized => e
        # back off progressively only while remaining unauthorized
        if previous_state == :unauthorized && wait > 0
          renew_authorization([(wait * RENEW_FACTOR), MAX_UNAUTHORIZED_RENEW_INTERVAL].min)
        else
          renew_authorization(UNAUTHORIZED_RENEW_INTERVAL)
        end
      rescue CommunicationModeSwitch => e
        ErrorTracker.log(self, "Failed authorization renewal", e, nil, :no_trace)
        self.state = :failed
      rescue Exception => e
        ErrorTracker.log(self, "Failed authorization renewal", e)
        self.state = :failed
      end
    end
  end
  true
end
|
ruby
|
{
"resource": ""
}
|
q3858
|
RightScale.InstanceAuthClient.update_urls
|
train
|
# Apply mode, shard and URL settings from an authorization response.
# Recreates the HTTP client when the API URL changes.
#
# === Parameters
# response(Hash):: symbolized authorization response containing :mode,
#   :shard_id, :router_url and :api_url
#
# === Return
# true:: Always return true
#
# === Raise
# CommunicationModeSwitch:: if the reported mode differs from the current one
def update_urls(response)
  mode = response[:mode].to_sym
  raise CommunicationModeSwitch, "RightNet communication mode switching from #{@mode.inspect} to #{mode.inspect}" if @mode && @mode != mode
  @mode = mode
  @shard_id = response[:shard_id].to_i
  if (new_url = response[:router_url]) != @router_url
    Log.info("Updating RightNet router URL to #{new_url.inspect}")
    @router_url = new_url
  end
  if (new_url = response[:api_url]) != @api_url
    Log.info("Updating RightApi URL to #{new_url.inspect}")
    @api_url = new_url
    # client embeds the URL, so it must be rebuilt
    create_http_client
  end
  true
end
|
ruby
|
{
"resource": ""
}
|
q3859
|
RightScale.InstanceAuthClient.redirected
|
train
|
# Handle an HTTP redirect response by switching the API URL to the new
# scheme/host (preserving the existing path) and rebuilding the HTTP client.
#
# === Parameters
# exception(HttpException):: redirect exception whose response headers carry
#   the :location of the new endpoint
#
# === Return
# (Boolean):: true if the redirect was applied, false if the location was
#   missing or invalid
def redirected(exception)
  redirected = false
  location = exception.response.headers[:location]
  if location.nil? || location.empty?
    # fixed message: previously read "does contain", losing the negation
    ErrorTracker.log(self, "Redirect exception does not contain a redirect location")
  else
    new_url = URI.parse(location)
    if new_url.scheme !~ /http/ || new_url.host.empty?
      ErrorTracker.log(self, "Failed redirect because location is invalid: #{location.inspect}")
    else
      # Apply scheme and host from new URL to existing URL, but not path
      new_url.path = URI.parse(@api_url).path
      @api_url = new_url.to_s
      Log.info("Updating RightApi URL to #{@api_url.inspect} due to redirect to #{location.inspect}")
      @stats["state"].update("redirect")
      create_http_client
      redirected = true
    end
  end
  redirected
end
|
ruby
|
{
"resource": ""
}
|
q3860
|
RightScale.InstanceAuthClient.reconnect
|
train
|
# Begin periodic health-check polling to reconnect to the authorization
# server; on a successful health check cancels the poll and immediately
# renews authorization. Idempotent while a reconnect is already in progress.
#
# === Return
# true:: Always return true
def reconnect
  unless @reconnecting
    @reconnecting = true
    @stats["reconnects"].update("initiate")
    # random initial delay spreads reconnect load across instances
    @reconnect_timer = EM_S::PeriodicTimer.new(rand(HEALTH_CHECK_INTERVAL)) do
      begin
        @http_client.check_health
        @stats["reconnects"].update("success")
        @reconnect_timer.cancel if @reconnect_timer # only need 'if' for test purposes
        @reconnect_timer = @reconnecting = nil
        renew_authorization(0)
      rescue BalancedHttpClient::NotResponding => e
        @stats["reconnects"].update("no response")
      rescue Exception => e
        ErrorTracker.log(self, "Failed authorization reconnect", e)
        @stats["reconnects"].update("failure")
      end
      # settle into the regular polling interval after the first (random) delay
      @reconnect_timer.interval = HEALTH_CHECK_INTERVAL if @reconnect_timer
    end
  end
  true
end
|
ruby
|
{
"resource": ""
}
|
q3861
|
FutureProof.FutureQueue.push
|
train
|
# Push value(s) onto the queue, optionally computing the stored value via a
# block. A StandardError raised by the block is captured and enqueued in its
# place so that consumers can re-raise it on their side.
#
# === Parameters
# values(Array):: value(s) to store, or arguments for the block
#
# === Raise
# (exception):: raise_future_proof_exception if the queue is finished
def push(*values, &block)
  raise_future_proof_exception if finished?
  value = if block_given?
    begin
      block.call(*values)
    rescue => e
      # enqueue the error itself rather than losing it
      e
    end
  else
    # single value stored bare; multiple values stored as an array
    values.size == 1 ? values[0] : values
  end
  super(value)
end
|
ruby
|
{
"resource": ""
}
|
q3862
|
RightScale.WindowsNetworkConfigurator.get_device_ip
|
train
|
# Extract the IP address for a network device from its netsh config output.
#
# === Return
# (String|nil):: the IP address, or nil when no "IP Address" line is found
def get_device_ip(device)
  address_line = device_config_show(device).lines("\n").grep(/IP Address/).shift
  address_line && address_line.strip.split.last
end
|
ruby
|
{
"resource": ""
}
|
q3863
|
RightScale.Thunker.run
|
train
|
# Entry point for the login "thunker": creates/locates the local user for an
# incoming SSH session, audits the access, then execs the user's command or
# shell via sudo. Does not return on the success paths (Kernel.exec replaces
# the process image).
#
# === Parameters
# options(Hash):: :username, :email, :uuid (required); :superuser,
#   :profile, :force (optional; :profile/:force are consumed but unused here)
def run(options)
  @log_sink = StringIO.new
  @log = Logger.new(@log_sink)
  RightScale::Log.force_logger(@log)
  check_privileges if right_agent_running?
  username = options.delete(:username)
  email = options.delete(:email)
  uuid = options.delete(:uuid)
  superuser = options.delete(:superuser)
  profile = options.delete(:profile)
  force = options.delete(:force)
  fail(1) if missing_argument(username, "USERNAME") || missing_argument(email, "EMAIL") || missing_argument(uuid, "UUID")
  # Fetch some information about the client's intentions and origin
  orig = ENV['SSH2_ORIGINAL_COMMAND'] || ENV['SSH_ORIGINAL_COMMAND']
  client_ip = ENV['SSH_CLIENT'].split(/\s+/).first if ENV.has_key?('SSH_CLIENT')
  # classify the session by the original SSH command
  if orig =~ SCP_COMMAND
    access = :scp
  elsif orig =~ SFTP_COMMAND
    access = :sftp
  elsif (orig != nil) && (!orig.empty?)
    access = :command
  else
    access = :shell
  end
  # Create user just-in-time; idempotent if user already exists
  # Note that username == chosen here, they just get used in two different contexts
  username = LoginUserManager.create_user(username, uuid, superuser ? true : false) do |chosen|
    puts "Creating your user profile (#{chosen}) on this machine." if (:shell == access)
  end
  create_audit_entry(email, username, access, orig, client_ip) if right_agent_running?
  chown_tty(username)
  # Note that when execing sudo we use the N-argument form of Kernel.exec,
  # which does not invoke a shell, but rather directly invokes the command specified
  # by argv[0] and uses argv[1..N] as the command line. This protects us against shell
  # escape characters and other badness.
  #
  # Unfortunately, this means that file globs and other 'useful' shell escape characters
  # do not get parsed.
  #
  # As a workaround, for non-interactive access types, we tell sudo to invoke a shell and
  # use the shell's '-c' argument to specify the command to run. We also use the -H
  # argument to sudo, which forces it to set HOME to the user's homedir. We attempt to
  # set some other environment variables to make the user feel more at home, but we
  # are at the mercy of sudo.
  #
  # For interactive logins, we don't need to perform any trickiness since our goal is
  # simply to get the user into a shell, with no command line args to parse.
  case access
  when :scp, :sftp, :command
    LoginUserManager.simulate_login(username)
    Kernel.exec('sudo', '-H', '-u', username, '/bin/sh', '-c', "cd $HOME ; #{orig}")
  when :shell
    if right_agent_running?
      display_motd
    else
      display_right_link_is_not_running_warning
    end
    Kernel.exec('sudo', '-i', '-u', username)
  end
rescue SystemExit => e
  raise e
rescue Exception => e
  fail(e)
end
|
ruby
|
{
"resource": ""
}
|
q3864
|
RightScale.Thunker.create_audit_entry
|
train
|
# Create an audit entry describing an SSH access event and send it to the
# RightLink agent. Failures are logged and reported via the return value
# rather than raised (best-effort auditing must not block the login).
#
# === Parameters
# email(String):: email of the accessing user (for the audit record)
# username(String):: local Unix account name
# access(Symbol):: one of :scp, :sftp, :command, :shell
# command(String|nil):: the original SSH command, if any
# client_ip(String|nil):: client's source IP, if known
#
# === Return
# (Boolean):: true on success, false on any failure
def create_audit_entry(email, username, access, command, client_ip=nil)
  begin
    hostname = `hostname`.strip
  rescue Exception => e
    hostname = 'localhost'
  end
  case access
  when :scp then
    summary = 'SSH file copy'
    # fixed duplicated word: previously "User copied files copied (scp)"
    detail = "User copied files (scp) to/from host."
  when :sftp then
    summary = 'SSH interactive file transfer'
    detail = "User initiated an SFTP session."
  when :command
    summary = 'SSH command'
    detail = "User invoked an interactive program."
  when :shell
    summary = 'SSH interactive login'
    detail = "User connected and invoked a login shell."
  end
  detail += "\nLogin: #{username}@#{hostname}" if username
  detail += "\nClient IP: #{client_ip}" if client_ip
  detail += "\nCommand: #{command}" if command
  # single-line form for the local log
  log_detail = detail.gsub("\n", '; ')
  Log.info("#{summary} - #{log_detail}")
  options = {
    :name => 'audit_create_entry',
    :user_email => email,
    :summary => summary,
    :detail => detail,
    :category => RightScale::EventCategories::CATEGORY_SECURITY
  }
  send_command(options, false, AUDIT_REQUEST_TIMEOUT)
  true
rescue Exception => e
  Log.error("#{e.class.name}:#{e.message}")
  Log.error(e.backtrace.join("\n"))
  false
end
|
ruby
|
{
"resource": ""
}
|
q3865
|
RightScale.Thunker.display_motd
|
train
|
# Print the system message-of-the-day, checking the known locations in order
# of distro recency. Any error is swallowed deliberately (best-effort only).
#
# === Return
# (nil):: always nil-ish; output goes to stdout
def display_motd
  # File.exist? replaces the deprecated File.exists? (removed in Ruby 3.2)
  if ::File.exist?("/var/run/motd.dynamic")
    # Ubuntu 14.04+ motd location
    puts ::File.read("/var/run/motd.dynamic")
  elsif ::File.exist?("/var/run/motd")
    # Ubuntu 12.04 motd location
    puts ::File.read("/var/run/motd")
  elsif ::File.exist?("/etc/motd")
    # Legacy (CentOS 6 style)
    puts ::File.read("/etc/motd")
  end
rescue
  nil
end
|
ruby
|
{
"resource": ""
}
|
q3866
|
BEL.Quoting.quote
|
train
|
# Return the value as a double-quoted string, escaping any interior
# double quotes that are not already escaped.
#
# === Return
# (String):: quoted string
def quote(value)
  bare = unquote(value.to_s)
  escaped = bare.gsub(QuoteNotEscapedMatcher, "\\\"")
  %Q{"#{escaped}"}
end
|
ruby
|
{
"resource": ""
}
|
q3867
|
RightScale.CloudController.control
|
train
|
# Drive cloud actions from command-line options. Supports a comma-delimited
# list of actions executed sequentially; :bootstrap expands to the standard
# boot-time action sequence. With :requires_network_config set, exits the
# process with 0/1 to report whether the cloud needs network configuration.
#
# === Parameters
# options(Hash):: :name, :action, :parameters, :only_if, :verbose,
#   :requires_network_config
#
# === Raise
# StandardError:: when an action returns a non-zero exit status
# ArgumentError:: for an unknown action (unless :only_if is set)
def control(options)
  fail("No action specified on the command line.") unless (options[:action] || options[:requires_network_config])
  name = options[:name]
  parameters = options[:parameters] || []
  only_if = options[:only_if]
  verbose = options[:verbose]
  # support either single or a comma-delimited list of actions to execute
  # sequentially (e.g. "--action clear_state,wait_for_instance_ready,write_user_metadata")
  # (because we need to split bootstrap actions up in Windows case).
  actions = options[:action].to_s.split(',').inject([]) do |result, action|
    unless (action = action.strip).empty?
      action = action.to_sym
      case action
      when :bootstrap
        # bootstrap is shorthand for all standard actions performed on boot
        result += [:clear_state, :wait_for_instance_ready, :write_cloud_metadata, :write_user_metadata, :wait_for_eip]
        only_if = true
      else
        result << action
      end
    end
    result
  end
  cloud = CloudFactory.instance.create(name, :logger => default_logger(verbose))
  actions.each do |action|
    if cloud.respond_to?(action)
      # Expect most methods to return ActionResult, but a cloud can expose any
      # custom method so we can't assume return type
      result = cloud.send(action, *parameters)
      $stderr.puts result.error if result.respond_to?(:error) && result.error
      $stdout.puts result.output if verbose && result.respond_to?(:output) && result.output
      if result.respond_to?(:exitstatus) && (result.exitstatus != 0)
        raise StandardError, "Action #{action} failed with status #{result.exitstatus}"
      end
    elsif only_if
      # silently skip unsupported actions when in best-effort mode
      next
    else
      raise ArgumentError, "ERROR: Unknown cloud action: #{action}"
    end
  end
  if options[:requires_network_config]
    exit(cloud.requires_network_config? ? 0 : 1)
  end
end
|
ruby
|
{
"resource": ""
}
|
q3868
|
Nguyen.Fdf.to_fdf
|
train
|
# Serialize @data into an FDF document string. Nested hashes are flattened
# into "key_subkey" field names.
#
# === Return
# (String):: FDF document (header + fields + footer)
def to_fdf
  document = header
  @data.each do |key, value|
    if value.is_a?(Hash)
      value.each do |sub_key, sub_value|
        document << field("#{key}_#{sub_key}", sub_value)
      end
    else
      document << field(key, value)
    end
  end
  document << footer
  document
end
|
ruby
|
{
"resource": ""
}
|
q3869
|
Nguyen.Xfdf.to_xfdf
|
train
|
# Serialize @fields into an XFDF XML document (UTF-8). Array values emit one
# <value> element per item; optional :file and :id options add <f> and <ids>
# elements respectively.
#
# === Return
# (String):: XFDF document
def to_xfdf
  builder = Nokogiri::XML::Builder.new(encoding: 'UTF-8') do |xml|
    xml.xfdf('xmlns' => 'http://ns.adobe.com/xfdf/', 'xml:space' => 'preserve') {
      xml.f(href: options[:file]) if options[:file]
      xml.ids(original: options[:id], modified: options[:id]) if options[:id]
      xml.fields {
        @fields.each do |field, value|
          xml.field(name: field) {
            if value.is_a? Array
              value.each { |item| xml.value(item.to_s) }
            else
              xml.value(value.to_s)
            end
          }
        end
      }
    }
  end
  builder.to_xml(save_with: Nokogiri::XML::Node::SaveOptions::AS_XML)
end
|
ruby
|
{
"resource": ""
}
|
q3870
|
RightScale.NetworkConfigurator.configure_routes
|
train
|
# Add a network route for each RS_ROUTE<n> metadata entry of the form
# "<nat_server_ip>,<cidr>" (or colon-delimited). Duplicate CIDRs are skipped
# (first gateway wins) and per-route errors are logged without aborting the
# remaining routes.
#
# === Return
# true:: Always return true
def configure_routes
  # required metadata values
  routes = ENV.keys.select { |k| k =~ /^RS_ROUTE(\d+)$/ }
  seen_route = {}
  routes.each do |route|
    begin
      nat_server_ip, cidr = ENV[route].strip.split(/[,:]/)
      if seen_route[cidr]
        seen_nat_server_ip = seen_route[cidr]
        logger.warn "Already added route #{cidr} to gateway #{seen_nat_server_ip}, skipping adding it to #{nat_server_ip}"
      else
        seen_route[cidr] = nat_server_ip
        network_route_add(cidr.to_s.strip, nat_server_ip.to_s.strip)
      end
    rescue Exception => e
      # best-effort: a bad route entry must not abort the others
      logger.error "Detected an error while adding route to NAT #{e.class}: #{e.message}"
    end
  end
  true
end
|
ruby
|
{
"resource": ""
}
|
q3871
|
RightScale.NetworkConfigurator.network_route_add
|
train
|
# Validate route parameters before platform-specific subclasses apply them.
#
# === Raise
# RuntimeError:: on an invalid gateway IP or CIDR network
#
# === Return
# true:: Always return true
def network_route_add(network, nat_server_ip)
  unless valid_ipv4?(nat_server_ip)
    raise "ERROR: invalid nat_server_ip : '#{nat_server_ip}'"
  end
  unless valid_ipv4_cidr?(network)
    raise "ERROR: invalid CIDR network : '#{network}'"
  end
  true
end
|
ruby
|
{
"resource": ""
}
|
q3872
|
RightScale.NetworkConfigurator.network_route_exists?
|
train
|
# Whether the routing table already contains a route matching the given
# network and gateway.
#
# === Return
# (Boolean):: true if the route is present
def network_route_exists?(network, nat_server_ip)
  table = routes_show()
  table.match(route_regex(network, nat_server_ip)) ? true : false
end
|
ruby
|
{
"resource": ""
}
|
q3873
|
RightScale.NetworkConfigurator.add_static_ip
|
train
|
# Configure a static IP for interface n_ip from RS_IP<n>_* metadata.
# RS_IP<n>_ADDR and RS_IP<n>_NETMASK are required (when ADDR is present);
# GATEWAY, MAC and NAMESERVERS are optional. Errors are logged and re-raised.
#
# === Parameters
# n_ip(Integer):: interface index used in the RS_IP<n>_* variable names
def add_static_ip(n_ip=0)
  begin
    # required metadata values
    ipaddr = ENV["RS_IP#{n_ip}_ADDR"]
    netmask = ENV["RS_IP#{n_ip}_NETMASK"]
    # optional
    gateway = ENV["RS_IP#{n_ip}_GATEWAY"]
    device = shell_escape_if_necessary(device_name_from_mac(ENV["RS_IP#{n_ip}_MAC"]))
    if ipaddr
      # configure network adaptor
      attached_nameservers = nameservers_for_device(n_ip)
      logger.info "Setting up static IP address '#{ipaddr}' for '#{device}'"
      logger.debug "Netmask: '#{netmask}' ; Gateway: '#{gateway}'"
      logger.debug "Nameservers: '#{attached_nameservers.join(' ')}'" if attached_nameservers
      raise "FATAL: RS_IP#{n_ip}_NETMASK not defined ; Cannot configure static IP address" unless netmask
      ip = configure_network_adaptor(device, ipaddr, netmask, gateway, attached_nameservers)
    end
  rescue Exception => e
    logger.error "Detected an error while configuring static IP#{n_ip}: #{e.message}"
    raise e
  end
end
|
ruby
|
{
"resource": ""
}
|
q3874
|
RightScale.NetworkConfigurator.configure_network_adaptor
|
train
|
# Validate adaptor parameters before platform-specific subclasses perform
# the actual configuration. The gateway is optional; when given it must be
# a valid IPv4 address.
#
# === Raise
# RuntimeError:: on an invalid IP address, netmask or gateway
def configure_network_adaptor(device, ip, netmask, gateway, nameservers)
  raise "ERROR: invalid IP address: '#{ip}'" unless valid_ipv4?(ip)
  raise "ERROR: invalid netmask: '#{netmask}'" unless valid_ipv4?(netmask)
  # gateway is optional
  raise "ERROR: invalid gateway IP address: '#{gateway}'" if gateway && !valid_ipv4?(gateway)
end
|
ruby
|
{
"resource": ""
}
|
q3875
|
RightScale.NetworkConfigurator.nameservers_for_device
|
train
|
# Collect the valid IPv4 nameservers declared in RS_IP<n>_NAMESERVERS
# (comma- or space-delimited). Invalid entries are logged and skipped; an
# empty result only produces a warning since DHCP or another interface may
# still supply nameservers.
#
# === Return
# (Array):: valid nameserver addresses (possibly empty)
def nameservers_for_device(n_ip)
  raw_entries = ENV["RS_IP#{n_ip}_NAMESERVERS"].to_s.strip.split(/[, ]+/)
  nameservers = raw_entries.select do |entry|
    ok = valid_ipv4?(entry)
    # Non-fatal error, we only need one working
    logger.error("Invalid nameserver #{entry} for interface##{n_ip}") unless ok
    ok
  end
  # Also a non-fatal error, DHCP or another interface specify nameservers and we're still good
  logger.warn("No valid nameservers specified for static interface##{n_ip}") if nameservers.empty?
  nameservers
end
|
ruby
|
{
"resource": ""
}
|
q3876
|
RightScale.Tagger.run
|
train
|
# Entry point for the rs_tag command-line tool: builds and sends the tag
# request to the agent, then dispatches the response to the handler for the
# requested action. Exceptions (other than SystemExit) are converted to a
# process failure.
#
# === Parameters
# options(Hash):: parsed command-line options; must include :action
def run(options)
  fail_if_right_agent_is_not_running
  check_privileges
  set_logger(options)
  missing_argument unless options.include?(:action)
  # Don't use send_command callback as it swallows exceptions by design
  res = send_command(build_cmd(options), options[:verbose], options[:timeout])
  case options[:action]
  when :get_tags
    get_tags(res, options)
  when :query_tags
    query_tags(res, options)
  when :add_tag
    add_tag(res, options)
  when :remove_tag
    remove_tag(res, options)
  else
    write_error(res)
  end
rescue SystemExit => e
  raise e
rescue Exception => e
  fail(e)
end
|
ruby
|
{
"resource": ""
}
|
q3877
|
RightScale.Tagger.format_output
|
train
|
# Render a result in the requested output format.
#
# === Parameters
# result(Object):: value to render; for :text, a Hash renders its keys
# format(Symbol):: one of :json, :yaml, :text
#
# === Return
# (String):: formatted output
#
# === Raise
# ArgumentError:: for an unknown format
def format_output(result, format)
  case format
  when :json
    JSON.pretty_generate(result)
  when :yaml
    YAML.dump(result)
  when :text
    list = result.respond_to?(:keys) ? result.keys : result
    list.join(" ")
  else
    raise ArgumentError, "Unknown output format #{format}"
  end
end
|
ruby
|
{
"resource": ""
}
|
q3878
|
RightScale.AuditCookStub.forward_audit
|
train
|
# Forward an audit message to the auditor registered for a thread, if any.
# :append_output takes only the text; other kinds also receive the options.
def forward_audit(kind, text, thread_name, options)
  auditor = @auditors[thread_name]
  return unless auditor
  case kind
  when :append_output
    auditor.append_output(text)
  else
    auditor.__send__(kind, text, options)
  end
end
|
ruby
|
{
"resource": ""
}
|
q3879
|
RightScale.AuditCookStub.close
|
train
|
# Invoke the close callback registered for a thread, then always clear the
# thread's auditor and callback registrations (even if the callback raises).
#
# === Return
# true:: Always return true
def close(thread_name)
  callback = @close_callbacks[thread_name]
  callback.call if callback
  true
ensure
  @auditors[thread_name] = nil
  @close_callbacks[thread_name] = nil
end
|
ruby
|
{
"resource": ""
}
|
q3880
|
RightScale.ReposeDownloader.download
|
train
|
# Download a resource from Repose over HTTPS via the request balancer,
# yielding the response to the caller's block. Records @size and @speed for
# reporting. Connection-class failures raise ConnectionException; all other
# failures raise DownloadException.
#
# === Parameters
# resource(String):: resource to download (sanitized for logging)
#
# === Block
# yields the HTTP response body on success
def download(resource)
  client = get_http_client
  @size = 0
  @speed = 0
  @sanitized_resource = sanitize_resource(resource)
  resource = parse_resource(resource)
  attempts = 0
  begin
    balancer.request do |endpoint|
      # pin expected certificate hostname since we connect by IP
      RightSupport::Net::SSL.with_expected_hostname(ips[endpoint]) do
        logger.info("Requesting '#{sanitized_resource}' from '#{endpoint}'")
        attempts += 1
        t0 = Time.now
        # Previously we accessed RestClient directly and used it's wrapper method to instantiate
        # a RestClient::Request object. This wrapper was not passing all options down the stack
        # so now we invoke the RestClient::Request object directly, passing it our desired options
        client.execute(:method => :get, :url => "https://#{endpoint}:443#{resource}", :timeout => calculate_timeout(attempts),
                       :verify_ssl => OpenSSL::SSL::VERIFY_PEER, :ssl_ca_file => get_ca_file,
                       :ssl_version => RightSupport::Net::HTTPClient::DEFAULT_OPTIONS[:ssl_version],
                       :headers => {:user_agent => "RightLink v#{AgentConfig.protocol_version}",
                       'X-RightLink-Version' => RightLink.version }) do |response, request, result|
          if result.kind_of?(Net::HTTPSuccess)
            @size = result.content_length || response.size || 0
            @speed = @size / (Time.now - t0)
            yield response
          else
            # raise the appropriate RestClient exception for the status code
            response.return!(request, result)
          end
        end
      end
    end
  rescue Exception => e
    list = parse_exception_message(e)
    message = list.join(", ")
    logger.error("Request '#{sanitized_resource}' failed - #{message}")
    raise ConnectionException, message unless (list & CONNECTION_EXCEPTIONS).empty?
    raise DownloadException, message
  end
end
|
ruby
|
{
"resource": ""
}
|
q3881
|
RightScale.ReposeDownloader.resolve
|
train
|
# Resolve each hostname to its IPv4 addresses (port 443/TCP), retrying each
# lookup up to RETRY_MAX_ATTEMPTS times before raising.
#
# === Parameters
# hostnames(Array):: hostnames to resolve
#
# === Return
# ips(Hash):: map of IP address => hostname it resolved from
def resolve(hostnames)
  ips = {}
  hostnames.each do |hostname|
    infos = nil
    attempts = RETRY_MAX_ATTEMPTS
    begin
      infos = Socket.getaddrinfo(hostname, 443, Socket::AF_INET, Socket::SOCK_STREAM, Socket::IPPROTO_TCP)
    rescue Exception => e
      if attempts > 0
        attempts -= 1
        retry
      else
        logger.error "Failed to resolve hostnames (#{e.class.name}: #{e.message})"
        raise e
      end
    end
    # Randomly permute the addrinfos of each hostname to help spread load.
    infos.shuffle.each do |info|
      # addrinfo tuple position 3 is the numeric address
      ip = info[3]
      ips[ip] = hostname
    end
  end
  ips
end
|
ruby
|
{
"resource": ""
}
|
q3882
|
RightScale.ReposeDownloader.parse_exception_message
|
train
|
# Extract exception class names from an exception. For a
# RightSupport::Net::NoResult, pulls the per-endpoint class-name lists out
# of the bracketed segments of the message; otherwise returns the
# exception's own class name.
#
# === Return
# (Array):: exception class name strings
def parse_exception_message(e)
  return [e.class.name] unless e.kind_of?(RightSupport::Net::NoResult)
  # Expected format of exception message: "... endpoints: ('<ip address>' => [<class>, ...], ...)"
  # splitting on brackets alternates outside/inside text; odd indexes are the
  # bracketed (inside) segments
  segments = e.message.split(/\[|\]/)
  bracketed = segments.each_with_index.select { |_, idx| idx.odd? }.map { |segment, _| segment }
  bracketed.map { |segment| segment.split(/,\s*/) }.flatten
end
|
ruby
|
{
"resource": ""
}
|
q3883
|
RightScale.ReposeDownloader.hostnames_ips
|
train
|
# List all resolved IPs ordered by the hostname they belong to (in
# @hostnames order).
#
# === Return
# (Array):: IP address strings
def hostnames_ips
  result = []
  @hostnames.each do |hostname|
    result.concat(ips.select { |_, host| host == hostname }.keys)
  end
  result
end
|
ruby
|
{
"resource": ""
}
|
q3884
|
RightScale.ReposeDownloader.balancer
|
train
|
# Lazily construct the sticky request balancer used to spread Repose
# requests across resolved IPs. The :fatal lambda marks as fatal the
# balancer's default fatal exceptions plus any HTTP 4xx status except
# 408 (request timeout); everything else is retried.
def balancer
  @balancer ||= RightSupport::Net::RequestBalancer.new(
    hostnames_ips,
    :policy => RightSupport::Net::LB::Sticky,
    :retry => RETRY_MAX_ATTEMPTS,
    :fatal => lambda do |e|
      if RightSupport::Net::RequestBalancer::DEFAULT_FATAL_EXCEPTIONS.any? { |c| e.is_a?(c) }
        true
      elsif e.respond_to?(:http_code) && (e.http_code != nil)
        # 4xx are client errors and not retryable, except request timeout
        (e.http_code >= 400 && e.http_code < 500) && (e.http_code != 408 && e.http_code != 500 )
      else
        false
      end
    end
  )
end
|
ruby
|
{
"resource": ""
}
|
q3885
|
Verse.Padding.pad
|
train
|
# Apply padding around the text. With no argument, uses the padding already
# configured on the object (returning the text unchanged when none is set);
# with an argument, parses and stores the new padding first. Top/bottom
# padding rows are blank lines as wide as the longest text line; left/right
# padding is applied per line by pad_multi_line.
#
# === Parameters
# padding(Object):: padding specification understood by Padder.parse
# options(Hash):: currently unused
#
# === Return
# (String):: padded text
def pad(padding = (not_set = true), options = {})
  return text if @padding.empty? && not_set
  if !not_set
    @padding = Padder.parse(padding)
  end
  text_copy = text.dup
  column_width = maximum_length(text)
  elements = []
  if @padding.top > 0
    elements << (SPACE * column_width + NEWLINE) * @padding.top
  end
  elements << text_copy
  if @padding.bottom > 0
    elements << (SPACE * column_width + NEWLINE) * @padding.bottom
  end
  elements.map { |el| pad_multi_line(el) }.join(NEWLINE)
end
|
ruby
|
{
"resource": ""
}
|
q3886
|
RightScale.SingleThreadBundleQueue.create_sequence
|
train
|
# Build the executable sequence proxy for a bundle context, recording the
# spawned process pid (under the mutex) when the sequence reports it.
#
# === Return
# (RightScale::ExecutableSequenceProxy):: new sequence proxy
def create_sequence(context)
  RightScale::ExecutableSequenceProxy.new(
    context,
    :pid_callback => lambda do |sequence|
      @mutex.synchronize { @pid = sequence.pid }
    end)
end
|
ruby
|
{
"resource": ""
}
|
q3887
|
RightScale.SingleThreadBundleQueue.audit_status
|
train
|
# Audit the final status of a finished sequence, then release the queue
# thread to process the next bundle. The release is next-ticked on the EM
# reactor to guarantee we are not signalling from the queue thread itself.
#
# === Parameters
# sequence(Object):: finished sequence whose context carries audit info
#
# === Return
# true:: on success (error return value when audit fails)
def audit_status(sequence)
  context = sequence.context
  title = context.decommission? ? 'decommission ' : ''
  title += context.succeeded ? 'completed' : 'failed'
  context.audit.update_status("#{title}: #{context.payload}")
  true
rescue Exception => e
  Log.error(Log.format("SingleThreadBundleQueue.audit_status failed for #{@thread_name} thread", e, :trace))
ensure
  # release queue thread to wait on next bundle in queue. we must ensure
  # that we are not currently on the queue thread so next-tick the signal.
  EM.next_tick { @mutex.synchronize { @sequence_finished.signal } }
end
|
ruby
|
{
"resource": ""
}
|
q3888
|
RightScale.FetchRunner.setup_log
|
train
|
# Create the test logger: in-memory (StringIO-backed) by default, or a
# uniquely-named temp file per call when any other type is given. File names
# share a per-process base name with an incrementing index (class-level
# state). Log level follows is_debug?.
#
# === Parameters
# type(Symbol):: :memory for in-memory logging, anything else for file logging
#
# === Return
# (Logger):: configured logger (also stored in @logger)
def setup_log(type=:memory)
  case type
  when :memory
    @log_content = StringIO.new
    @logger = Logger.new(@log_content)
  else
    unless defined?(@@log_file_base_name)
      @@log_file_base_name = File.normalize_path(File.join(Dir.tmpdir, "#{File.basename(__FILE__, '.rb')}_#{Time.now.strftime("%Y-%m-%d-%H%M%S")}"))
      @@log_file_index = 0
    end
    @@log_file_index += 1
    @log_file_name = "#{@@log_file_base_name}_#{@@log_file_index}.log"
    @log_file = File.open(@log_file_name, 'w')
    @logger = Logger.new(@log_file)
  end
  @logger.level = is_debug? ? Logger::DEBUG : Logger::INFO
  return @logger
end
|
ruby
|
{
"resource": ""
}
|
q3889
|
RightScale.FetchRunner.run_fetcher
|
train
|
# Run one or more fetcher callables against a mock HTTP server inside an EM
# reactor, polling for completion and enforcing a timeout. Any exception
# raised by a fetcher or during setup is re-raised afterwards with its full
# backtrace folded into the message for easier debugging.
#
# === Parameters
# args(Array):: callables to invoke (each result collected)
# block(Proc):: passed to MockHTTPServer construction
#
# === Return
# (Object|Array):: single result when one fetcher was given, else all results
def run_fetcher(*args, &block)
  server = nil
  done = false
  last_exception = nil
  results = []
  EM.run do
    begin
      server = MockHTTPServer.new({:Logger => @logger}, &block)
      # run fetchers off the reactor thread so it can serve requests
      EM.defer do
        begin
          args.each do |source|
            results << source.call()
          end
        rescue Exception => e
          last_exception = e
        end
        done = true
      end
      # poll until the deferred work reports completion, then stop the reactor
      timer = EM.add_periodic_timer(0.1) do
        if done
          timer.cancel
          timer = nil
          EM.next_tick do
            EM.stop
          end
        end
      end
      EM.add_timer(FETCH_TEST_TIMEOUT_SECS) { @logger.error("timeout"); raise "timeout" }
    rescue Exception => e
      last_exception = e
    end
  end
  # stop server, if any.
  (server.shutdown rescue nil) if server
  # reraise with full backtrace for debugging purposes. this assumes the
  # exception class accepts a single string on construction.
  if last_exception
    message = "#{last_exception.message}\n#{last_exception.backtrace.join("\n")}"
    if last_exception.class == ArgumentError
      raise ArgumentError, message
    else
      begin
        raise last_exception.class, message
      rescue ArgumentError
        # exception class does not support single string construction.
        message = "#{last_exception.class}: #{message}"
        raise message
      end
    end
  end
  return 1 == results.size ? results[0] : results
end
|
ruby
|
{
"resource": ""
}
|
q3890
|
Mago.SexpProcessor.process_lit
|
train
|
# SexpProcessor hook for literal nodes: records numeric literals as magic
# numbers (with their line) unless excluded by configuration.
#
# NOTE(review): the receiver token "[email protected]" below looks like a
# scraping/obfuscation artifact of something like "!@config.<ignore list>"
# — verify against the upstream Mago source before relying on this code.
#
# === Return
# (Sexp):: empty sexp, as required by SexpProcessor
def process_lit(exp)
  exp.shift
  value = exp.shift
  if value.is_a?(Numeric) && [email protected]?(value)
    @file.magic_numbers << MagicNumber.new(:value => value, :line => exp.line)
  end
  s()
end
|
ruby
|
{
"resource": ""
}
|
q3891
|
BEL::JSON.Implementation.write
|
train
|
# Serialize data as JSON using Oj in :compat mode (overridable via options).
# With an IO, streams into it and returns the IO; without one, returns the
# JSON as a String.
#
# === Return
# (IO|String):: the output IO when given, otherwise the JSON string
def write(data, output_io, options = {})
  opts = { :mode => :compat }.merge!(options)
  if output_io
    # write json and return IO
    Oj.to_stream(output_io, data, opts)
    output_io
  else
    # return json string
    buffer = StringIO.new
    Oj.to_stream(buffer, data, opts)
    buffer.string
  end
end
|
ruby
|
{
"resource": ""
}
|
q3892
|
RightScale.ExecutableSequence.sensitive_inputs
|
train
|
# Collect the values of inputs flagged as sensitive from @attributes.
# Scans every attribute hash that carries a "parameters" key and picks the
# parameters whose names appear in @sensitive_inputs; when the same input
# name occurs more than once the values are accumulated into an array.
#
# === Return
# inputs(Hash):: sensitive input name => value (or array of values)
def sensitive_inputs
  inputs = {}
  if @attributes
    @attributes.values.select { |attr| attr.respond_to?(:has_key?) && attr.has_key?("parameters") }.each do |has_params|
      has_params.each_pair do |_, params|
        sensitive = params.select { |name, _| @sensitive_inputs.include?(name) }
        # on key collision, fold old and new values into a flat array
        inputs.merge!(sensitive) { |key, old, new| [old].flatten.push(new) }
      end
    end
  end
  inputs
end
|
ruby
|
{
"resource": ""
}
|
q3893
|
RightScale.ExecutableSequence.configure_logging
|
train
|
# Wire Chef's logger to an AuditLogger that obfuscates sensitive input
# values, mirroring the agent's current log level.
def configure_logging
  Chef::Log.logger = AuditLogger.new(sensitive_inputs)
  Chef::Log.logger.level = Log.level_from_sym(Log.level)
end
|
ruby
|
{
"resource": ""
}
|
q3894
|
RightScale.ExecutableSequence.configure_chef
|
train
|
# Configure global Chef settings for a cook run: shell-out logging and
# failure reporting, solo mode, cookbook paths, file/checksum cache
# locations, backup behavior, and single-process/threaded execution options.
#
# === Return
# true:: Always return true
def configure_chef
  # setup logger for mixlib-shellout gem to consume instead of the chef
  # v0.10.10 behavior of not logging ShellOut calls by default. also setup
  # command failure exception and callback for legacy reasons.
  ::Mixlib::ShellOut.default_logger = ::Chef::Log
  ::Mixlib::ShellOut.command_failure_callback = lambda do |params|
    failure_reason = ::RightScale::SubprocessFormatting.reason(params[:status])
    expected_error_codes = Array(params[:args][:returns]).join(' or ')
    ::RightScale::Exceptions::Exec.new("\"#{params[:args][:command]}\" #{failure_reason}, expected #{expected_error_codes}.",
                                       params[:args][:cwd])
  end
  # Chef run mode is always solo for cook
  Chef::Config[:solo] = true
  # determine default cookbooks path. If debugging cookbooks, place the debug pat(s) first, otherwise
  # clear out the list as it will be filled out with cookbooks needed for this converge as they are downloaded.
  if CookState.use_cookbooks_path?
    Chef::Config[:cookbook_path] = [CookState.cookbooks_path].flatten
    @audit.append_info("Using development cookbooks repositories path:\n\t- #{Chef::Config[:cookbook_path].join("\n\t- ")}")
  else
    # reset the cookbook path. Will be filled out with cookbooks needed for this execution
    Chef::Config[:cookbook_path] = []
  end
  # add the rightscript cookbook if there are rightscripts in this converge
  Chef::Config[:cookbook_path] << @right_scripts_cookbook.repo_dir unless @right_scripts_cookbook.empty?
  # must set file cache path and ensure it exists otherwise evented run_command will fail
  file_cache_path = File.join(AgentConfig.cache_dir, 'chef')
  Chef::Config[:file_cache_path] = file_cache_path
  FileUtils.mkdir_p(Chef::Config[:file_cache_path])
  Chef::Config[:cache_options][:path] = File.join(file_cache_path, 'checksums')
  FileUtils.mkdir_p(Chef::Config[:cache_options][:path])
  # Where backups of chef-managed files should go. Set to nil to backup to the same directory the file being backed up is in.
  Chef::Config[:file_backup_path] = nil
  # Chef 11+ defaults client_fork to true which cause Chef::Client to fork
  # This create problems with right_popen - right_popen expects to be used inside running EM reactor
  # EM seems not to play well with forking
  Chef::Config[:client_fork] = false
  # Chef 11+ allow concurrent execution of the recipes in different theads,
  # by setting different lockfile per thread.
  Chef::Config[:lockfile] = File.join(Chef::Config[:file_cache_path], "chef-client-#{@thread_name}-running.pid")
  true
end
|
ruby
|
{
"resource": ""
}
|
q3895
|
RightScale.ExecutableSequence.update_cookbook_path
|
train
|
# Appends each downloaded cookbook directory to Chef's cookbook_path.
#
# Cookbook sequences and their paths arrive in the same order shown in the
# repo UI (guaranteed as of RL v5.8; pre-5.7 ordering was arbitrary and is
# intentionally left unchanged). Chef replaces cookbook paths as it reads
# the array front-to-back, so both ordered lists are appended in reverse
# to make repos and paths take effect in their listed order.
#
# === Return
# true:: Always return true
def update_cookbook_path
  @cookbooks.reverse_each do |cookbook_sequence|
    sequence_base = File.join(@download_path, cookbook_sequence.hash)
    cookbook_sequence.paths.reverse_each do |path|
      candidate = File.expand_path(File.join(sequence_base, path))
      # skip anything already registered with Chef
      next if Chef::Config[:cookbook_path].include?(candidate)
      if File.directory?(candidate)
        Chef::Config[:cookbook_path] << candidate
      else
        # the path was listed but never materialized on disk
        RightScale::Log.info("Excluding #{path} from chef cookbooks_path because it was not downloaded")
      end
    end
  end
  RightScale::Log.info("Updated cookbook_path to: #{Chef::Config[:cookbook_path].join(", ")}")
  true
end
|
ruby
|
{
"resource": ""
}
|
q3896
|
RightScale.ExecutableSequence.checkout_cookbook_repos
|
train
|
# Checks out development-mode cookbook repositories, auditing progress as
# the retriever reports begin/commit/abort events for each operation.
# Aborted operations are both audited and logged with a trace.
#
# === Return
# true:: When there are no development cookbooks to check out
def checkout_cookbook_repos
  return true unless @cookbook_repo_retriever.has_cookbooks?

  @audit.create_new_section('Checking out cookbooks for development')
  @audit.append_info("Cookbook repositories will be checked out to #{@cookbook_repo_retriever.checkout_root}")
  audit_time do
    # only create a scraper if there are dev cookbooks
    @cookbook_repo_retriever.checkout_cookbook_repos do |state, operation, explanation, exception|
      # audit progress per event; begin/commit are filtered to the
      # operations worth reporting, aborts are always surfaced
      if state == :begin
        @audit.append_info("start #{operation} #{explanation}") if AUDIT_BEGIN_OPERATIONS.include?(operation)
      elsif state == :commit
        @audit.append_info("finish #{operation} #{explanation}") if AUDIT_COMMIT_OPERATIONS.include?(operation)
      elsif state == :abort
        @audit.append_error("Failed #{operation} #{explanation}")
        Log.error(Log.format("Failed #{operation} #{explanation}", exception, :trace))
      end
    end
  end
end
|
ruby
|
{
"resource": ""
}
|
q3897
|
RightScale.ExecutableSequence.check_ohai
|
train
|
# Probes Ohai for a hostname and invokes the given block once one is
# available; otherwise schedules itself to retry with exponential backoff
# (delay doubles each attempt, capped at OHAI_RETRY_MAX_DELAY).
#
# === Block
# Called with the Ohai system object once a hostname can be determined
#
# === Return
# true:: Always return true
def check_ohai(&block)
  node_data = create_ohai
  if node_data[:hostname]
    block.call(node_data)
    return true
  end
  Log.warning("Could not determine node name from Ohai, will retry in #{@ohai_retry_delay}s...")
  # Retry on a defer thread, consistent with where ExecutableSequence is
  # running, otherwise EM main-thread command client activity will block.
  EM.add_timer(@ohai_retry_delay) { EM.defer { check_ohai(&block) } }
  @ohai_retry_delay = [@ohai_retry_delay * 2, OHAI_RETRY_MAX_DELAY].min
  true
end
|
ruby
|
{
"resource": ""
}
|
q3898
|
RightScale.ExecutableSequence.create_ohai
|
train
|
# Builds a minimal Ohai system loaded with only the plugins needed to
# identify this host (OS and hostname).
#
# === Return
# ohai(Ohai::System):: Ohai instance with 'os' and 'hostname' plugins loaded
def create_ohai
  Ohai::System.new.tap do |ohai|
    ohai.require_plugin('os')
    ohai.require_plugin('hostname')
  end
end
|
ruby
|
{
"resource": ""
}
|
q3899
|
RightScale.ExecutableSequence.report_success
|
train
|
# Records a successful converge: folds the node's normal attributes into
# ChefState, strips RightScript parameters, computes the inputs patch to
# report back, and schedules the success callback on the next reactor tick.
#
# === Parameters
# node:: Chef node resulting from the converge, or nil if none is available
#
# === Return
# true:: Always return true
def report_success(node)
  ChefState.merge_attributes(node.normal_attrs) if node
  remove_right_script_params_from_chef_state
  @inputs_patch = ::RightSupport::Data::HashTools.deep_create_patch(@inputs, ChefState.attributes)
  # Drop attributes that exist only on our side (ohai etc.) so they are
  # not sent back.
  @inputs_patch[:right_only] = { }
  EM.next_tick { succeed }
  true
end
|
ruby
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.