_id
stringlengths 2
6
| title
stringlengths 9
130
| partition
stringclasses 3
values | text
stringlengths 66
10.5k
| language
stringclasses 1
value | meta_information
dict |
---|---|---|---|---|---|
q2100
|
RRSchedule.Schedule.face_to_face
|
train
|
# Return every game in the schedule where the two given teams meet,
# regardless of which side is "home" (team_a) or "away" (team_b).
# Idiom fix: flat_map replaces manual each + << + flatten.
def face_to_face(team_a, team_b)
  gamedays.flat_map do |gd|
    gd.games.select do |g|
      (g.team_a == team_a && g.team_b == team_b) ||
        (g.team_a == team_b && g.team_b == team_a)
    end
  end
end
|
ruby
|
{
"resource": ""
}
|
q2101
|
RRSchedule.Rule.gt=
|
train
|
# Game times setter: accepts a single time string or an array; defaults to
# ["7:00 PM"] when nothing is supplied. Each entry is parsed to a DateTime so
# invalid representations fail fast with a descriptive error.
# Bug fix: the collect! block parameter shadowed the method argument `gt`.
def gt=(gt)
  times = Array(gt)
  times = ["7:00 PM"] if times.empty?
  @gt = times.map do |time_str|
    begin
      DateTime.parse(time_str)
    rescue
      raise "game times must be valid time representations in the string form (e.g. 3:00 PM, 11:00 AM, 18:20, etc)"
    end
  end
end
|
ruby
|
{
"resource": ""
}
|
q2102
|
GnuplotRB.Terminal.options_hash_to_string
|
train
|
# Render an options hash as gnuplot "set"/"unset" lines, ordered by
# OPTION_ORDER (unknown keys sort first). Falsy values emit "unset".
def options_hash_to_string(options)
  ordered = options.sort_by { |key, _| OPTION_ORDER.find_index(key) || -1 }
  ordered.map do |key, value|
    value ? "set #{OptionHandling.option_to_string(key, value)}\n" : "unset #{key}\n"
  end.join
end
|
ruby
|
{
"resource": ""
}
|
q2103
|
Proj4.Projection.forwardDeg!
|
train
|
# Convert the point's coordinates from degrees to radians in place,
# then delegate to the forward projection (forward!).
def forwardDeg!(point)
  point.x = point.x * Proj4::DEG_TO_RAD
  point.y = point.y * Proj4::DEG_TO_RAD
  forward!(point)
end
|
ruby
|
{
"resource": ""
}
|
q2104
|
Proj4.Projection.inverseDeg!
|
train
|
# Run the inverse projection, then convert the resulting coordinates
# from radians to degrees in place. Returns the point.
def inverseDeg!(point)
  inverse!(point)
  point.x = point.x * Proj4::RAD_TO_DEG
  point.y = point.y * Proj4::RAD_TO_DEG
  point
end
|
ruby
|
{
"resource": ""
}
|
q2105
|
Proj4.Projection.transform_all!
|
train
|
# Transform every point of +collection+ into +otherProjection+ in place;
# returns the (mutated) collection.
def transform_all!(otherProjection, collection)
  collection.each { |point| transform!(otherProjection, point) }
  collection
end
|
ruby
|
{
"resource": ""
}
|
q2106
|
Faraday.Sunset.call
|
train
|
# Middleware hook: after the response completes, report deprecated usage
# when the response carries a Sunset header.
def call(env)
  @app.call(env).on_complete do |response_env|
    deprecation_date = sunset_header(response_env.response_headers)
    report_deprecated_usage(env, deprecation_date) unless deprecation_date.nil?
  end
end
|
ruby
|
{
"resource": ""
}
|
q2107
|
GnuplotRB.Dataset.update
|
train
|
# Merge new data and/or options into the dataset. For a datablock that can be
# updated in place (update returns the same object) only options change;
# otherwise a fresh Dataset wraps the new datablock.
def update(data = nil, **options)
  return update_options(options) unless data && @type == :datablock
  new_datablock = @data.update(data)
  if new_datablock == @data
    update_options(options)
  else
    self.class.new(new_datablock, options)
  end
end
|
ruby
|
{
"resource": ""
}
|
q2108
|
GnuplotRB.Dataset.options_to_string
|
train
|
# Serialize the dataset's options in OPTION_ORDER (unknown keys last),
# space-separated, for embedding in a gnuplot plot command.
def options_to_string
  ordered = options.sort_by { |key, _| OPTION_ORDER.find_index(key.to_s) || 999 }
  strings = ordered.map { |key, value| OptionHandling.option_to_string(key, value) }
  strings.join(' ')
end
|
ruby
|
{
"resource": ""
}
|
q2109
|
GnuplotRB.Dataset.init_string
|
train
|
# Initialize from a string: an existing file path becomes a quoted datafile
# reference, anything else is treated as a gnuplot math function.
def init_string(data, options)
  if File.exist?(data)
    @type = :datafile
    @data = "'#{data}'"
  else
    @type = :math_function
    @data = data.clone
  end
  @options = Hamster.hash(options)
end
|
ruby
|
{
"resource": ""
}
|
q2110
|
GnuplotRB.Dataset.get_daru_columns
|
train
|
# Build a gnuplot `using` spec for a Daru structure with +cnt+ columns:
# a numeric or DateTime index plots column 1 as x; any other index
# becomes xtic labels.
def get_daru_columns(data, cnt)
  columns = (2..cnt).to_a.join(':')
  first_index = data.index[0]
  case first_index
  when DateTime, Numeric
    "1:#{columns}"
  else
    "#{columns}:xtic(1)"
  end
end
|
ruby
|
{
"resource": ""
}
|
q2111
|
GnuplotRB.Plot.provide_with_datetime_format
|
train
|
# Inject time-axis defaults (xdata/timefmt/format_x) when plotting a Daru
# structure whose index holds DateTime values and whose x column is the
# index ('1:...'). Merge order lets existing user options win.
def provide_with_datetime_format(data, using)
return unless defined?(Daru)
return unless data.is_a?(Daru::DataFrame) || data.is_a?(Daru::Vector)
return unless data.index.first.is_a?(DateTime)
return if using[0..1] != '1:'
@options = Hamster::Hash.new(
xdata: 'time',
timefmt: '%Y-%m-%dT%H:%M:%S',
format_x: '%d\n%b\n%Y'
).merge(@options)
end
|
ruby
|
{
"resource": ""
}
|
q2112
|
GnuplotRB.Plot.dataset_from_any
|
train
|
# Coerce +source+ into a Dataset: a Daru::Vector or raw data builds a new
# Dataset, an existing Dataset is cloned, an Array is splatted into the
# constructor. Datetime axis defaults are applied based on the raw data.
def dataset_from_any(source)
ds = case source
# when initialized with dataframe (it passes here several vectors)
when (defined?(Daru) ? Daru::Vector : nil)
Dataset.new(source)
when Dataset
source.clone
else
Dataset.new(*source)
end
# For the Array case the first element is the actual data payload.
data = source.is_a?(Array) ? source[0] : source
provide_with_datetime_format(data, ds.using)
ds
end
|
ruby
|
{
"resource": ""
}
|
q2113
|
EventMachine.HttpResponse.send_headers
|
train
|
# Emit the HTTP status line and all headers exactly once;
# a second call raises.
def send_headers
  raise "sent headers already" if @sent_headers
  @sent_headers = true
  fixup_headers
  lines = ["HTTP/1.1 #{@status || 200} #{@status_string || '...'}\r\n"]
  lines.concat(generate_header_lines(@headers))
  lines << "\r\n"
  send_data lines.join
end
|
ruby
|
{
"resource": ""
}
|
q2114
|
Rest.BaseResponseWrapper.headers
|
train
|
# Return the original headers with lowercased names; single-element
# array values are unwrapped to the bare value.
def headers
  headers_orig.each_pair.each_with_object({}) do |(name, value), acc|
    value = value[0] if value.is_a?(Array) && value.size == 1
    acc[name.downcase] = value
  end
end
|
ruby
|
{
"resource": ""
}
|
q2115
|
GnuplotRB.Animation.plot
|
train
|
# Render the animation as a gif. When no :output option or +path+ is given,
# plot into a temp file and return its binary contents; otherwise gnuplot
# writes the file and nil is returned.
def plot(path = nil, **options)
options[:output] ||= path
plot_options = mix_options(options) do |plot_opts, anim_opts|
plot_opts.merge(term: ['gif', anim_opts])
end.to_h
need_output = plot_options[:output].nil?
# NOTE(review): Dir::Tmpname.make_tmpname was private API and removed in
# Ruby 2.5 — consider Dir::Tmpname.create / Tempfile. TODO confirm target Ruby.
plot_options[:output] = Dir::Tmpname.make_tmpname('anim', 0) if need_output
terminal = Terminal.new
multiplot(terminal, plot_options)
# guaranteed wait for plotting to finish
terminal.close
if need_output
result = File.binread(plot_options[:output])
File.delete(plot_options[:output])
else
result = nil
end
result
end
|
ruby
|
{
"resource": ""
}
|
q2116
|
GnuplotRB.Animation.specific_keys
|
train
|
# Option keys that belong to the gif animation terminal.
def specific_keys
  %w(animate size background transparent enhanced rounded butt
     linewidth dashlength tiny small medium large giant font
     fontscale crop)
end
|
ruby
|
{
"resource": ""
}
|
q2117
|
GnuplotRB.Fit.fit
|
train
|
# Fit +function+ (default quadratic) to +data+ via gnuplot's `fit` command.
# Returns a hash with the fitted-formula dataset, the coefficient values,
# their deltas, and the dataset that was fitted.
def fit(data, function: 'a2*x*x+a1*x+a0', initials: { a2: 1, a1: 1, a0: 1 }, term_options: {}, **options)
dataset = data.is_a?(Dataset) ? Dataset.new(data.data) : Dataset.new(data)
opts_str = OptionHandling.ruby_class_to_gnuplot(options)
output = gnuplot_fit(function, dataset, opts_str, initials, term_options)
# res: [coefficients hash, deltas hash, formula with coefficients substituted]
res = parse_output(initials.keys, function, output)
{
formula_ds: Dataset.new(res[2], title: 'Fit formula'),
coefficients: res[0],
deltas: res[1],
data: dataset
}
end
|
ruby
|
{
"resource": ""
}
|
q2118
|
GnuplotRB.Fit.wait_for_output
|
train
|
# Poll the terminal's error channel until it has produced fit results for
# all +variables+ (gnuplot reports fit output there), accumulating the text.
# Gives up with a GnuplotError after Settings.max_fit_delay seconds.
def wait_for_output(term, variables)
# now we should catch 'error' from terminal: it will contain approximation data
# but we can get a real error instead of output, so lets wait for limited time
start = Time.now
output = ''
until output_ready?(output, variables)
begin
term.check_errors(raw: true)
rescue GnuplotRB::GnuplotError => e
# "Errors" carry the fit progress text; collect them.
output += e.message
end
if Time.now - start > Settings.max_fit_delay
fail GnuplotError, "Seems like there is an error in gnuplotrb: #{output}"
end
end
output
end
|
ruby
|
{
"resource": ""
}
|
q2119
|
GnuplotRB.Fit.gnuplot_fit
|
train
|
# Drive gnuplot's `fit`: seed the initial variable values, issue the fit
# command, and return the textual output collected from the terminal.
def gnuplot_fit(function, data, options, initials, term_options)
variables = initials.keys
term = Terminal.new
term.set(term_options)
initials.each { |var_name, value| term.stream_puts "#{var_name} = #{value}" }
command = "fit #{function} #{data.to_s(term, without_options: true)} " \
"#{options} via #{variables.join(',')}"
term.stream_puts(command)
output = wait_for_output(term, variables)
begin
term.close
rescue GnuplotError
# Nothing interesting here.
# If we had an error, we never reach this line.
# Error here may be only additional information
# such as correlation matrix.
end
output
end
|
ruby
|
{
"resource": ""
}
|
q2120
|
ScatterSwap.Hasher.swapper_map
|
train
|
def swapper_map(index)
array = (0..9).to_a
10.times.collect.with_index do |i|
array.rotate!(index + i ^ spin).pop
end
end
|
ruby
|
{
"resource": ""
}
|
q2121
|
ScatterSwap.Hasher.unscatter
|
train
|
def unscatter
scattered_array = @working_array
sum_of_digits = scattered_array.inject(:+).to_i
@working_array = []
@working_array.tap do |unscatter|
10.times do
unscatter.push scattered_array.pop
unscatter.rotate! (sum_of_digits ^ spin) * -1
end
end
end
|
ruby
|
{
"resource": ""
}
|
q2122
|
Awrence.Methods.to_camelback_keys
|
train
|
# Recursively convert hash keys to camelBack (leading lowercase via
# camelize_key(key, false)), descending into nested arrays and hashes;
# all other values pass through untouched.
def to_camelback_keys(value = self)
  if value.is_a?(Array)
    value.map { |element| to_camelback_keys(element) }
  elsif value.is_a?(Hash)
    value.each_with_object({}) do |(key, val), result|
      result[camelize_key(key, false)] = to_camelback_keys(val)
    end
  else
    value
  end
end
|
ruby
|
{
"resource": ""
}
|
q2123
|
Awrence.Methods.to_camel_keys
|
train
|
# Recursively convert hash keys to CamelCase (via camelize_key),
# descending into nested arrays and hashes; other values pass through.
def to_camel_keys(value = self)
  if value.is_a?(Array)
    value.map { |element| to_camel_keys(element) }
  elsif value.is_a?(Hash)
    value.each_with_object({}) do |(key, val), result|
      result[camelize_key(key)] = to_camel_keys(val)
    end
  else
    value
  end
end
|
ruby
|
{
"resource": ""
}
|
q2124
|
SimpleStateMachine.StateMachine.next_state
|
train
|
# Return the target state of the first transition applicable to +event_name+
# from the subject's current state, or nil when the event is illegal here.
# Idiom/perf fix: find stops at the first match (was select { }.first).
def next_state(event_name)
  current = @subject.send(state_method)
  transition = transitions.find { |t| t.is_transition_for?(event_name, current) }
  transition ? transition.to : nil
end
|
ruby
|
{
"resource": ""
}
|
q2125
|
SimpleStateMachine.StateMachine.error_state
|
train
|
# Return the target state of the first error transition matching the event
# and the raised error, or nil when no error transition applies.
# Idiom/perf fix: find stops at the first match (was select { }.first).
def error_state(event_name, error)
  transition = transitions.find { |t| t.is_error_transition_for?(event_name, error) }
  transition ? transition.to : nil
end
|
ruby
|
{
"resource": ""
}
|
q2126
|
SimpleStateMachine.StateMachine.transition
|
train
|
# Run the state transition for +event_name+ around the given block:
# - no applicable transition: invoke illegal_event_callback
# - the block raises: move to a matching error state (remembering the error)
#   and return the partial result, or re-raise when no error state applies
# - ActiveRecord subjects: commit the new state only when there are no
#   validation errors; returns true/false accordingly
# - plain subjects: set the new state and return the block's result
def transition(event_name)
clear_raised_error
if to = next_state(event_name)
begin
result = yield
rescue => e
error_state = error_state(event_name, e) ||
state_machine_definition.default_error_state
if error_state
@raised_error = e
@subject.send("#{state_method}=", error_state)
return result
else
raise
end
end
# TODO refactor out to AR module
if defined?(::ActiveRecord) && @subject.is_a?(::ActiveRecord::Base)
if @subject.errors.entries.empty?
@subject.send("#{state_method}=", to)
return true
else
return false
end
else
@subject.send("#{state_method}=", to)
return result
end
else
illegal_event_callback event_name
end
end
|
ruby
|
{
"resource": ""
}
|
q2127
|
Inum.ActiveRecordMixin.bind_inum
|
train
|
# Bind an Inum enum class to +column+: defines a reader that parses the raw
# attribute into an enum, a writer that stores the enum's integer value (or
# nil), and one predicate method per enum value. Predicates are prefixed
# with the column name by default; pass prefix: nil/false to disable.
def bind_inum(column, enum_class, options = {})
options = { prefix: column }.merge(options)
options[:prefix] = options[:prefix] ? "#{options[:prefix]}_" : ''
self.class_eval do
define_method(column) do
enum_class.parse(read_attribute(column))
end
define_method("#{column}=") do |value|
enum_class.parse(value).tap do |enum|
if enum
write_attribute(column, enum.to_i)
else
write_attribute(column, nil)
end
end
end
enum_class.each do |enum|
# NOTE(review): this compares the enum object against the raw stored
# attribute (an integer) — presumably Inum#eql? accepts integers;
# verify, otherwise this predicate can never be true.
define_method("#{options[:prefix]}#{enum.to_s.underscore}?") do
enum.eql?(read_attribute(column))
end
end
end
end
|
ruby
|
{
"resource": ""
}
|
q2128
|
I3Ipc.Protocol.receive
|
train
|
# Read one i3-ipc message from the socket and return its payload string.
# Validates the magic string and, when +type+ is given, the received
# message type. Raises WrongMagicString / WrongType on mismatch.
def receive(type = nil)
check_connected
# length of "i3-ipc" + 4 bytes length + 4 bytes type
data = @socket.read 14
magic, len, recv_type = unpack_header(data)
raise WrongMagicString.new(magic) unless MAGIC_STRING.eql? magic
# Enforce the expected reply type only when the caller asked for one.
type && (raise WrongType.new(type, recv_type) unless type == recv_type)
@socket.read(len)
end
|
ruby
|
{
"resource": ""
}
|
q2129
|
SimpleStateMachine.Transition.is_error_transition_for?
|
train
|
# True when this transition handles +error+ raised during +event_name+:
# the transition's +from+ must be an exception class covering the error.
def is_error_transition_for?(event_name, error)
  return false unless is_same_event?(event_name)
  from.is_a?(Class) && error.is_a?(from)
end
|
ruby
|
{
"resource": ""
}
|
q2130
|
ICU.Normalizer.normalize
|
train
|
# Normalize +input+ with the wrapped unorm2 instance. The first call probes
# the required buffer size (capacity 0 triggers a buffer overflow error),
# then retries exactly once with a buffer of the reported size.
# NOTE(review): String#jlength is a legacy (pre-1.9 jcode) API — confirm it
# is still provided on the Rubies this gem targets.
def normalize(input)
input_length = input.jlength
in_ptr = UCharPointer.from_string(input)
needed_length = capacity = 0
out_ptr = UCharPointer.new(needed_length)
retried = false
begin
Lib.check_error do |error|
needed_length = Lib.unorm2_normalize(@instance, in_ptr, input_length, out_ptr, capacity, error)
end
rescue BufferOverflowError
# A second overflow with the reported size means something is wrong upstream.
raise BufferOverflowError, "needed: #{needed_length}" if retried
capacity = needed_length
out_ptr = out_ptr.resized_to needed_length
retried = true
retry
end
out_ptr.string
end
|
ruby
|
{
"resource": ""
}
|
q2131
|
ValidatesFormattingOf.ModelAdditions.validates_formatting_of
|
train
|
# Look up the canonical regex/message pair for +attribute+ and delegate to
# validates_format_of, letting caller-supplied options take precedence.
def validates_formatting_of(attribute, options = {})
  validation = Method.find(attribute, options)
  defaults = { :with => validation.regex, :message => validation.message }
  options.reverse_merge!(defaults)
  self.validates_format_of(attribute, options)
end
|
ruby
|
{
"resource": ""
}
|
q2132
|
Sidekiq::Statsd.ServerMiddleware.call
|
train
|
# Sidekiq server middleware: time the job, count success/failure, and push
# queue/global gauges to statsd in a single batch. Job errors are re-raised
# after being counted; gauges are still sent via the ensure block.
def call worker, msg, queue
@statsd.batch do |b|
begin
# colon causes invalid metric names
worker_name = worker.class.name.gsub('::', '.')
b.time prefix(worker_name, 'processing_time') do
yield
end
b.increment prefix(worker_name, 'success')
rescue => e
b.increment prefix(worker_name, 'failure')
raise e
ensure
# Global Sidekiq stats are optional (opt-in via :sidekiq_stats).
if @options[:sidekiq_stats]
# Queue sizes
b.gauge prefix('enqueued'), @sidekiq_stats.enqueued
if @sidekiq_stats.respond_to?(:retry_size)
# 2.6.0 doesn't have `retry_size`
b.gauge prefix('retry_set_size'), @sidekiq_stats.retry_size
end
# All-time counts
b.gauge prefix('processed'), @sidekiq_stats.processed
b.gauge prefix('failed'), @sidekiq_stats.failed
end
# Queue metrics
queue_name = msg['queue']
sidekiq_queue = Sidekiq::Queue.new(queue_name)
b.gauge prefix('queues', queue_name, 'enqueued'), sidekiq_queue.size
if sidekiq_queue.respond_to?(:latency)
b.gauge prefix('queues', queue_name, 'latency'), sidekiq_queue.latency
end
end
end
end
|
ruby
|
{
"resource": ""
}
|
q2133
|
AutoIt.Control.control_command_select_string
|
train
|
# Select the item matching +string+ in a combo/list control.
def control_command_select_string(title, text, control, string)
  args = [title, text, control, 'SelectString', string]
  command('ControlCommand', args)
end
|
ruby
|
{
"resource": ""
}
|
q2134
|
AutoIt.Control.control_set_text
|
train
|
# Set the text of a control; validates the AutoIt command result.
# Style/consistency fix: parenthesized call (was `command_validate'ControlSetText', ...`,
# which lacked even a space), matching the sibling wrappers.
def control_set_text(title, text, control, value)
  command_validate('ControlSetText', [title, text, control, value])
end
|
ruby
|
{
"resource": ""
}
|
q2135
|
AutoIt.Control.control_click
|
train
|
# Click a control with the given button, click count and coordinates;
# validates the AutoIt command result.
def control_click(title, text, control, button, clicks, x, y)
  args = [title, text, control, button, clicks, x, y]
  command_validate('ControlClick', args)
end
|
ruby
|
{
"resource": ""
}
|
q2136
|
AutoIt.Control.control_command_set_current_selection
|
train
|
# Select the entry at the given occurrence index in a combo/list control.
def control_command_set_current_selection(title, text, control, occurrance)
  args = [title, text, control, 'SetCurrentSelection', occurrance]
  command('ControlCommand', args)
end
|
ruby
|
{
"resource": ""
}
|
q2137
|
HtmlPress.Html.process_ie_conditional_comments
|
train
|
# Compress the HTML inside IE conditional comments with a fresh Html
# instance, then reserve the whole comment so later passes skip it.
# Trailing whitespace after the comment is dropped by the outer regex.
def process_ie_conditional_comments (out)
out.gsub /(<!--\[[^\]]+\]>([\s\S]*?)<!\[[^\]]+\]-->)\s*/ do
m = $1
comment = $2
comment_compressed = Html.new.press(comment)
# String pattern: replaces the literal original comment body inside the match.
m.gsub!(comment, comment_compressed)
reserve m
end
end
|
ruby
|
{
"resource": ""
}
|
q2138
|
HtmlPress.Html.process_pres
|
train
|
# Inside <pre> blocks: trim trailing whitespace per line (content otherwise
# preserved), compress entities, then reserve the block so later passes
# leave it untouched. Whitespace after </pre> is dropped by the outer regex.
def process_pres (out)
out.gsub /(<pre\b[^>]*?>([\s\S]*?)<\/pre>)\s*/i do
pre = $2
m = $1
pre_compressed = pre.lines.map{ |l| l.gsub(/\s+$/, '') }.join("\n")
pre_compressed = HtmlPress.entities_compressor pre_compressed
m.gsub!(pre, pre_compressed)
reserve m
end
end
|
ruby
|
{
"resource": ""
}
|
q2139
|
HtmlPress.Html.process_block_elements
|
train
|
# Drop whitespace preceding block-level tags (not rendered by browsers),
# then collapse leading/trailing whitespace runs inside text nodes.
def process_block_elements (out)
re = '\\s+(<\\/?(?:area|base(?:font)?|blockquote|body' +
'|caption|center|cite|col(?:group)?|dd|dir|div|dl|dt|fieldset|form' +
'|frame(?:set)?|h[1-6]|head|hr|html|legend|li|link|map|menu|meta' +
'|ol|opt(?:group|ion)|p|param|t(?:able|body|head|d|h|r|foot|itle)' +
'|ul)\\b[^>]*>)'
re = Regexp.new(re)
out.gsub!(re, '\\1')
# remove whitespaces outside of all elements
out.gsub! />([^<]+)</ do |m|
m.gsub(/^\s+|\s+$/, ' ')
end
out
end
|
ruby
|
{
"resource": ""
}
|
q2140
|
OpenAssets.Util.address_to_oa_address
|
train
|
# Convert a plain Bitcoin address to its Open Assets representation by
# prefixing the OA namespace byte and recomputing the checksum.
# Returns nil for addresses that fail base58 decoding (e.g. bech32).
def address_to_oa_address(btc_address)
begin
btc_hex = decode_base58(btc_address)
# Re-pad to an even-length hex string when a leading nibble was dropped.
btc_hex = '0' +btc_hex if btc_hex.size==47
address = btc_hex[0..-9] # bitcoin address without checksum
named_addr = OA_NAMESPACE.to_s(16) + address
oa_checksum = checksum(named_addr)
encode_base58(named_addr + oa_checksum)
rescue ArgumentError
nil # bech32 format fails to decode. TODO define OA address for segwit
end
end
|
ruby
|
{
"resource": ""
}
|
q2141
|
OpenAssets.Util.oa_address_to_address
|
train
|
# Strip the Open Assets namespace byte (and old checksum) from an OA
# address and rebuild the plain Bitcoin address with a fresh checksum.
def oa_address_to_address(oa_address)
  decoded = decode_base58(oa_address)
  btc_body = decoded[2..-9]
  encode_base58(btc_body + checksum(btc_body))
end
|
ruby
|
{
"resource": ""
}
|
q2142
|
OpenAssets.Util.valid_asset_id?
|
train
|
# An asset id is valid when it is 34 characters long, carries the OA
# version byte, and its embedded P2PKH hash maps to a valid address.
def valid_asset_id?(asset_id)
  return false unless asset_id && asset_id.length == 34
  decoded = decode_base58(asset_id)
  return false unless decoded[0, 2].to_i(16) == oa_version_byte
  valid_address?(hash160_to_address(decoded[2..-9]))
end
|
ruby
|
{
"resource": ""
}
|
q2143
|
OpenAssets.Util.read_var_integer
|
train
|
# Read a Bitcoin-style variable-length integer from the hex string +data+,
# starting at byte +offset+. Returns [value, next_offset], or [nil, 0]
# when the packed data is shorter than the offset requires.
def read_var_integer(data, offset = 0)
raise ArgumentError, "data is nil." unless data
packed = [data].pack('H*')
return [nil, 0] if packed.bytesize < 1+offset
bytes = packed.bytes[offset..(offset + 9)] # 9 is variable integer max storage length.
first_byte = bytes[0]
# The prefix byte selects the width: <0xfd inline, 0xfd => 2 bytes,
# 0xfe => 4 bytes, 0xff => 8 bytes.
if first_byte < 0xfd
[first_byte, offset + 1]
elsif first_byte == 0xfd
[calc_var_integer_val(bytes[1..2]), offset + 3]
elsif first_byte == 0xfe
[calc_var_integer_val(bytes[1..4]), offset + 5]
elsif first_byte == 0xff
[calc_var_integer_val(bytes[1..8]), offset + 9]
end
end
|
ruby
|
{
"resource": ""
}
|
q2144
|
OpenAssets.Util.read_leb128
|
train
|
# Decode an unsigned LEB128 value from the hex string +data+, starting at
# byte +offset+. Returns [value, next_offset]; [nil, offset] when the data
# runs out mid-value.
# Cleanup: bytes[offset..(offset + 1)][0] was just bytes[offset];
# `while true` replaced with the idiomatic `loop do`.
def read_leb128(data, offset = 0)
  bytes = [data].pack('H*').bytes
  result = 0
  shift = 0
  loop do
    return [nil, offset] if bytes.length < 1 + offset
    byte = bytes[offset]
    result |= (byte & 0x7f) << shift
    break if byte & 0x80 == 0 # high bit clear => last byte of the value
    shift += 7
    offset += 1
  end
  [result, offset + 1]
end
|
ruby
|
{
"resource": ""
}
|
q2145
|
ARPScan.ScanReport.to_array
|
train
|
# Snapshot all instance variables as an array; the host list (@hosts)
# is expanded by delegating to each host's #to_array.
def to_array
  instance_variables.map do |ivar|
    value = instance_variable_get(ivar)
    ivar == :@hosts ? value.map(&:to_array) : value
  end
end
|
ruby
|
{
"resource": ""
}
|
q2146
|
Telebot.Client.get_updates
|
train
|
# Fetch pending updates and wrap each raw hash in an Update object.
def get_updates(offset: nil, limit: nil, timeout: nil)
  call(:getUpdates, offset: offset, limit: limit, timeout: timeout)
    .map { |attrs| Update.new(attrs) }
end
|
ruby
|
{
"resource": ""
}
|
q2147
|
Telebot.Client.send_message
|
train
|
# Send a text message to a chat; returns the created Message.
def send_message(chat_id:, text:, disable_web_page_preview: false, reply_to_message_id: nil, reply_markup: nil, parse_mode: nil)
  attrs = call(:sendMessage,
               chat_id: chat_id,
               text: text,
               disable_web_page_preview: disable_web_page_preview,
               reply_to_message_id: reply_to_message_id,
               reply_markup: reply_markup,
               parse_mode: parse_mode)
  Message.new(attrs)
end
|
ruby
|
{
"resource": ""
}
|
q2148
|
Telebot.Client.forward_message
|
train
|
# Forward an existing message from one chat to another; returns the Message.
def forward_message(chat_id:, from_chat_id:, message_id:)
  attrs = call(:forwardMessage, chat_id: chat_id, from_chat_id: from_chat_id, message_id: message_id)
  Message.new(attrs)
end
|
ruby
|
{
"resource": ""
}
|
q2149
|
Telebot.Client.send_photo
|
train
|
# Send a photo to a chat (optionally with a caption); returns the Message.
def send_photo(chat_id:, photo:, caption: nil, reply_to_message_id: nil, reply_markup: nil)
  attrs = call(:sendPhoto, chat_id: chat_id, photo: photo, caption: caption, reply_to_message_id: reply_to_message_id, reply_markup: reply_markup)
  Message.new(attrs)
end
|
ruby
|
{
"resource": ""
}
|
q2150
|
Telebot.Client.send_document
|
train
|
# Send a generic file to a chat; returns the created Message.
def send_document(chat_id:, document:, reply_to_message_id: nil, reply_markup: nil)
  attrs = call(:sendDocument, chat_id: chat_id, document: document, reply_to_message_id: reply_to_message_id, reply_markup: reply_markup)
  Message.new(attrs)
end
|
ruby
|
{
"resource": ""
}
|
q2151
|
Telebot.Client.send_sticker
|
train
|
# Send a sticker to a chat; returns the created Message.
def send_sticker(chat_id:, sticker:, reply_to_message_id: nil, reply_markup: nil)
  attrs = call(:sendSticker, chat_id: chat_id, sticker: sticker, reply_to_message_id: reply_to_message_id, reply_markup: reply_markup)
  Message.new(attrs)
end
|
ruby
|
{
"resource": ""
}
|
q2152
|
Telebot.Client.send_location
|
train
|
# Send a geographic location to a chat; returns the created Message.
def send_location(chat_id:, latitude:, longitude:, reply_to_message_id: nil, reply_markup: nil)
  attrs = call(:sendLocation,
               chat_id: chat_id,
               latitude: latitude,
               longitude: longitude,
               reply_to_message_id: reply_to_message_id,
               reply_markup: reply_markup)
  Message.new(attrs)
end
|
ruby
|
{
"resource": ""
}
|
q2153
|
Telebot.Client.get_user_profile_photos
|
train
|
# Fetch a user's profile pictures; returns a UserProfilePhotos object.
def get_user_profile_photos(user_id:, offset: nil, limit: nil)
  attrs = call(:getUserProfilePhotos, user_id: user_id, offset: offset, limit: limit)
  UserProfilePhotos.new(attrs)
end
|
ruby
|
{
"resource": ""
}
|
q2154
|
LambdaWrap.API.add_lambda
|
train
|
# Register one or more LambdaWrap::Lambda objects (nested arrays are
# flattened); parameter_guard raises on any wrong-typed entry.
def add_lambda(*new_lambda)
  incoming = new_lambda.flatten
  incoming.each { |l| parameter_guard(l, LambdaWrap::Lambda, 'LambdaWrap::Lambda') }
  lambdas.concat(incoming)
end
|
ruby
|
{
"resource": ""
}
|
q2155
|
LambdaWrap.API.deploy
|
train
|
# Deploy all registered Dynamo Tables, Lambdas, and API Gateways to the
# given environment, in that order, printing per-service and total timings.
# Returns true; returns nil early when the API holds no resources.
def deploy(environment_options)
environment_parameter_guard(environment_options)
if no_op?
puts 'Nothing to deploy.'
return
end
deployment_start_message = 'Deploying '
deployment_start_message += "#{dynamo_tables.length} Dynamo Tables, " unless dynamo_tables.empty?
deployment_start_message += "#{lambdas.length} Lambdas, " unless lambdas.empty?
deployment_start_message += "#{api_gateways.length} API Gateways " unless api_gateways.empty?
deployment_start_message += "to Environment: #{environment_options.name}"
puts deployment_start_message
total_time_start = Time.now
services_time_start = total_time_start
# Tables first: lambdas/gateways may depend on them being available.
dynamo_tables.each { |table| table.deploy(environment_options, @dynamo_client, @region) }
services_time_end = Time.now
unless dynamo_tables.empty?
puts "Deploying #{dynamo_tables.length} Table(s) took: \
#{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
end
services_time_start = Time.now
lambdas.each { |lambda| lambda.deploy(environment_options, @lambda_client, @region) }
services_time_end = Time.now
unless lambdas.empty?
puts "Deploying #{lambdas.length} Lambda(s) took: \
#{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
end
services_time_start = Time.now
api_gateways.each { |apig| apig.deploy(environment_options, @api_gateway_client, @region) }
services_time_end = Time.now
unless api_gateways.empty?
puts "Deploying #{api_gateways.length} API Gateway(s) took: \
#{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
end
total_time_end = Time.now
puts "Total API Deployment took: \
#{Time.at(total_time_end - total_time_start).utc.strftime('%H:%M:%S')}"
puts "Successfully deployed API to #{environment_options.name}"
true
end
|
ruby
|
{
"resource": ""
}
|
q2156
|
LambdaWrap.API.delete
|
train
|
# Delete every registered Dynamo Table, Lambda, and API Gateway owned by
# this API, printing per-service and total timings.
# Returns true on completion; returns nil when there is nothing to delete.
# Fix: corrected the user-facing message (was "Nothing to Deleting.").
def delete
  if dynamo_tables.empty? && lambdas.empty? && api_gateways.empty?
    puts 'Nothing to delete.'
    return
  end
  deployment_start_message = 'Deleting '
  deployment_start_message += "#{dynamo_tables.length} Dynamo Tables, " unless dynamo_tables.empty?
  deployment_start_message += "#{lambdas.length} Lambdas, " unless lambdas.empty?
  deployment_start_message += "#{api_gateways.length} API Gateways " unless api_gateways.empty?
  puts deployment_start_message
  total_time_start = Time.now
  services_time_start = total_time_start
  dynamo_tables.each { |table| table.delete(@dynamo_client, @region) }
  services_time_end = Time.now
  unless dynamo_tables.empty?
    puts "Deleting #{dynamo_tables.length} Table(s) took: #{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
  end
  services_time_start = Time.now
  lambdas.each { |lambda| lambda.delete(@lambda_client, @region) }
  services_time_end = Time.now
  unless lambdas.empty?
    puts "Deleting #{lambdas.length} Lambda(s) took: #{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
  end
  services_time_start = Time.now
  api_gateways.each { |apig| apig.delete(@api_gateway_client, @region) }
  services_time_end = Time.now
  unless api_gateways.empty?
    puts "Deleting #{api_gateways.length} API Gateway(s) took: #{Time.at(services_time_end - services_time_start).utc.strftime('%H:%M:%S')}"
  end
  total_time_end = Time.now
  puts "Total API Deletion took: #{Time.at(total_time_end - total_time_start).utc.strftime('%H:%M:%S')}"
  puts 'Successful Deletion of API'
  true
end
|
ruby
|
{
"resource": ""
}
|
q2157
|
LambdaWrap.Lambda.deploy
|
train
|
# Deploy the lambda package to the given environment: create the function
# when absent, otherwise update configuration and code; then point the
# environment alias at the resulting version. Raises ArgumentError when
# the deployment zip is missing. Returns true.
def deploy(environment_options, client, region = 'AWS_REGION')
super
puts "Deploying Lambda: #{@lambda_name} to Environment: #{environment_options.name}"
unless File.exist?(@path_to_zip_file)
raise ArgumentError, "Deployment Package Zip File does not exist: #{@path_to_zip_file}!"
end
lambda_details = retrieve_lambda_details
if lambda_details.nil?
function_version = create_lambda
else
update_lambda_config
function_version = update_lambda_code
end
create_alias(function_version, environment_options.name, environment_options.description)
# Optional housekeeping: drop versions no alias points at.
cleanup_unused_versions if @delete_unreferenced_versions
puts "Lambda: #{@lambda_name} successfully deployed!"
true
end
|
ruby
|
{
"resource": ""
}
|
q2158
|
LambdaWrap.Lambda.delete
|
train
|
# Delete the lambda function (which removes all its versions and aliases).
# A no-op (with a message) when the function does not exist. Returns true.
def delete(client, region = 'AWS_REGION')
super
puts "Deleting all versions and aliases for Lambda: #{@lambda_name}"
lambda_details = retrieve_lambda_details
if lambda_details.nil?
puts 'No Lambda to delete.'
else
options = { function_name: @lambda_name }
@client.delete_function(options)
puts "Lambda #{@lambda_name} and all Versions & Aliases have been deleted."
end
true
end
|
ruby
|
{
"resource": ""
}
|
q2159
|
LambdaWrap.ApiGateway.teardown
|
train
|
# Tear down one environment (stage) of the API Gateway without deleting
# the API itself. Returns true either way.
def teardown(environment_options, client, region = 'AWS_REGION')
  super
  api_id = get_id_for_api(@api_name)
  unless api_id
    puts "API Gateway Object #{@api_name} not found. No environment to tear down."
    return true
  end
  delete_stage(api_id, environment_options.name)
  true
end
|
ruby
|
{
"resource": ""
}
|
q2160
|
LambdaWrap.ApiGateway.delete
|
train
|
# Delete the whole API Gateway object when it exists; otherwise just
# report. Returns true either way.
def delete(client, region = 'AWS_REGION')
  super
  api_id = get_id_for_api(@api_name)
  if api_id
    @client.delete_rest_api(rest_api_id: api_id)
    puts "Deleted API: #{@api_name} ID:#{api_id}"
  else
    puts "API Gateway Object #{@api_name} not found. Nothing to delete."
  end
  true
end
|
ruby
|
{
"resource": ""
}
|
q2161
|
LambdaWrap.DynamoTable.deploy
|
train
|
# Deploy the Dynamo table for the environment: create it when absent,
# otherwise wait for it to become ACTIVE and apply updates. The environment
# name is appended to the table name unless disabled. Returns the full
# table name.
def deploy(environment_options, client, region = 'AWS_REGION')
super
puts "Deploying Table: #{@table_name} to Environment: #{environment_options.name}"
full_table_name = @table_name + (@append_environment_on_deploy ? "-#{environment_options.name}" : '')
table_details = retrieve_table_details(full_table_name)
if table_details.nil?
create_table(full_table_name)
else
# Updates can only be applied to an ACTIVE table.
wait_until_table_is_available(full_table_name) if table_details[:table_status] != 'ACTIVE'
update_table(full_table_name, table_details)
end
puts "Dynamo Table #{full_table_name} is now available."
full_table_name
end
|
ruby
|
{
"resource": ""
}
|
q2162
|
LambdaWrap.DynamoTable.wait_until_table_is_available
|
train
|
# Poll until the table AND all its global secondary indexes report ACTIVE,
# sleeping +delay+ seconds between at most +max_attempts+ polls; raises
# when the table never becomes available.
# NOTE(review): `delay.seconds` relies on ActiveSupport's Numeric#seconds;
# `raise Exception` bypasses normal StandardError rescues — both worth
# confirming/reconsidering.
def wait_until_table_is_available(full_table_name, delay = 5, max_attempts = 5)
puts "Waiting for Table #{full_table_name} to be available."
puts "Waiting with a #{delay} second delay between attempts, for a maximum of #{max_attempts} attempts."
max_time = Time.at(delay * max_attempts).utc.strftime('%H:%M:%S')
puts "Max waiting time will be: #{max_time} (approximate)."
# wait until the table has updated to being fully available
# waiting for ~2min at most; an error will be thrown afterwards
started_waiting_at = Time.now
max_attempts.times do |attempt|
puts "Attempt #{attempt + 1}/#{max_attempts}, \
#{Time.at(Time.now - started_waiting_at).utc.strftime('%H:%M:%S')}/#{max_time}"
details = retrieve_table_details(full_table_name)
if details.table_status != 'ACTIVE'
puts "Table: #{full_table_name} is not yet available. Status: #{details.table_status}. Retrying..."
else
# Table is ACTIVE; still need every global secondary index to be ACTIVE.
updating_indexes = details.global_secondary_indexes.reject do |global_index|
global_index.index_status == 'ACTIVE'
end
return true if updating_indexes.empty?
puts 'Table is available, but the global indexes are not:'
puts(updating_indexes.map { |global_index| "#{global_index.index_name}, #{global_index.index_status}" })
end
Kernel.sleep(delay.seconds)
end
raise Exception, "Table #{full_table_name} did not become available after #{max_attempts} attempts. " \
'Try again later or inspect the AWS console.'
end
|
ruby
|
{
"resource": ""
}
|
q2163
|
LambdaWrap.DynamoTable.build_global_index_updates_array
|
train
|
# Compare the table's current global secondary indexes against the target
# configuration and collect {index_name, provisioned_throughput} entries
# for every same-named index whose read or write capacity differs.
def build_global_index_updates_array(current_global_indexes)
indexes_to_update = []
return indexes_to_update if current_global_indexes.empty?
current_global_indexes.each do |current_index|
@global_secondary_indexes.each do |target_index|
# Find the same named index
next unless target_index[:index_name] == current_index[:index_name]
# Skip unless a different ProvisionedThroughput is specified
# (break: once the name matched, no other target index can match).
break unless (target_index[:provisioned_throughput][:read_capacity_units] !=
current_index.provisioned_throughput.read_capacity_units) ||
(target_index[:provisioned_throughput][:write_capacity_units] !=
current_index.provisioned_throughput.write_capacity_units)
indexes_to_update << { index_name: target_index[:index_name],
provisioned_throughput: target_index[:provisioned_throughput] }
end
end
# NOTE(review): looks like leftover debug output — consider removing.
puts indexes_to_update
indexes_to_update
end
|
ruby
|
{
"resource": ""
}
|
q2164
|
Telebot.UserProfilePhotos.photos=
|
train
|
# Wrap each raw photo-size hash in a PhotoSize object, preserving the
# nested array-of-arrays structure (one inner array per photo).
def photos=(values)
  @photos = values.map do |sizes|
    sizes.map { |attrs| PhotoSize.new(attrs) }
  end
end
|
ruby
|
{
"resource": ""
}
|
q2165
|
BorderPatrol.Polygon.contains_point?
|
train
|
# Ray-casting point-in-polygon test: walk each edge (i, j) and toggle +c+
# every time a horizontal ray from +point+ crosses the edge. A bounding-box
# check gives a cheap early exit.
def contains_point?(point)
return false unless inside_bounding_box?(point)
c = false
i = -1
j = size - 1
while (i += 1) < size
# Edge straddles point.y (half-open comparison avoids double-counting vertices)
if (self[i].y <= point.y && point.y < self[j].y) ||
(self[j].y <= point.y && point.y < self[i].y)
# x-coordinate of the edge at point.y; a crossing toggles inside/outside
if point.x < (self[j].x - self[i].x) * (point.y - self[i].y) / (self[j].y - self[i].y) + self[i].x
c = !c
end
end
j = i
end
c
end
|
ruby
|
{
"resource": ""
}
|
q2166
|
OpenAssets.Api.list_unspent
|
train
|
# List unspent colored outputs for the given OA addresses (all outputs
# when the list is empty), each serialized to a plain hash.
def list_unspent(oa_address_list = [])
  btc_addresses = oa_address_list.map { |oa| oa_address_to_address(oa) }
  get_unspent_outputs(btc_addresses).map(&:to_hash)
end
|
ruby
|
{
"resource": ""
}
|
q2167
|
OpenAssets.Api.get_balance
|
train
|
# Aggregate unspent outputs per script: for each distinct script, report
# the bitcoin value plus a per-asset breakdown (quantity, amount, asset
# definition URL, proof of authenticity). When +address+ is given, only
# that OA address's entry is returned.
def get_balance(address = nil)
outputs = get_unspent_outputs(address.nil? ? [] : [oa_address_to_address(address)])
colored_outputs = outputs.map{|o|o.output}
sorted_outputs = colored_outputs.sort_by { |o|o.script.to_string}
groups = sorted_outputs.group_by{|o| o.script.to_string}
result = groups.map{|k, v|
btc_address = script_to_address(v[0].script)
# NOTE(review): the sort key `o.asset_id unless o.asset_id` is nil whenever
# asset_id is truthy — presumably intended to order nil asset ids; verify.
sorted_script_outputs = v.sort_by{|o|o.asset_id unless o.asset_id}
group_assets = sorted_script_outputs.group_by{|o|o.asset_id}.select{|k,v| !k.nil?}
assets = group_assets.map{|asset_id, outputs|
{
'asset_id' => asset_id,
'quantity' => outputs.inject(0) { |sum, o| sum + o.asset_quantity }.to_s,
'amount' => outputs.inject(0) { |sum, o| sum + o.asset_amount }.to_s,
'asset_definition_url' => outputs[0].asset_definition_url,
'proof_of_authenticity' => outputs[0].proof_of_authenticity
}
}
{
'address' => btc_address,
'oa_address' => (btc_address.nil? || btc_address.is_a?(Array)) ? nil : address_to_oa_address(btc_address),
'value' => satoshi_to_coin(v.inject(0) { |sum, o|sum + o.value}),
'assets' => assets,
'account' => v[0].account
}
}
address.nil? ? result : result.select{|r|r['oa_address'] == address}
end
|
ruby
|
{
"resource": ""
}
|
q2168
|
OpenAssets.Api.issue_asset
|
train
|
# Issue +amount+ units of a new asset funded by +from+ (issued to +from+
# itself unless +to+ is given); returns the processed transaction.
def issue_asset(from, amount, metadata = nil, to = nil, fees = nil, mode = 'broadcast', output_qty = 1)
  to = from if to.nil?
  unspents = get_unspent_outputs([oa_address_to_address(from)])
  issue_param = OpenAssets::Transaction::TransferParameters.new(unspents, to, from, amount, output_qty)
  fee = fees.nil? ? @config[:default_fees] : fees
  process_transaction(create_tx_builder.issue_asset(issue_param, metadata, fee), mode)
end
|
ruby
|
{
"resource": ""
}
|
q2169
|
OpenAssets.Api.send_asset
|
train
|
# Transfer +amount+ of +asset_id+ from one OA address to another;
# returns the processed transaction.
def send_asset(from, asset_id, amount, to, fees = nil, mode = 'broadcast', output_qty = 1)
  unspents = get_unspent_outputs([oa_address_to_address(from)])
  spec = OpenAssets::Transaction::TransferParameters.new(unspents, to, from, amount, output_qty)
  fee = fees.nil? ? @config[:default_fees] : fees
  process_transaction(create_tx_builder.transfer_asset(asset_id, spec, from, fee), mode)
end
|
ruby
|
{
"resource": ""
}
|
q2170
|
OpenAssets.Api.send_assets
|
train
|
# Transfer several assets in one transaction.
#
# @param from default sender open-assets address (a param may override via param.from).
# @param send_asset_params collection of objects exposing asset_id, amount, to and
#   an optional per-entry from.
# @param fees transaction fee; @config[:default_fees] when nil.
# @param mode processing mode string passed to process_transaction.
# @return the transaction produced by process_transaction.
def send_assets(from, send_asset_params, fees = nil, mode = 'broadcast')
  transfer_specs = send_asset_params.map do |param|
    sender = param.from || from
    spendables = get_unspent_outputs([oa_address_to_address(sender)])
    [param.asset_id, OpenAssets::Transaction::TransferParameters.new(spendables, param.to, sender, param.amount)]
  end
  # A zero-value BTC spec for the default sender covers change/fee handling.
  btc_transfer_spec = OpenAssets::Transaction::TransferParameters.new(
    get_unspent_outputs([oa_address_to_address(from)]), nil, oa_address_to_address(from), 0)
  fee = fees.nil? ? @config[:default_fees] : fees
  process_transaction(create_tx_builder.transfer_assets(transfer_specs, btc_transfer_spec, fee), mode)
end
|
ruby
|
{
"resource": ""
}
|
q2171
|
OpenAssets.Api.send_bitcoin
|
train
|
# Send plain bitcoin from one address to another.
#
# @param from sender bitcoin address (validated before use).
# @param amount amount to send (units as expected by TransferParameters).
# @param to recipient bitcoin address (validated before use).
# @param fees transaction fee; @config[:default_fees] when nil.
# @param mode processing mode string passed to process_transaction.
# @param output_qty number of outputs to split the amount across.
# @return the result of process_transaction.
def send_bitcoin(from, amount, to, fees = nil, mode = 'broadcast', output_qty = 1)
  validate_address([from, to])
  spendables = get_unspent_outputs([from])
  transfer_spec = OpenAssets::Transaction::TransferParameters.new(spendables, to, from, amount, output_qty)
  fee = fees.nil? ? @config[:default_fees] : fees
  process_transaction(create_tx_builder.transfer_btc(transfer_spec, fee), mode)
end
|
ruby
|
{
"resource": ""
}
|
q2172
|
OpenAssets.Api.send_bitcoins
|
train
|
# Send bitcoin to multiple recipients in a single transaction.
#
# @param from sender bitcoin address; all specs share its unspent outputs.
# @param send_params collection of objects exposing to and amount.
# @param fees transaction fee; @config[:default_fees] when nil.
# @param mode processing mode string passed to process_transaction.
# @return the transaction produced by process_transaction.
def send_bitcoins(from, send_params, fees = nil, mode = 'broadcast')
  spendables = get_unspent_outputs([from])
  transfer_specs = send_params.map do |param|
    OpenAssets::Transaction::TransferParameters.new(spendables, param.to, from, param.amount)
  end
  fee = fees.nil? ? @config[:default_fees] : fees
  process_transaction(create_tx_builder.transfer_btcs(transfer_specs, fee), mode)
end
|
ruby
|
{
"resource": ""
}
|
q2173
|
OpenAssets.Api.burn_asset
|
train
|
# Burn (destroy) all units of +asset_id+ held by +oa_address+.
#
# @param oa_address open-assets address whose holdings are burned.
# @param asset_id identifier of the asset to burn.
# @param fees transaction fee; @config[:default_fees] when nil.
# @param mode processing mode string passed to process_transaction.
# @return the result of process_transaction.
def burn_asset(oa_address, asset_id, fees = nil, mode = 'broadcast')
  spendables = get_unspent_outputs([oa_address_to_address(oa_address)])
  fee = fees.nil? ? @config[:default_fees] : fees
  process_transaction(create_tx_builder.burn_asset(spendables, asset_id, fee), mode)
end
|
ruby
|
{
"resource": ""
}
|
q2174
|
OpenAssets.Api.get_unspent_outputs
|
train
|
# Fetch the unspent outputs for the given addresses, colored with asset data.
#
# @param addresses list of bitcoin addresses (validated before use).
# @return [Array<OpenAssets::Transaction::SpendableOutput>] one entry per
#   unspent output reported by the provider, annotated with account,
#   confirmations, spendable and solvable fields from the provider listing.
def get_unspent_outputs(addresses)
  validate_address(addresses)
  listed = provider.list_unspent(addresses, @config[:min_confirmation], @config[:max_confirmation])
  listed.map do |entry|
    colored = get_output(entry['txid'], entry['vout'])
    colored.account = entry['account']
    out_point = OpenAssets::Transaction::OutPoint.new(entry['txid'], entry['vout'])
    spendable = OpenAssets::Transaction::SpendableOutput.new(out_point, colored)
    spendable.confirmations = entry['confirmations']
    spendable.spendable = entry['spendable']
    spendable.solvable = entry['solvable']
    spendable
  end
end
|
ruby
|
{
"resource": ""
}
|
q2175
|
OpenAssets.Api.get_outputs_from_txid
|
train
|
# Return the colored outputs of a transaction as plain hashes.
#
# @param txid transaction id to look up.
# @param use_cache whether get_tx may serve the transaction from cache.
# @return [Array<Hash>] one hash per output, augmented with 'txid' and 'vout'.
def get_outputs_from_txid(txid, use_cache = false)
  tx = get_tx(txid, use_cache)
  get_color_outputs_from_tx(tx).each_with_index.map do |output, index|
    output.to_hash.merge('txid' => tx.hash, 'vout' => index)
  end
end
|
ruby
|
{
"resource": ""
}
|
q2176
|
OpenAssets.Api.parse_issuance_p2sh_pointer
|
train
|
# Extract an asset-definition pointer ("u=<url>") embedded in a P2SH scriptSig.
# Returns the pointer string, or nil when the redeem script does not carry one.
#
# NOTE(review): behavior depends on bitcoin-ruby's Script#chunks layout —
# the last scriptSig chunk is assumed to be the serialized redeem script,
# whose second chunk must be OP_DROP. Confirm against the Bitcoin::Script API.
def parse_issuance_p2sh_pointer(script_sig)
# The final chunk of the scriptSig is taken as the redeem script bytes.
script = Bitcoin::Script.new(script_sig).chunks.last
redeem_script = Bitcoin::Script.new(script)
return nil unless redeem_script.chunks[1] == Bitcoin::Script::OP_DROP
# Decode the first chunk (hex) back into a raw byte string.
# NOTE(review): the [0..-1] slice is a no-op; presumably a leftover from a
# version that trimmed bytes — verify before removing.
asset_def = to_bytes(redeem_script.chunks[0].to_s.bth)[0..-1].map{|x|x.to_i(16).chr}.join
# Only "u=" (URL) pointers are recognized; anything else yields nil.
asset_def && asset_def.start_with?('u=') ? asset_def : nil
end
|
ruby
|
{
"resource": ""
}
|
q2177
|
Halite.SpecHelper.chef_runner_options
|
train
|
# Build the options hash for the ChefSpec runner, layering Halite's gemspec,
# the legacy attribute hashes, and any explicit chefspec_options on top of
# whatever the superclass provides.
#
# @return [Hash] the merged runner options.
def chef_runner_options
  super.tap do |opts|
    opts[:halite_gemspec] = halite_gemspec
    # Merge the legacy per-precedence attribute hashes into the base options.
    %i[default_attributes normal_attributes override_attributes].each do |key|
      opts[key].update(send(key))
    end
    # Explicit chefspec options win over everything else.
    opts.update(chefspec_options)
  end
end
|
ruby
|
{
"resource": ""
}
|
q2178
|
Halite.RakeHelper.install
|
train
|
# Install the Halite Rake tasks into the current Rakefile context:
# cookbook build/release tasks (unless :no_gem), foodcritic tasks
# (unless :no_foodcritic), and Test Kitchen tasks when a .kitchen.yml exists
# (unless :no_kitchen).
def install
extend Rake::DSL
# Core Halite tasks
unless options[:no_gem]
desc "Convert #{gemspec.name}-#{gemspec.version} to a cookbook in the pkg directory"
task 'chef:build' do
build_cookbook
end
desc "Push #{gemspec.name}-#{gemspec.version} to Supermarket"
task 'chef:release' => ['chef:build'] do
release_cookbook(pkg_path)
end
# Patch the core gem tasks to run ours too
task 'build' => ['chef:build']
task 'release' => ['chef:release']
else
# :no_gem mode: release straight from the source tree instead of pkg/.
desc "Push #{gem_name} to Supermarket"
task 'chef:release' do
release_cookbook(base)
end
end
# Foodcritic doesn't have a config file, so just always try to add it.
unless options[:no_foodcritic]
install_foodcritic
end
# If a .kitchen.yml exists, install the Test Kitchen tasks.
unless options[:no_kitchen] || !File.exist?(File.join(@base, '.kitchen.yml'))
install_kitchen
end
end
|
ruby
|
{
"resource": ""
}
|
q2179
|
Halite.RakeHelper.remove_files_in_folder
|
train
|
# Delete everything inside +base_path+ (files and directories, dotfiles
# included) while leaving the base directory itself in place.
#
# Raises Error if an entry is neither a regular file nor a directory,
# to avoid following unexpected filesystem objects (e.g. symlink tricks).
#
# @param base_path [String] absolute path of the directory to empty.
def remove_files_in_folder(base_path)
  # expand_path normalizes forms like "foo/." to "foo"; reverse ensures
  # children are removed before their parent directories.
  doomed = Dir.glob(File.join(base_path, '**', '*'), File::FNM_DOTMATCH)
              .map { |entry| File.expand_path(entry) }
              .uniq
              .reverse
  doomed.delete(base_path) # never remove the base directory itself
  doomed.each do |entry|
    if File.file?(entry)
      File.unlink(entry)
    elsif File.directory?(entry)
      Dir.unlink(entry)
    else
      # Because paranoia
      raise Error.new("Unknown type of file at '#{entry}', possible symlink deletion attack")
    end
  end
end
|
ruby
|
{
"resource": ""
}
|
q2180
|
QuartzTorrent.UdpTrackerDriver.readWithTimeout
|
train
|
# Read up to +length+ bytes from +socket+, waiting at most +timeout+ seconds
# for data to arrive.
#
# Bug fix: the timeout error message previously interpolated the instance
# variable @timeout (not set by this method) instead of the +timeout+
# parameter, so the reported number of seconds was blank/wrong.
#
# @param socket the UDP socket to read from.
# @param length maximum number of bytes to receive.
# @param timeout seconds to wait for readability.
# @return [String] the received datagram payload.
# @raise [RuntimeError] on timeout or when select reports no readable socket.
def readWithTimeout(socket, length, timeout)
  ready = IO.select([socket], nil, nil, timeout)
  if !ready
    # IO.select returns nil when the timeout expires with no readable socket.
    raise "Waiting for response from UDP tracker #{@host}:#{@trackerPort} timed out after #{timeout} seconds"
  elsif ready[0].size > 0
    socket.recvfrom(length)[0]
  else
    raise "Error receiving response from UDP tracker #{@host}:#{@trackerPort}"
  end
end
|
ruby
|
{
"resource": ""
}
|
q2181
|
Halite.HelperBase.find_gem_name
|
train
|
# Infer the gem name from the first *.gemspec file found in +base+.
#
# @param base [String] directory to search.
# @return [String, nil] the gemspec basename without extension, or nil
#   when the directory contains no gemspec.
def find_gem_name(base)
  first_spec = Dir[File.join(base, '*.gemspec')].first
  first_spec && File.basename(first_spec, '.gemspec')
end
|
ruby
|
{
"resource": ""
}
|
q2182
|
Halite.HelperBase.gemspec
|
train
|
# Load (and memoize) the Gem::Specification for this project's gemspec,
# with full_gem_path pointed at the local source tree.
#
# @return [Gem::Specification]
# @raise [Error] when the gem name could not be determined automatically.
def gemspec
@gemspec ||= begin
raise Error.new("Unable to automatically determine gem name from specs in #{base}. Please set the gem name via #{self.class.name}.install_tasks(gem_name: 'name')") unless gem_name
g = Bundler.load_gemspec(File.join(base, gem_name+'.gemspec'))
# This is returning the path it would be in if installed normally,
# override so we get the local path. Also for reasons that are entirely
# beyond me, #tap makes Gem::Specification flip out so do it old-school.
g.full_gem_path = base
g
end
end
|
ruby
|
{
"resource": ""
}
|
q2183
|
QuartzTorrent.MetainfoPieceState.metainfoCompletedLength
|
train
|
# Total number of metainfo bytes downloaded so far.
#
# Every complete piece contributes BlockSize bytes, except the final piece,
# which may be shorter (@lastPieceLength).
#
# @return [Integer] completed byte count.
def metainfoCompletedLength
  fullPieces = @completePieces.countSet
  trailing = 0
  if @completePieces.set?(@completePieces.length - 1)
    # Count the (possibly shorter) last piece separately.
    fullPieces -= 1
    trailing = @lastPieceLength
  end
  fullPieces * BlockSize + trailing
end
|
ruby
|
{
"resource": ""
}
|
q2184
|
QuartzTorrent.MetainfoPieceState.savePiece
|
train
|
# Queue an asynchronous write of a metainfo piece and record the request
# so checkResults can match the completion later.
#
# @param pieceIndex index of the piece being written.
# @param data piece payload.
# @return the piece-manager request id.
def savePiece(pieceIndex, data)
  requestId = @pieceManager.writeBlock(pieceIndex, 0, data)
  @pieceManagerRequests[requestId] = PieceManagerRequestMetadata.new(:write, pieceIndex)
  requestId
end
|
ruby
|
{
"resource": ""
}
|
q2185
|
QuartzTorrent.MetainfoPieceState.readPiece
|
train
|
# Queue an asynchronous read of a metainfo piece and record the request
# so checkResults can match the completion later.
#
# @param pieceIndex index of the piece to read.
# @return the piece-manager request id.
def readPiece(pieceIndex)
  # All pieces are BlockSize bytes except possibly the final one.
  length = pieceIndex == @numPieces - 1 ? @lastPieceLength : BlockSize
  requestId = @pieceManager.readBlock(pieceIndex, 0, length)
  @pieceManagerRequests[requestId] = PieceManagerRequestMetadata.new(:read, pieceIndex)
  requestId
end
|
ruby
|
{
"resource": ""
}
|
q2186
|
QuartzTorrent.MetainfoPieceState.checkResults
|
train
|
# Drain all pending results from the piece manager, updating piece state:
# successful writes mark pieces complete; failed writes clear the requested
# flag so the piece can be re-requested. Read failures are only logged.
#
# @return [Array] every result drained from the piece manager this call.
def checkResults
results = []
while true
result = @pieceManager.nextResult
break if ! result
results.push result
# Match the result back to the request we recorded in savePiece/readPiece.
metaData = @pieceManagerRequests.delete(result.requestId)
if ! metaData
@logger.error "Can't find metadata for PieceManager request #{result.requestId}"
next
end
if metaData.type == :write
if result.successful?
@completePieces.set(metaData.data)
else
# Failed write: un-reserve the piece so it will be requested again.
@requestedPieces.clear(metaData.data)
@pieceRequestTime[metaData.data] = nil
@logger.error "Writing metainfo piece failed: #{result.error}"
end
elsif metaData.type == :read
if ! result.successful?
@logger.error "Reading metainfo piece failed: #{result.error}"
end
end
end
results
end
|
ruby
|
{
"resource": ""
}
|
q2187
|
QuartzTorrent.MetainfoPieceState.findRequestablePieces
|
train
|
# List the piece indices that still need to be requested: pieces that are
# neither complete nor currently outstanding. Expired outstanding requests
# are cleared first so they become requestable again.
#
# @return [Array<Integer>] requestable piece indices.
def findRequestablePieces
  removeOldRequests
  (0...@numPieces).select do |index|
    !@completePieces.set?(index) && !@requestedPieces.set?(index)
  end
end
|
ruby
|
{
"resource": ""
}
|
q2188
|
QuartzTorrent.MetainfoPieceState.findRequestablePeers
|
train
|
# Select the established peers we may request metainfo pieces from,
# excluding peers previously flagged as bad.
#
# @param classifiedPeers object exposing establishedPeers.
# @return [Array] peers not present in @badPeers.
def findRequestablePeers(classifiedPeers)
  classifiedPeers.establishedPeers.reject do |peer|
    @badPeers.findByAddr(peer.trackerPeer.ip, peer.trackerPeer.port)
  end
end
|
ruby
|
{
"resource": ""
}
|
q2189
|
QuartzTorrent.MetainfoPieceState.setPieceRequested
|
train
|
# Mark a piece as requested (recording the request time for timeout
# tracking) or clear the request state.
#
# @param pieceIndex index of the piece.
# @param requested truthy to mark requested, falsy to clear.
def setPieceRequested(pieceIndex, requested)
  unless requested
    @requestedPieces.clear pieceIndex
    @pieceRequestTime[pieceIndex] = nil
    return
  end
  @requestedPieces.set pieceIndex
  @pieceRequestTime[pieceIndex] = Time.new
end
|
ruby
|
{
"resource": ""
}
|
q2190
|
QuartzTorrent.MetainfoPieceState.removeOldRequests
|
train
|
# Clear the requested flag (and timestamp) on any outstanding piece request
# older than @requestTimeout, so the piece can be requested again.
def removeOldRequests
  now = Time.new
  @requestedPieces.length.times do |index|
    next unless @requestedPieces.set? index
    if now - @pieceRequestTime[index] > @requestTimeout
      @requestedPieces.clear index
      @pieceRequestTime[index] = nil
    end
  end
end
|
ruby
|
{
"resource": ""
}
|
q2191
|
QuartzTorrent.Handler.scheduleTimer
|
train
|
# Schedule a timer on the reactor; a no-op (returns nil) when no reactor
# is attached.
#
# @param duration timer interval.
# @param metainfo opaque data handed back to the timer handler.
# @param recurring whether the timer repeats.
# @param immed whether the timer fires immediately as well.
def scheduleTimer(duration, metainfo = nil, recurring = true, immed = false)
  return unless @reactor
  @reactor.scheduleTimer(duration, metainfo, recurring, immed)
end
|
ruby
|
{
"resource": ""
}
|
q2192
|
QuartzTorrent.Handler.connect
|
train
|
# Ask the reactor to open an outgoing connection; a no-op (returns nil)
# when no reactor is attached.
#
# @param addr remote address.
# @param port remote port.
# @param metainfo opaque data associated with the connection.
# @param timeout optional connect timeout forwarded to the reactor.
def connect(addr, port, metainfo, timeout = nil)
  return unless @reactor
  @reactor.connect(addr, port, metainfo, timeout)
end
|
ruby
|
{
"resource": ""
}
|
q2193
|
QuartzTorrent.IoFacade.read
|
train
|
# Read exactly +length+ bytes from the wrapped IO, cooperating with the
# reactor by yielding the current Fiber whenever data is not yet available
# or the read rate limit is exhausted. On read errors the behavior depends
# on @ioInfo.useErrorhandler: either the IO is put in :error state and the
# fiber yields, or the exception is re-raised to the caller.
#
# @param length [Integer] number of bytes to accumulate before returning.
# @return [String] the data read.
def read(length)
data = ''
while data.length < length
begin
toRead = length-data.length
rateLimited = false
if @ioInfo.readRateLimit
# Cap this read at whatever the rate limiter currently allows.
avail = @ioInfo.readRateLimit.avail.to_i
if avail < toRead
toRead = avail
rateLimited = true
end
@ioInfo.readRateLimit.withdraw toRead
end
@logger.debug "IoFacade: must read: #{length} have read: #{data.length}. Reading #{toRead} bytes now" if @logger
data << @io.read_nonblock(toRead) if toRead > 0
# If we tried to read more than we are allowed to by rate limiting, yield.
Fiber.yield if rateLimited
rescue Errno::EWOULDBLOCK
# Wait for more data.
@logger.debug "IoFacade: read would block" if @logger
Fiber.yield
rescue Errno::EAGAIN, Errno::EINTR
# Wait for more data.
@logger.debug "IoFacade: read was interrupted" if @logger
Fiber.yield
rescue
@logger.debug "IoFacade: read error: #{$!}" if @logger
# Read failure occurred
@ioInfo.lastReadError = $!
if @ioInfo.useErrorhandler
@ioInfo.state = :error
Fiber.yield
else
raise $!
end
end
end
data
end
|
ruby
|
{
"resource": ""
}
|
q2194
|
QuartzTorrent.Reactor.connect
|
train
|
# Start an outgoing connection and register its IOInfo with the reactor.
# When a timeout is given and the connection is still in progress, a
# one-shot timer is armed to enforce the connect timeout.
#
# @param addr remote address.
# @param port remote port.
# @param metainfo opaque data associated with the connection.
# @param timeout optional connect timeout in seconds.
def connect(addr, port, metainfo, timeout = nil)
  info = startConnection(port, addr, metainfo)
  @ioInfo[info.io] = info
  return unless timeout && info.state == :connecting
  info.connectTimeout = timeout
  info.connectTimer = scheduleTimer(timeout, InternalTimerInfo.new(:connect_timeout, info), false)
end
|
ruby
|
{
"resource": ""
}
|
q2195
|
QuartzTorrent.Reactor.listen
|
train
|
# Open a listening TCP socket on +port+ and register it with the reactor.
#
# NOTE(review): the +addr+ parameter is currently ignored — the socket
# always binds to 0.0.0.0 (all interfaces). Confirm whether that is
# intentional before relying on per-interface binding.
#
# @param addr requested bind address (currently unused).
# @param port TCP port to listen on.
# @param metainfo opaque data associated with the listener.
def listen(addr, port, metainfo)
  server = Socket.new(AF_INET, SOCK_STREAM, 0)
  server.setsockopt(Socket::SOL_SOCKET, Socket::SO_REUSEADDR, true)
  server.bind(Socket.pack_sockaddr_in(port, "0.0.0.0"))
  @logger.debug "listening on port #{port}" if @logger
  server.listen(@listenBacklog)
  info = IOInfo.new(server, metainfo)
  info.readFiberIoFacade.logger = @logger if @logger
  info.state = :listening
  @ioInfo[info.io] = info
end
|
ruby
|
{
"resource": ""
}
|
q2196
|
QuartzTorrent.Reactor.open
|
train
|
# Open a file and register it with the reactor as an already-"connected" IO.
#
# @param path file path to open.
# @param mode File.open mode string.
# @param metainfo opaque data associated with the IO.
# @param useErrorhandler whether read errors go through the error handler
#   (see IoFacade#read) instead of being raised.
def open(path, mode, metainfo, useErrorhandler = true)
  handle = File.open(path, mode)
  info = IOInfo.new(handle, metainfo, true)
  info.useErrorhandler = useErrorhandler
  info.readFiberIoFacade.logger = @logger if @logger
  info.state = :connected
  @ioInfo[info.io] = info
end
|
ruby
|
{
"resource": ""
}
|
q2197
|
QuartzTorrent.Reactor.start
|
train
|
# Run the reactor event loop until eventLoopBody requests a halt.
# Unexpected exceptions are logged and the loop continues; on shutdown,
# every registered IO is closed.
def start
  loop do
    begin
      break if eventLoopBody == :halt
    rescue
      @logger.error "Unexpected exception in reactor event loop: #{$!}" if @logger
      @logger.error $!.backtrace.join "\n" if @logger
    end
  end
  @logger.info "Reactor shutting down" if @logger
  # Event loop finished: close all registered IO objects.
  @ioInfo.each_key { |io| io.close }
end
|
ruby
|
{
"resource": ""
}
|
q2198
|
QuartzTorrent.Reactor.findIoByMetainfo
|
train
|
# Look up the IO facade registered with the given metainfo.
#
# @param metainfo the metainfo to match against registered IOInfo entries.
# @return the read facade, a write-only facade when called from a timer
#   handler, or nil when no IO matches.
def findIoByMetainfo(metainfo)
  match = @ioInfo.each_value.find { |info| info.metainfo == metainfo }
  return nil unless match
  # Don't allow read calls from timer handlers. This is to prevent a complex
  # situation. See the processTimer call in eventLoopBody for more info.
  if @currentHandlerCallback == :timer
    WriteOnlyIoFacade.new(match)
  else
    match.readFiberIoFacade
  end
end
|
ruby
|
{
"resource": ""
}
|
q2199
|
QuartzTorrent.Reactor.handleAccept
|
train
|
# Accept a pending connection on a listening socket and register the new
# IO with the reactor.
#
# Bug fix: +addr+ and +port+ were previously unpacked only inside the
# `if @logger` branch, so when no logger was configured the method
# returned [info, nil, nil]. The unpack is now unconditional.
#
# @param ioInfo the IOInfo of the listening socket.
# @return [Array] the new IOInfo, the client address, and the client port.
def handleAccept(ioInfo)
  socket, clientAddr = ioInfo.io.accept
  info = IOInfo.new(socket, ioInfo.metainfo)
  info.readFiberIoFacade.logger = @logger if @logger
  info.state = :connected
  @ioInfo[info.io] = info
  port, addr = Socket.unpack_sockaddr_in(clientAddr)
  @logger.debug "Accepted connection from #{addr}:#{port}" if @logger
  [info, addr, port]
end
|
ruby
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.